diff --git a/docker-compose.yml b/docker-compose.yml index 4fdea07a99..a6a3329621 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -107,7 +107,7 @@ services: zkevm-prover: container_name: zkevm-prover restart: unless-stopped - image: hermeznetwork/zkevm-prover:v5.0.0-RC5 + image: hermeznetwork/zkevm-prover:v5.0.0-RC8 depends_on: zkevm-state-db: condition: service_healthy diff --git a/event/event.go b/event/event.go index c69380db80..7c325cc242 100644 --- a/event/event.go +++ b/event/event.go @@ -42,9 +42,10 @@ const ( EventID_SynchronizerHalt EventID = "SYNCHRONIZER HALT" // EventID_SequenceSenderHalt is triggered when the SequenceSender halts EventID_SequenceSenderHalt EventID = "SEQUENCESENDER HALT" - // EventID_NodeOOC is triggered when an OOC at node level is detected EventID_NodeOOC EventID = "NODE OOC" + // EventID_ReservedZKCountersOverflow is triggered when reserved ZK counters exceeds remaining batch ZK counters + EventID_ReservedZKCountersOverflow EventID = "RESERVED ZKCOUNTERS OVERFLOW" // Source_Node is the source of the event Source_Node Source = "node" diff --git a/sequencer/addrqueue.go b/sequencer/addrqueue.go index e479a35560..177521c449 100644 --- a/sequencer/addrqueue.go +++ b/sequencer/addrqueue.go @@ -217,17 +217,17 @@ func (a *addrQueue) updateCurrentNonceBalance(nonce *uint64, balance *big.Int) ( } // UpdateTxZKCounters updates the ZKCounters for the given tx (txHash) -func (a *addrQueue) UpdateTxZKCounters(txHash common.Hash, counters state.ZKCounters) { +func (a *addrQueue) UpdateTxZKCounters(txHash common.Hash, usedZKCounters state.ZKCounters, reservedZKCounters state.ZKCounters) { txHashStr := txHash.String() if (a.readyTx != nil) && (a.readyTx.HashStr == txHashStr) { log.Debugf("updating readyTx %s with new ZKCounters from addrQueue %s", txHashStr, a.fromStr) - a.readyTx.updateZKCounters(counters) + a.readyTx.updateZKCounters(usedZKCounters, reservedZKCounters) } else { for _, txTracker := range a.notReadyTxs { if txTracker.HashStr == txHashStr { log.Debugf("updating notReadyTx %s with new ZKCounters from addrQueue %s", txHashStr, a.fromStr) - txTracker.updateZKCounters(counters) + txTracker.updateZKCounters(usedZKCounters, reservedZKCounters) break } } diff --git a/sequencer/batch.go b/sequencer/batch.go index caf86142e5..00ad8678f6 100644 --- a/sequencer/batch.go +++ b/sequencer/batch.go @@ -238,7 +238,7 @@ func (f *finalizer) closeAndOpenNewWIPBatch(ctx context.Context, closeReason sta if f.wipL2Block != nil { f.wipBatch.imStateRoot = f.wipL2Block.imStateRoot // Subtract the WIP L2 block used resources to batch - overflow, overflowResource := f.wipBatch.imRemainingResources.Sub(f.wipL2Block.usedResources) + overflow, overflowResource := f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: f.wipL2Block.usedZKCounters, Bytes: f.wipL2Block.bytes}) if overflow { return fmt.Errorf("failed to subtract L2 block [%d] used resources to new wip batch %d, overflow resource: %s", f.wipL2Block.trackingNum, f.wipBatch.batchNumber, overflowResource) @@ -424,19 +424,7 @@ func (f *finalizer) batchSanityCheck(ctx context.Context, batchNum uint64, initi if err != nil { log.Errorf("error marshaling payload, error: %v", err) } else { - event := &event.Event{ - ReceivedAt: time.Now(), - Source: event.Source_Node, - Component: event.Component_Sequencer, - Level: event.Level_Critical, - EventID: event.EventID_ReprocessFullBatchOOC, - Description: string(payload), - Json: batchRequest, - } - err = f.eventLog.LogEvent(ctx, event) - if err != nil { - 
log.Errorf("error storing payload, error: %v", err) - } + f.LogEvent(ctx, event.Level_Critical, event.EventID_ReprocessFullBatchOOC, string(payload), batchRequest) } return nil, ErrProcessBatchOOC @@ -469,7 +457,7 @@ func (f *finalizer) maxTxsPerBatchReached(batch *Batch) bool { // isBatchResourcesMarginExhausted checks if one of resources of the batch has reached the exhausted margin and returns the name of the exhausted resource func (f *finalizer) isBatchResourcesMarginExhausted(resources state.BatchResources) (bool, string) { - zkCounters := resources.UsedZKCounters + zkCounters := resources.ZKCounters result := false resourceName := "" if resources.Bytes <= f.getConstraintThresholdUint64(f.batchConstraints.MaxBatchBytesSize) { @@ -517,16 +505,16 @@ func (f *finalizer) getConstraintThresholdUint32(input uint32) uint32 { // getUsedBatchResources calculates and returns the used resources of a batch from remaining resources func getUsedBatchResources(constraints state.BatchConstraintsCfg, remainingResources state.BatchResources) state.BatchResources { return state.BatchResources{ - UsedZKCounters: state.ZKCounters{ - GasUsed: constraints.MaxCumulativeGasUsed - remainingResources.UsedZKCounters.GasUsed, - KeccakHashes: constraints.MaxKeccakHashes - remainingResources.UsedZKCounters.KeccakHashes, - PoseidonHashes: constraints.MaxPoseidonHashes - remainingResources.UsedZKCounters.PoseidonHashes, - PoseidonPaddings: constraints.MaxPoseidonPaddings - remainingResources.UsedZKCounters.PoseidonPaddings, - MemAligns: constraints.MaxMemAligns - remainingResources.UsedZKCounters.MemAligns, - Arithmetics: constraints.MaxArithmetics - remainingResources.UsedZKCounters.Arithmetics, - Binaries: constraints.MaxBinaries - remainingResources.UsedZKCounters.Binaries, - Steps: constraints.MaxSteps - remainingResources.UsedZKCounters.Steps, - Sha256Hashes_V2: constraints.MaxSHA256Hashes - remainingResources.UsedZKCounters.Sha256Hashes_V2, + ZKCounters: state.ZKCounters{ + GasUsed: constraints.MaxCumulativeGasUsed - remainingResources.ZKCounters.GasUsed, + KeccakHashes: constraints.MaxKeccakHashes - remainingResources.ZKCounters.KeccakHashes, + PoseidonHashes: constraints.MaxPoseidonHashes - remainingResources.ZKCounters.PoseidonHashes, + PoseidonPaddings: constraints.MaxPoseidonPaddings - remainingResources.ZKCounters.PoseidonPaddings, + MemAligns: constraints.MaxMemAligns - remainingResources.ZKCounters.MemAligns, + Arithmetics: constraints.MaxArithmetics - remainingResources.ZKCounters.Arithmetics, + Binaries: constraints.MaxBinaries - remainingResources.ZKCounters.Binaries, + Steps: constraints.MaxSteps - remainingResources.ZKCounters.Steps, + Sha256Hashes_V2: constraints.MaxSHA256Hashes - remainingResources.ZKCounters.Sha256Hashes_V2, }, Bytes: constraints.MaxBatchBytesSize - remainingResources.Bytes, } @@ -535,7 +523,7 @@ func getUsedBatchResources(constraints state.BatchConstraintsCfg, remainingResou // getMaxRemainingResources returns the max resources that can be used in a batch func getMaxRemainingResources(constraints state.BatchConstraintsCfg) state.BatchResources { return state.BatchResources{ - UsedZKCounters: state.ZKCounters{ + ZKCounters: state.ZKCounters{ GasUsed: constraints.MaxCumulativeGasUsed, KeccakHashes: constraints.MaxKeccakHashes, PoseidonHashes: constraints.MaxPoseidonHashes, diff --git a/sequencer/errors.go b/sequencer/errors.go index a61ae7b4c1..8251cd79e5 100644 --- a/sequencer/errors.go +++ b/sequencer/errors.go @@ -29,8 +29,8 @@ var ( ErrExecutorError = errors.New("executor 
error") // ErrNoFittingTransaction happens when there is not a tx (from the txSortedList) that fits in the remaining batch resources ErrNoFittingTransaction = errors.New("no fit transaction") - // ErrBatchResourceUnderFlow happens when there is batch resoure underflow after sustract the resources from a tx - ErrBatchResourceUnderFlow = errors.New("batch resource underflow") + // ErrBatchResourceOverFlow happens when there is a tx that overlows remaining batch resources + ErrBatchResourceOverFlow = errors.New("batch resource overflow") // ErrTransactionsListEmpty happens when txSortedList is empty ErrTransactionsListEmpty = errors.New("transactions list empty") ) diff --git a/sequencer/finalizer.go b/sequencer/finalizer.go index ad759183fd..4a96c421c9 100644 --- a/sequencer/finalizer.go +++ b/sequencer/finalizer.go @@ -277,7 +277,6 @@ func (f *finalizer) finalizeBatches(ctx context.Context) { continue } - closeWIPBatch := false metrics.WorkerProcessingTime(time.Since(start)) if tx != nil { showNotFoundTxLog = true @@ -286,14 +285,14 @@ func (f *finalizer) finalizeBatches(ctx context.Context) { for { var err error - _, closeWIPBatch, err = f.processTransaction(ctx, tx, firstTxProcess) + _, err = f.processTransaction(ctx, tx, firstTxProcess) if err != nil { if err == ErrEffectiveGasPriceReprocess { firstTxProcess = false log.Infof("reprocessing tx %s because of effective gas price calculation", tx.HashStr) continue - } else if err == ErrBatchResourceUnderFlow { - log.Infof("skipping tx %s due to a batch resource underflow", tx.HashStr) + } else if err == ErrBatchResourceOverFlow { + log.Infof("skipping tx %s due to a batch resource overflow", tx.HashStr) break } else { log.Errorf("failed to process tx %s, error: %v", err) @@ -321,11 +320,7 @@ func (f *finalizer) finalizeBatches(ctx context.Context) { } // Check if we must finalize the batch due to a closing reason (resources exhausted, max txs, timestamp resolution, forced batches deadline) - finalize, closeReason := f.checkIfFinalizeBatch() - if closeWIPBatch || finalize { - if closeWIPBatch { - closeReason = "Executor close batch" - } + if finalize, closeReason := f.checkIfFinalizeBatch(); finalize { f.finalizeWIPBatch(ctx, closeReason) } @@ -337,7 +332,7 @@ func (f *finalizer) finalizeBatches(ctx context.Context) { } // processTransaction processes a single transaction. 
-func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, firstTxProcess bool) (errWg *sync.WaitGroup, closeWIPBatch bool, err error) { +func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, firstTxProcess bool) (errWg *sync.WaitGroup, err error) { start := time.Now() defer func() { metrics.ProcessingTime(time.Since(start)) @@ -374,14 +369,14 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first // Save values for later logging tx.EGPLog.L1GasPrice = tx.L1GasPrice tx.EGPLog.L2GasPrice = txL2GasPrice - tx.EGPLog.GasUsedFirst = tx.BatchResources.UsedZKCounters.GasUsed + tx.EGPLog.GasUsedFirst = tx.UsedZKCounters.GasUsed tx.EGPLog.GasPrice.Set(txGasPrice) // Calculate EffectiveGasPrice - egp, err := f.effectiveGasPrice.CalculateEffectiveGasPrice(tx.RawTx, txGasPrice, tx.BatchResources.UsedZKCounters.GasUsed, tx.L1GasPrice, txL2GasPrice) + egp, err := f.effectiveGasPrice.CalculateEffectiveGasPrice(tx.RawTx, txGasPrice, tx.UsedZKCounters.GasUsed, tx.L1GasPrice, txL2GasPrice) if err != nil { if f.effectiveGasPrice.IsEnabled() { - return nil, false, err + return nil, err } else { log.Warnf("effectiveGasPrice is disabled, but failed to calculate effectiveGasPrice for tx %s, error: %v", tx.HashStr, err) tx.EGPLog.Error = fmt.Sprintf("CalculateEffectiveGasPrice#1: %s", err) @@ -409,7 +404,7 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first egpPercentage, err := f.effectiveGasPrice.CalculateEffectiveGasPricePercentage(txGasPrice, tx.EffectiveGasPrice) if err != nil { if f.effectiveGasPrice.IsEnabled() { - return nil, false, err + return nil, err } else { log.Warnf("effectiveGasPrice is disabled, but failed to to calculate efftive gas price percentage (#1), error: %v", err) tx.EGPLog.Error = fmt.Sprintf("%s; CalculateEffectiveGasPricePercentage#1: %s", tx.EGPLog.Error, err) @@ -429,7 +424,7 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first effectivePercentageAsDecodedHex, err := hex.DecodeHex(fmt.Sprintf("%x", tx.EGPPercentage)) if err != nil { - return nil, false, err + return nil, err } batchRequest.Transactions = append(batchRequest.Transactions, effectivePercentageAsDecodedHex...) 
@@ -438,7 +433,7 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first if err != nil && (errors.Is(err, runtime.ErrExecutorDBError) || errors.Is(err, runtime.ErrInvalidTxChangeL2BlockMinTimestamp)) { log.Errorf("failed to process tx %s, error: %v", tx.HashStr, err) - return nil, false, err + return nil, err } else if err == nil && !batchResponse.IsRomLevelError && len(batchResponse.BlockResponses) == 0 { err = fmt.Errorf("executor returned no errors and no responses for tx %s", tx.HashStr) f.Halt(ctx, err, false) @@ -456,35 +451,35 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first } else { metrics.TxProcessed(metrics.TxProcessedLabelInvalid, 1) } - return nil, false, err + return nil, err } - closeBatch := false oldStateRoot := f.wipBatch.imStateRoot if len(batchResponse.BlockResponses) > 0 { - errWg, closeBatch, err = f.handleProcessTransactionResponse(ctx, tx, batchResponse, oldStateRoot) + errWg, err = f.handleProcessTransactionResponse(ctx, tx, batchResponse, oldStateRoot) if err != nil { - return errWg, false, err + return errWg, err } } // Update imStateRoot f.wipBatch.imStateRoot = batchResponse.NewStateRoot - log.Infof("processed tx %s, batchNumber: %d, l2Block: [%d], newStateRoot: %s, oldStateRoot: %s, used counters: %s", - tx.HashStr, batchRequest.BatchNumber, f.wipL2Block.trackingNum, batchResponse.NewStateRoot.String(), batchRequest.OldStateRoot.String(), f.logZKCounters(batchResponse.UsedZkCounters)) + log.Infof("processed tx %s, batchNumber: %d, l2Block: [%d], newStateRoot: %s, oldStateRoot: %s, used counters: %s, reserved counters: %s", + tx.HashStr, batchRequest.BatchNumber, f.wipL2Block.trackingNum, batchResponse.NewStateRoot.String(), batchRequest.OldStateRoot.String(), + f.logZKCounters(batchResponse.UsedZkCounters), f.logZKCounters(batchResponse.ReservedZkCounters)) - return nil, closeBatch, nil + return nil, nil } // handleProcessTransactionResponse handles the response of transaction processing. 
-func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *TxTracker, result *state.ProcessBatchResponse, oldStateRoot common.Hash) (errWg *sync.WaitGroup, closeWIPBatch bool, err error) { +func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *TxTracker, result *state.ProcessBatchResponse, oldStateRoot common.Hash) (errWg *sync.WaitGroup, err error) { // Handle Transaction Error errorCode := executor.RomErrorCode(result.BlockResponses[0].TransactionResponses[0].RomError) if !state.IsStateRootChanged(errorCode) { // If intrinsic error or OOC error, we skip adding the transaction to the batch errWg = f.handleProcessTransactionError(ctx, result, tx) - return errWg, false, result.BlockResponses[0].TransactionResponses[0].RomError + return errWg, result.BlockResponses[0].TransactionResponses[0].RomError } egpEnabled := f.effectiveGasPrice.IsEnabled() @@ -499,7 +494,7 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx if err != nil { if egpEnabled { log.Errorf("failed to calculate effective gas price with new gasUsed for tx %s, error: %v", tx.HashStr, err.Error()) - return nil, false, err + return nil, err } else { log.Warnf("effectiveGasPrice is disabled, but failed to calculate effective gas price with new gasUsed for tx %s, error: %v", tx.HashStr, err.Error()) tx.EGPLog.Error = fmt.Sprintf("%s; CalculateEffectiveGasPrice#2: %s", tx.EGPLog.Error, err) @@ -524,32 +519,29 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx } if errCompare != nil && egpEnabled { - return nil, false, errCompare + return nil, errCompare } } } - // Check remaining resources - - overflow, overflowResource := f.wipBatch.imRemainingResources.Sub(state.BatchResources{UsedZKCounters: result.UsedZkCounters, Bytes: uint64(len(tx.RawTx))}) - if overflow { - log.Infof("current tx %s exceeds the remaining batch resources, overflow resource: %s, updating metadata for tx in worker and continuing", tx.HashStr, overflowResource) - if !f.batchConstraints.IsWithinConstraints(result.UsedZkCounters) { - log.Warnf("current tx %s exceeds the max limit for batch resources (node OOC), setting tx as invalid in the pool", tx.HashStr) - - event := &event.Event{ - ReceivedAt: time.Now(), - Source: event.Source_Node, - Component: event.Component_Sequencer, - Level: event.Level_Error, - EventID: event.EventID_NodeOOC, - Description: fmt.Sprintf("tx: %s exceeds node max limit batch resources (node OOC), from: %s, IP: %s", tx.HashStr, tx.FromStr, tx.IP), - } + // Check if reserved resources of the tx fits in the remaining batch resources + subOverflow := false + fits, overflowResource := f.wipBatch.imRemainingResources.Fits(state.BatchResources{ZKCounters: result.ReservedZkCounters, Bytes: uint64(len(tx.RawTx))}) + if fits { + // Sustract the used resources from the batch + subOverflow, overflowResource = f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: result.UsedZkCounters, Bytes: uint64(len(tx.RawTx))}) + if subOverflow { // Sanity check, this cannot happen as reservedZKCounters should be >= that usedZKCounters + log.Infof("current tx %s used resources exceeds the remaining batch resources, overflow resource: %s, updating metadata for tx in worker and continuing. 
Batch counters: %s, tx used counters: %s", + tx.HashStr, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(result.UsedZkCounters)) + } + } else { + log.Infof("current tx %s reserved resources exceeds the remaining batch resources, overflow resource: %s, updating metadata for tx in worker and continuing. Batch counters: %s, tx reserved counters: %s", + tx.HashStr, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(result.ReservedZkCounters)) + if !f.batchConstraints.IsWithinConstraints(result.ReservedZkCounters) { + log.Warnf("current tx %s reserved resources exceeds the max limit for batch resources (node OOC), setting tx as invalid in the pool", tx.HashStr) - eventErr := f.eventLog.LogEvent(ctx, event) - if eventErr != nil { - log.Errorf("error storing finalizer halt event, error: %v", eventErr) - } + f.LogEvent(ctx, event.Level_Error, event.EventID_NodeOOC, + fmt.Sprintf("tx: %s exceeds node max limit batch resources (node OOC), from: %s, IP: %s", tx.HashStr, tx.FromStr, tx.IP), nil) // Delete the transaction from the txSorted list f.workerIntf.DeleteTx(tx.Hash, tx.From) @@ -559,16 +551,18 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx if err != nil { log.Errorf("failed to update status to invalid in the pool for tx %s, error: %v", tx.Hash.String(), err) } - - return nil, false, ErrBatchResourceUnderFlow - } else { - start := time.Now() - f.workerIntf.UpdateTxZKCounters(result.BlockResponses[0].TransactionResponses[0].TxHash, tx.From, result.UsedZkCounters) - metrics.WorkerProcessingTime(time.Since(start)) - return nil, false, ErrBatchResourceUnderFlow } } + // If reserved tx resources don't fit in the remaining batch resources (or we got an overflow when trying to subtract the used resources) + // we update the ZKCounters of the tx and returns ErrBatchResourceOverFlow error + if !fits || subOverflow { + start := time.Now() + f.workerIntf.UpdateTxZKCounters(result.BlockResponses[0].TransactionResponses[0].TxHash, tx.From, result.UsedZkCounters, result.ReservedZkCounters) + metrics.WorkerProcessingTime(time.Since(start)) + return nil, ErrBatchResourceOverFlow + } + // Save Enabled, GasPriceOC, BalanceOC and final effective gas price for later logging tx.EGPLog.Enabled = egpEnabled tx.EGPLog.GasPriceOC = result.BlockResponses[0].TransactionResponses[0].HasGaspriceOpcode @@ -581,10 +575,12 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx tx.EGPLog.GasPrice, tx.EGPLog.L1GasPrice, tx.EGPLog.L2GasPrice, tx.EGPLog.Reprocess, tx.EGPLog.GasPriceOC, tx.EGPLog.BalanceOC, egpEnabled, len(tx.RawTx), tx.HashStr, tx.EGPLog.Error) f.wipL2Block.addTx(tx) + f.wipBatch.countOfTxs++ + f.updateWorkerAfterSuccessfulProcessing(ctx, tx.Hash, tx.From, false, result) - return nil, false, nil + return nil, nil } // compareTxEffectiveGasPrice compares newEffectiveGasPrice with tx.EffectiveGasPrice. 
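The hunk above changes tx admission in handleProcessTransactionResponse from a single Sub call into a two-step check: the tx's reserved ZK counters (plus its byte size) must first Fit in the remaining batch resources, and only then are the used counters subtracted; on either failure the worker's view of the tx counters is refreshed and ErrBatchResourceOverFlow is returned. Below is a minimal, self-contained Go sketch of that pattern; simpleResources and admitTx are illustrative stand-ins, not the actual state.BatchResources API.

package main

import (
	"errors"
	"fmt"
)

// simpleResources is an illustrative stand-in for state.BatchResources,
// reduced to two fields so the admission flow is easy to follow.
type simpleResources struct {
	Gas   uint64
	Bytes uint64
}

// fits reports whether other fits inside the remaining resources and,
// if not, the name of the first resource that does not fit.
func (r simpleResources) fits(other simpleResources) (bool, string) {
	if other.Gas > r.Gas {
		return false, "Gas"
	}
	if other.Bytes > r.Bytes {
		return false, "Bytes"
	}
	return true, ""
}

// sub subtracts other from the remaining resources; it reports an overflow
// and the offending resource instead of letting the counters underflow.
func (r *simpleResources) sub(other simpleResources) (bool, string) {
	if ok, name := r.fits(other); !ok {
		return true, name
	}
	r.Gas -= other.Gas
	r.Bytes -= other.Bytes
	return false, ""
}

var errBatchResourceOverflow = errors.New("batch resource overflow")

// admitTx mirrors the new flow: the reserved counters decide whether the tx
// is admitted, but only the used counters are subtracted from the batch.
func admitTx(remaining *simpleResources, used, reserved simpleResources) error {
	fits, name := remaining.fits(reserved)
	if !fits {
		return fmt.Errorf("%w, resource: %s", errBatchResourceOverflow, name)
	}
	// Sanity check: this should never overflow, since reserved >= used.
	if overflow, name := remaining.sub(used); overflow {
		return fmt.Errorf("%w, resource: %s", errBatchResourceOverflow, name)
	}
	return nil
}

func main() {
	remaining := simpleResources{Gas: 100, Bytes: 50}
	// The used counters alone would fit, but the reserved (worst-case)
	// counters do not, so the tx is skipped and the batch is left untouched.
	err := admitTx(&remaining, simpleResources{Gas: 60, Bytes: 10}, simpleResources{Gas: 120, Bytes: 10})
	fmt.Println(err, remaining)
}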
@@ -728,19 +724,8 @@ func (f *finalizer) handleProcessTransactionError(ctx context.Context, result *s // checkIfProverRestarted checks if the proverID changed func (f *finalizer) checkIfProverRestarted(proverID string) { if f.proverID != "" && f.proverID != proverID { - event := &event.Event{ - ReceivedAt: time.Now(), - Source: event.Source_Node, - Component: event.Component_Sequencer, - Level: event.Level_Critical, - EventID: event.EventID_FinalizerRestart, - Description: fmt.Sprintf("proverID changed from %s to %s, restarting sequencer to discard current WIP batch and work with new executor", f.proverID, proverID), - } - - err := f.eventLog.LogEvent(context.Background(), event) - if err != nil { - log.Errorf("error storing payload, error: %v", err) - } + f.LogEvent(context.Background(), event.Level_Critical, event.EventID_FinalizerRestart, + fmt.Sprintf("proverID changed from %s to %s, restarting sequencer to discard current WIP batch and work with new executor", f.proverID, proverID), nil) log.Fatal("proverID changed from %s to %s, restarting sequencer to discard current WIP batch and work with new executor") } @@ -757,19 +742,7 @@ func (f *finalizer) logZKCounters(counters state.ZKCounters) string { func (f *finalizer) Halt(ctx context.Context, err error, isFatal bool) { f.haltFinalizer.Store(true) - event := &event.Event{ - ReceivedAt: time.Now(), - Source: event.Source_Node, - Component: event.Component_Sequencer, - Level: event.Level_Critical, - EventID: event.EventID_FinalizerHalt, - Description: fmt.Sprintf("finalizer halted due to error, error: %s", err), - } - - eventErr := f.eventLog.LogEvent(ctx, event) - if eventErr != nil { - log.Errorf("error storing finalizer halt event, error: %v", eventErr) - } + f.LogEvent(ctx, event.Level_Critical, event.EventID_FinalizerHalt, fmt.Sprintf("finalizer halted due to error, error: %s", err), nil) if isFatal { log.Fatalf("fatal error on finalizer, error: %v", err) @@ -780,3 +753,24 @@ func (f *finalizer) Halt(ctx context.Context, err error, isFatal bool) { } } } + +// LogEvent adds an event for runtime debugging +func (f *finalizer) LogEvent(ctx context.Context, level event.Level, eventId event.EventID, description string, json interface{}) { + event := &event.Event{ + ReceivedAt: time.Now(), + Source: event.Source_Node, + Component: event.Component_Sequencer, + Level: level, + EventID: eventId, + Description: description, + } + + if json != nil { + event.Json = json + } + + eventErr := f.eventLog.LogEvent(ctx, event) + if eventErr != nil { + log.Errorf("error storing log event, error: %v", eventErr) + } +} diff --git a/sequencer/finalizer_test.go b/sequencer/finalizer_test.go index 278e8c50cd..e119919ffe 100644 --- a/sequencer/finalizer_test.go +++ b/sequencer/finalizer_test.go @@ -1077,8 +1077,8 @@ func TestFinalizer_checkRemainingResources(t *testing.T) { BlockResponses: []*state.ProcessBlockResponse{blockResponse}, } remainingResources := state.BatchResources{ - UsedZKCounters: state.ZKCounters{GasUsed: 9000}, - Bytes: 10000, + ZKCounters: state.ZKCounters{GasUsed: 9000}, + Bytes: 10000, } f.wipBatch.imRemainingResources = remainingResources testCases := []struct { @@ -1109,7 +1109,7 @@ func TestFinalizer_checkRemainingResources(t *testing.T) { { name: "ZkCounter Resource Exceeded", remaining: state.BatchResources{ - UsedZKCounters: state.ZKCounters{GasUsed: 0}, + ZKCounters: state.ZKCounters{GasUsed: 0}, }, overflow: true, overflowResource: "CumulativeGas", @@ -1128,7 +1128,7 @@ func TestFinalizer_checkRemainingResources(t *testing.T) 
{ } // act - overflow, overflowResource := f.wipBatch.imRemainingResources.Sub(state.BatchResources{UsedZKCounters: result.UsedZkCounters, Bytes: uint64(len(tc.expectedTxTracker.RawTx))}) + overflow, overflowResource := f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: result.UsedZkCounters, Bytes: uint64(len(tc.expectedTxTracker.RawTx))}) // assert assert.Equal(t, tc.overflow, overflow) @@ -1906,7 +1906,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { { name: "Is ready - MaxCumulativeGasUsed", modifyResourceFunc: func(resources state.BatchResources) state.BatchResources { - resources.UsedZKCounters.GasUsed = f.getConstraintThresholdUint64(bc.MaxCumulativeGasUsed) - 1 + resources.ZKCounters.GasUsed = f.getConstraintThresholdUint64(bc.MaxCumulativeGasUsed) - 1 return resources }, expectedResult: true, @@ -1914,7 +1914,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { { name: "Is NOT ready - MaxCumulativeGasUsed", modifyResourceFunc: func(resources state.BatchResources) state.BatchResources { - resources.UsedZKCounters.GasUsed = f.getConstraintThresholdUint64(bc.MaxCumulativeGasUsed) + 1 + resources.ZKCounters.GasUsed = f.getConstraintThresholdUint64(bc.MaxCumulativeGasUsed) + 1 return resources }, expectedResult: false, @@ -1922,7 +1922,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { { name: "Is ready - MaxSteps", modifyResourceFunc: func(resources state.BatchResources) state.BatchResources { - resources.UsedZKCounters.Steps = f.getConstraintThresholdUint32(bc.MaxSteps) - 1 + resources.ZKCounters.Steps = f.getConstraintThresholdUint32(bc.MaxSteps) - 1 return resources }, expectedResult: true, @@ -1930,7 +1930,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { { name: "Is NOT ready - MaxSteps", modifyResourceFunc: func(resources state.BatchResources) state.BatchResources { - resources.UsedZKCounters.Steps = f.getConstraintThresholdUint32(bc.MaxSteps) + 1 + resources.ZKCounters.Steps = f.getConstraintThresholdUint32(bc.MaxSteps) + 1 return resources }, expectedResult: false, @@ -1938,7 +1938,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { { name: "Is ready - MaxPoseidonPaddings", modifyResourceFunc: func(resources state.BatchResources) state.BatchResources { - resources.UsedZKCounters.PoseidonPaddings = f.getConstraintThresholdUint32(bc.MaxPoseidonPaddings) - 1 + resources.ZKCounters.PoseidonPaddings = f.getConstraintThresholdUint32(bc.MaxPoseidonPaddings) - 1 return resources }, expectedResult: true, @@ -1946,7 +1946,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { { name: "Is NOT ready - MaxPoseidonPaddings", modifyResourceFunc: func(resources state.BatchResources) state.BatchResources { - resources.UsedZKCounters.PoseidonPaddings = f.getConstraintThresholdUint32(bc.MaxPoseidonPaddings) + 1 + resources.ZKCounters.PoseidonPaddings = f.getConstraintThresholdUint32(bc.MaxPoseidonPaddings) + 1 return resources }, expectedResult: false, @@ -1954,7 +1954,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { { name: "Is ready - MaxBinaries", modifyResourceFunc: func(resources state.BatchResources) state.BatchResources { - resources.UsedZKCounters.Binaries = f.getConstraintThresholdUint32(bc.MaxBinaries) - 1 + resources.ZKCounters.Binaries = f.getConstraintThresholdUint32(bc.MaxBinaries) - 1 return resources }, expectedResult: true, @@ -1962,7 +1962,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { { name: "Is NOT ready - MaxBinaries", modifyResourceFunc: func(resources state.BatchResources) state.BatchResources { - 
resources.UsedZKCounters.Binaries = f.getConstraintThresholdUint32(bc.MaxBinaries) + 1 + resources.ZKCounters.Binaries = f.getConstraintThresholdUint32(bc.MaxBinaries) + 1 return resources }, expectedResult: false, @@ -1970,7 +1970,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { { name: "Is ready - MaxKeccakHashes", modifyResourceFunc: func(resources state.BatchResources) state.BatchResources { - resources.UsedZKCounters.KeccakHashes = f.getConstraintThresholdUint32(bc.MaxKeccakHashes) - 1 + resources.ZKCounters.KeccakHashes = f.getConstraintThresholdUint32(bc.MaxKeccakHashes) - 1 return resources }, expectedResult: true, @@ -1978,7 +1978,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { { name: "Is NOT ready - MaxKeccakHashes", modifyResourceFunc: func(resources state.BatchResources) state.BatchResources { - resources.UsedZKCounters.KeccakHashes = f.getConstraintThresholdUint32(bc.MaxKeccakHashes) + 1 + resources.ZKCounters.KeccakHashes = f.getConstraintThresholdUint32(bc.MaxKeccakHashes) + 1 return resources }, expectedResult: false, @@ -1986,7 +1986,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { { name: "Is ready - MaxArithmetics", modifyResourceFunc: func(resources state.BatchResources) state.BatchResources { - resources.UsedZKCounters.Arithmetics = f.getConstraintThresholdUint32(bc.MaxArithmetics) - 1 + resources.ZKCounters.Arithmetics = f.getConstraintThresholdUint32(bc.MaxArithmetics) - 1 return resources }, expectedResult: true, @@ -1994,7 +1994,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { { name: "Is NOT ready - MaxArithmetics", modifyResourceFunc: func(resources state.BatchResources) state.BatchResources { - resources.UsedZKCounters.Arithmetics = f.getConstraintThresholdUint32(bc.MaxArithmetics) + 1 + resources.ZKCounters.Arithmetics = f.getConstraintThresholdUint32(bc.MaxArithmetics) + 1 return resources }, expectedResult: false, @@ -2002,7 +2002,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { { name: "Is ready - MaxMemAligns", modifyResourceFunc: func(resources state.BatchResources) state.BatchResources { - resources.UsedZKCounters.MemAligns = f.getConstraintThresholdUint32(bc.MaxMemAligns) - 1 + resources.ZKCounters.MemAligns = f.getConstraintThresholdUint32(bc.MaxMemAligns) - 1 return resources }, expectedResult: true, @@ -2010,7 +2010,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { { name: "Is NOT ready - MaxMemAligns", modifyResourceFunc: func(resources state.BatchResources) state.BatchResources { - resources.UsedZKCounters.MemAligns = f.getConstraintThresholdUint32(bc.MaxMemAligns) + 1 + resources.ZKCounters.MemAligns = f.getConstraintThresholdUint32(bc.MaxMemAligns) + 1 return resources }, expectedResult: false, @@ -2018,7 +2018,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { { name: "Is ready - MaxSHA256Hashes", modifyResourceFunc: func(resources state.BatchResources) state.BatchResources { - resources.UsedZKCounters.Sha256Hashes_V2 = f.getConstraintThresholdUint32(bc.MaxSHA256Hashes) - 1 + resources.ZKCounters.Sha256Hashes_V2 = f.getConstraintThresholdUint32(bc.MaxSHA256Hashes) - 1 return resources }, expectedResult: true, @@ -2026,7 +2026,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) { { name: "Is NOT ready - MaxSHA256Hashes", modifyResourceFunc: func(resources state.BatchResources) state.BatchResources { - resources.UsedZKCounters.Sha256Hashes_V2 = f.getConstraintThresholdUint32(bc.MaxSHA256Hashes) + 1 + resources.ZKCounters.Sha256Hashes_V2 = 
f.getConstraintThresholdUint32(bc.MaxSHA256Hashes) + 1 return resources }, expectedResult: false, @@ -2101,15 +2101,15 @@ func TestFinalizer_getRemainingResources(t *testing.T) { remainingResources := getMaxRemainingResources(bc) // assert - assert.Equal(t, remainingResources.UsedZKCounters.GasUsed, bc.MaxCumulativeGasUsed) - assert.Equal(t, remainingResources.UsedZKCounters.KeccakHashes, bc.MaxKeccakHashes) - assert.Equal(t, remainingResources.UsedZKCounters.PoseidonHashes, bc.MaxPoseidonHashes) - assert.Equal(t, remainingResources.UsedZKCounters.PoseidonPaddings, bc.MaxPoseidonPaddings) - assert.Equal(t, remainingResources.UsedZKCounters.MemAligns, bc.MaxMemAligns) - assert.Equal(t, remainingResources.UsedZKCounters.Arithmetics, bc.MaxArithmetics) - assert.Equal(t, remainingResources.UsedZKCounters.Binaries, bc.MaxBinaries) - assert.Equal(t, remainingResources.UsedZKCounters.Steps, bc.MaxSteps) - assert.Equal(t, remainingResources.UsedZKCounters.Sha256Hashes_V2, bc.MaxSHA256Hashes) + assert.Equal(t, remainingResources.ZKCounters.GasUsed, bc.MaxCumulativeGasUsed) + assert.Equal(t, remainingResources.ZKCounters.KeccakHashes, bc.MaxKeccakHashes) + assert.Equal(t, remainingResources.ZKCounters.PoseidonHashes, bc.MaxPoseidonHashes) + assert.Equal(t, remainingResources.ZKCounters.PoseidonPaddings, bc.MaxPoseidonPaddings) + assert.Equal(t, remainingResources.ZKCounters.MemAligns, bc.MaxMemAligns) + assert.Equal(t, remainingResources.ZKCounters.Arithmetics, bc.MaxArithmetics) + assert.Equal(t, remainingResources.ZKCounters.Binaries, bc.MaxBinaries) + assert.Equal(t, remainingResources.ZKCounters.Steps, bc.MaxSteps) + assert.Equal(t, remainingResources.ZKCounters.Sha256Hashes_V2, bc.MaxSHA256Hashes) assert.Equal(t, remainingResources.Bytes, bc.MaxBatchBytesSize) } diff --git a/sequencer/forcedbatch.go b/sequencer/forcedbatch.go index d27485377b..4f035ec708 100644 --- a/sequencer/forcedbatch.go +++ b/sequencer/forcedbatch.go @@ -121,8 +121,8 @@ func (f *finalizer) processForcedBatch(ctx context.Context, forcedBatch state.Fo LocalExitRoot: batchResponse.NewLocalExitRoot, BatchL2Data: forcedBatch.RawTxsData, BatchResources: state.BatchResources{ - UsedZKCounters: batchResponse.UsedZkCounters, - Bytes: uint64(len(forcedBatch.RawTxsData)), + ZKCounters: batchResponse.UsedZkCounters, + Bytes: uint64(len(forcedBatch.RawTxsData)), }, ClosingReason: state.ForcedBatchClosingReason, } diff --git a/sequencer/interfaces.go b/sequencer/interfaces.go index 9008fbef09..41dca25096 100644 --- a/sequencer/interfaces.go +++ b/sequencer/interfaces.go @@ -82,13 +82,13 @@ type stateInterface interface { type workerInterface interface { GetBestFittingTx(resources state.BatchResources) (*TxTracker, error) UpdateAfterSingleSuccessfulTxExecution(from common.Address, touchedAddresses map[common.Address]*state.InfoReadWrite) []*TxTracker - UpdateTxZKCounters(txHash common.Hash, from common.Address, ZKCounters state.ZKCounters) + UpdateTxZKCounters(txHash common.Hash, from common.Address, usedZKCounters state.ZKCounters, reservedZKCounters state.ZKCounters) AddTxTracker(ctx context.Context, txTracker *TxTracker) (replacedTx *TxTracker, dropReason error) MoveTxToNotReady(txHash common.Hash, from common.Address, actualNonce *uint64, actualBalance *big.Int) []*TxTracker DeleteTx(txHash common.Hash, from common.Address) AddPendingTxToStore(txHash common.Hash, addr common.Address) DeletePendingTxToStore(txHash common.Hash, addr common.Address) - NewTxTracker(tx types.Transaction, counters state.ZKCounters, ip string) (*TxTracker, 
error) + NewTxTracker(tx types.Transaction, usedZKcounters state.ZKCounters, reservedZKCouners state.ZKCounters, ip string) (*TxTracker, error) AddForcedTx(txHash common.Hash, addr common.Address) DeleteForcedTx(txHash common.Hash, addr common.Address) } diff --git a/sequencer/l2block.go b/sequencer/l2block.go index b9ed8d0b6c..8c81932fd2 100644 --- a/sequencer/l2block.go +++ b/sequencer/l2block.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/0xPolygonHermez/zkevm-node/event" "github.com/0xPolygonHermez/zkevm-node/hex" "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/pool" @@ -23,7 +24,9 @@ type L2Block struct { imStateRoot common.Hash l1InfoTreeExitRoot state.L1InfoTreeExitRootStorageEntry l1InfoTreeExitRootChanged bool - usedResources state.BatchResources + bytes uint64 + usedZKCounters state.ZKCounters + reservedZKCounters state.ZKCounters transactions []*TxTracker batchResponse *state.ProcessBatchResponse } @@ -109,6 +112,7 @@ func (f *finalizer) processPendingL2Blocks(ctx context.Context) { } err := f.processL2Block(ctx, l2Block) + f.dumpL2Block(l2Block) if err != nil { // Dump L2Block info @@ -139,6 +143,7 @@ func (f *finalizer) storePendingL2Blocks(ctx context.Context) { } err := f.storeL2Block(ctx, l2Block) + f.dumpL2Block(l2Block) if err != nil { // Dump L2Block info @@ -197,10 +202,20 @@ func (f *finalizer) processL2Block(ctx context.Context, l2Block *L2Block) error l2Block.batchResponse = batchResponse // Update finalRemainingResources of the batch - overflow, overflowResource := f.wipBatch.finalRemainingResources.Sub(state.BatchResources{UsedZKCounters: batchResponse.UsedZkCounters, Bytes: batchL2DataSize}) - if overflow { - return fmt.Errorf("error sustracting L2 block %d [%d] resources from the batch %d, overflow resource: %s, batch remaining counters: %s, L2Block used counters: %s, batch remaining bytes: %d, L2Block used bytes: %d", - blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, overflowResource, f.logZKCounters(f.wipBatch.finalRemainingResources.UsedZKCounters), f.logZKCounters(batchResponse.UsedZkCounters), f.wipBatch.finalRemainingResources.Bytes, batchL2DataSize) + fits, overflowResource := f.wipBatch.finalRemainingResources.Fits(state.BatchResources{ZKCounters: batchResponse.ReservedZkCounters, Bytes: batchL2DataSize}) + if fits { + subOverflow, overflowResource := f.wipBatch.finalRemainingResources.Sub(state.BatchResources{ZKCounters: batchResponse.UsedZkCounters, Bytes: batchL2DataSize}) + if subOverflow { // Sanity check, this cannot happen as reservedZKCounters should be >= that usedZKCounters + return fmt.Errorf("error sustracting L2 block %d [%d] used resources from the batch %d, overflow resource: %s, batch counters: %s, L2 block used counters: %s, batch bytes: %d, L2 block bytes: %d", + blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, overflowResource, f.logZKCounters(f.wipBatch.finalRemainingResources.ZKCounters), f.logZKCounters(batchResponse.UsedZkCounters), f.wipBatch.finalRemainingResources.Bytes, batchL2DataSize) + } + } else { + overflowLog := fmt.Sprintf("L2 block %d [%d] reserved resources exceeds the remaining batch %d resources, overflow resource: %s, batch counters: %s, L2 block reserved counters: %s, batch bytes: %d, L2 block bytes: %d", + blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, overflowResource, f.logZKCounters(f.wipBatch.finalRemainingResources.ZKCounters), f.logZKCounters(batchResponse.ReservedZkCounters), 
f.wipBatch.finalRemainingResources.Bytes, batchL2DataSize) + + log.Warnf(overflowLog) + + f.LogEvent(ctx, event.Level_Warning, event.EventID_ReservedZKCountersOverflow, overflowLog, nil) } // Update finalStateRoot of the batch to the newStateRoot for the L2 block @@ -212,9 +227,9 @@ func (f *finalizer) processL2Block(ctx context.Context, l2Block *L2Block) error endProcessing := time.Now() - log.Infof("processed L2 block %d [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, initialStateRoot: %s, newStateRoot: %s, txs: %d/%d, blockHash: %s, infoRoot: %s, time: %v, used counters: %s", - blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, l2Block.l1InfoTreeExitRootChanged, initialStateRoot, - l2Block.batchResponse.NewStateRoot, len(l2Block.transactions), len(blockResponse.TransactionResponses), blockResponse.BlockHash, blockResponse.BlockInfoRoot, endProcessing.Sub(startProcessing), f.logZKCounters(batchResponse.UsedZkCounters)) + log.Infof("processed L2 block %d [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, initialStateRoot: %s, newStateRoot: %s, txs: %d/%d, blockHash: %s, infoRoot: %s, time: %v, used counters: %s, reserved counters: %s", + blockResponse.BlockNumber, l2Block.trackingNum, f.wipBatch.batchNumber, l2Block.deltaTimestamp, l2Block.timestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, l2Block.l1InfoTreeExitRootChanged, initialStateRoot, l2Block.batchResponse.NewStateRoot, + len(l2Block.transactions), len(blockResponse.TransactionResponses), blockResponse.BlockHash, blockResponse.BlockInfoRoot, endProcessing.Sub(startProcessing), f.logZKCounters(batchResponse.UsedZkCounters), f.logZKCounters(batchResponse.ReservedZkCounters)) return nil } @@ -360,7 +375,7 @@ func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error { } batch.BatchL2Data = append(batch.BatchL2Data, blockL2Data...) 
- batch.Resources.SumUp(state.BatchResources{UsedZKCounters: l2Block.batchResponse.UsedZkCounters, Bytes: uint64(len(blockL2Data))}) + batch.Resources.SumUp(state.BatchResources{ZKCounters: l2Block.batchResponse.UsedZkCounters, Bytes: uint64(len(blockL2Data))}) receipt := state.ProcessingReceipt{ BatchNumber: f.wipBatch.batchNumber, @@ -504,25 +519,39 @@ func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp uint64, f.wipL2Block.imStateRoot = batchResponse.NewStateRoot f.wipBatch.imStateRoot = f.wipL2Block.imStateRoot - // Save and sustract the resources used by the new WIP L2 block from the wip batch + // Save the resources used/reserved and subtract the ZKCounters reserved by the new WIP L2 block from the WIP batch // We need to increase the poseidon hashes to reserve in the batch the hashes needed to write the L1InfoRoot when processing the final L2 Block (SkipWriteBlockInfoRoot_V2=false) - f.wipL2Block.usedResources.UsedZKCounters = batchResponse.UsedZkCounters - f.wipL2Block.usedResources.UsedZKCounters.PoseidonHashes = (batchResponse.UsedZkCounters.PoseidonHashes * 2) + 2 // nolint:gomnd - f.wipL2Block.usedResources.Bytes = changeL2BlockSize - - overflow, overflowResource := f.wipBatch.imRemainingResources.Sub(f.wipL2Block.usedResources) - if overflow { - log.Infof("new WIP L2 block [%d] exceeds the remaining resources from the batch %d, overflow resource: %s, closing WIP batch and creating new one", - f.wipL2Block.trackingNum, f.wipBatch.batchNumber, overflowResource) + f.wipL2Block.usedZKCounters = batchResponse.UsedZkCounters + f.wipL2Block.usedZKCounters.PoseidonHashes = (batchResponse.UsedZkCounters.PoseidonHashes * 2) + 2 // nolint:gomnd + f.wipL2Block.reservedZKCounters = batchResponse.ReservedZkCounters + f.wipL2Block.reservedZKCounters.PoseidonHashes = (batchResponse.ReservedZkCounters.PoseidonHashes * 2) + 2 // nolint:gomnd + f.wipL2Block.bytes = changeL2BlockSize + + subOverflow := false + fits, overflowResource := f.wipBatch.imRemainingResources.Fits(state.BatchResources{ZKCounters: f.wipL2Block.reservedZKCounters, Bytes: f.wipL2Block.bytes}) + if fits { + subOverflow, overflowResource = f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: f.wipL2Block.usedZKCounters, Bytes: f.wipL2Block.bytes}) + if subOverflow { // Sanity check, this cannot happen as reservedZKCounters should be >= that usedZKCounters + log.Infof("new WIP L2 block [%d] used resources exceeds the remaining batch resources, overflow resource: %s, closing WIP batch and creating new one. Batch counters: %s, L2 block used counters: %s", + f.wipL2Block.trackingNum, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(f.wipL2Block.usedZKCounters)) + } + } else { + log.Infof("new WIP L2 block [%d] reserved resources exceeds the remaining batch resources, overflow resource: %s, closing WIP batch and creating new one. 
Batch counters: %s, L2 block reserved counters: %s", + f.wipL2Block.trackingNum, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(f.wipL2Block.reservedZKCounters)) + } + + // If reserved WIP L2 block resources don't fit in the remaining batch resources (or we got an overflow when trying to subtract the used resources) + // we close the WIP batch and we create a new one + if !fits || subOverflow { err := f.closeAndOpenNewWIPBatch(ctx, state.ResourceExhaustedClosingReason) if err != nil { f.Halt(ctx, fmt.Errorf("failed to create new WIP batch [%d], error: %v", f.wipL2Block.trackingNum, err), true) } } - log.Infof("created new WIP L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, oldStateRoot: %s, imStateRoot: %s, used counters: %s", + log.Infof("created new WIP L2 block [%d], batch: %d, deltaTimestamp: %d, timestamp: %d, l1InfoTreeIndex: %d, l1InfoTreeIndexChanged: %v, oldStateRoot: %s, imStateRoot: %s, used counters: %s, reserved counters: %s", f.wipL2Block.trackingNum, f.wipBatch.batchNumber, f.wipL2Block.deltaTimestamp, f.wipL2Block.timestamp, f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, - f.wipL2Block.l1InfoTreeExitRootChanged, oldIMStateRoot, f.wipL2Block.imStateRoot, f.logZKCounters(f.wipL2Block.usedResources.UsedZKCounters)) + f.wipL2Block.l1InfoTreeExitRootChanged, oldIMStateRoot, f.wipL2Block.imStateRoot, f.logZKCounters(f.wipL2Block.usedZKCounters), f.logZKCounters(f.wipL2Block.reservedZKCounters)) } // executeNewWIPL2Block executes an empty L2 Block in the executor and returns the batch response from the executor @@ -579,16 +608,25 @@ func (f *finalizer) dumpL2Block(l2Block *L2Block) { } } - txsLog := "" + sLog := "" + for i, tx := range l2Block.transactions { + sLog += fmt.Sprintf(" tx[%d] hash: %s, from: %s, nonce: %d, gas: %d, gasPrice: %d, bytes: %d, egpPct: %d, used counters: %s, reserved counters: %s\n", + i, tx.HashStr, tx.FromStr, tx.Nonce, tx.Gas, tx.GasPrice, tx.Bytes, tx.EGPPercentage, f.logZKCounters(tx.UsedZKCounters), f.logZKCounters(tx.ReservedZKCounters)) + } + log.Infof("DUMP L2 block [%d], timestamp: %d, deltaTimestamp: %d, imStateRoot: %s, l1InfoTreeIndex: %d, bytes: %d, used counters: %s, reserved counters: %s\n%s", + l2Block.trackingNum, l2Block.timestamp, l2Block.deltaTimestamp, l2Block.imStateRoot, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex, l2Block.bytes, + f.logZKCounters(l2Block.usedZKCounters), f.logZKCounters(l2Block.reservedZKCounters), sLog) + + sLog = "" if blockResp != nil { for i, txResp := range blockResp.TransactionResponses { - txsLog += fmt.Sprintf(" tx[%d] Hash: %s, HashL2: %s, StateRoot: %s, Type: %d, GasLeft: %d, GasUsed: %d, GasRefund: %d, CreateAddress: %s, ChangesStateRoot: %v, EGP: %s, EGPPct: %d, HasGaspriceOpcode: %v, HasBalanceOpcode: %v\n", + sLog += fmt.Sprintf(" tx[%d] hash: %s, hashL2: %s, stateRoot: %s, type: %d, gasLeft: %d, gasUsed: %d, gasRefund: %d, createAddress: %s, changesStateRoot: %v, egp: %s, egpPct: %d, hasGaspriceOpcode: %v, hasBalanceOpcode: %v\n", i, txResp.TxHash, txResp.TxHashL2_V2, txResp.StateRoot, txResp.Type, txResp.GasLeft, txResp.GasUsed, txResp.GasRefunded, txResp.CreateAddress, txResp.ChangesStateRoot, txResp.EffectiveGasPrice, txResp.EffectivePercentage, txResp.HasGaspriceOpcode, txResp.HasBalanceOpcode) } - log.Infof("DUMP L2 block %d [%d], Timestamp: %d, ParentHash: %s, Coinbase: %s, GER: %s, BlockHashL1: %s, GasUsed: %d, BlockInfoRoot: %s, BlockHash: %s\n%s", + log.Infof("DUMP L2 block %d [%d] 
response, timestamp: %d, parentHash: %s, coinbase: %s, ger: %s, blockHashL1: %s, gasUsed: %d, blockInfoRoot: %s, blockHash: %s, used counters: %s, reserved counters: %s\n%s", blockResp.BlockNumber, l2Block.trackingNum, blockResp.Timestamp, blockResp.ParentHash, blockResp.Coinbase, blockResp.GlobalExitRoot, blockResp.BlockHashL1, - blockResp.GasUsed, blockResp.BlockInfoRoot, blockResp.BlockHash, txsLog) + blockResp.GasUsed, blockResp.BlockInfoRoot, blockResp.BlockHash, f.logZKCounters(l2Block.batchResponse.UsedZkCounters), f.logZKCounters(l2Block.batchResponse.ReservedZkCounters), sLog) } } diff --git a/sequencer/mock_worker.go b/sequencer/mock_worker.go index 628037dfd2..215cd08c8e 100644 --- a/sequencer/mock_worker.go +++ b/sequencer/mock_worker.go @@ -125,9 +125,9 @@ func (_m *WorkerMock) MoveTxToNotReady(txHash common.Hash, from common.Address, return r0 } -// NewTxTracker provides a mock function with given fields: tx, counters, ip -func (_m *WorkerMock) NewTxTracker(tx types.Transaction, counters state.ZKCounters, ip string) (*TxTracker, error) { - ret := _m.Called(tx, counters, ip) +// NewTxTracker provides a mock function with given fields: tx, usedZKcounters, reservedZKCouners, ip +func (_m *WorkerMock) NewTxTracker(tx types.Transaction, usedZKcounters state.ZKCounters, reservedZKCouners state.ZKCounters, ip string) (*TxTracker, error) { + ret := _m.Called(tx, usedZKcounters, reservedZKCouners, ip) if len(ret) == 0 { panic("no return value specified for NewTxTracker") @@ -135,19 +135,19 @@ func (_m *WorkerMock) NewTxTracker(tx types.Transaction, counters state.ZKCounte var r0 *TxTracker var r1 error - if rf, ok := ret.Get(0).(func(types.Transaction, state.ZKCounters, string) (*TxTracker, error)); ok { - return rf(tx, counters, ip) + if rf, ok := ret.Get(0).(func(types.Transaction, state.ZKCounters, state.ZKCounters, string) (*TxTracker, error)); ok { + return rf(tx, usedZKcounters, reservedZKCouners, ip) } - if rf, ok := ret.Get(0).(func(types.Transaction, state.ZKCounters, string) *TxTracker); ok { - r0 = rf(tx, counters, ip) + if rf, ok := ret.Get(0).(func(types.Transaction, state.ZKCounters, state.ZKCounters, string) *TxTracker); ok { + r0 = rf(tx, usedZKcounters, reservedZKCouners, ip) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*TxTracker) } } - if rf, ok := ret.Get(1).(func(types.Transaction, state.ZKCounters, string) error); ok { - r1 = rf(tx, counters, ip) + if rf, ok := ret.Get(1).(func(types.Transaction, state.ZKCounters, state.ZKCounters, string) error); ok { + r1 = rf(tx, usedZKcounters, reservedZKCouners, ip) } else { r1 = ret.Error(1) } @@ -175,9 +175,9 @@ func (_m *WorkerMock) UpdateAfterSingleSuccessfulTxExecution(from common.Address return r0 } -// UpdateTxZKCounters provides a mock function with given fields: txHash, from, ZKCounters -func (_m *WorkerMock) UpdateTxZKCounters(txHash common.Hash, from common.Address, ZKCounters state.ZKCounters) { - _m.Called(txHash, from, ZKCounters) +// UpdateTxZKCounters provides a mock function with given fields: txHash, from, usedZKCounters, reservedZKCounters +func (_m *WorkerMock) UpdateTxZKCounters(txHash common.Hash, from common.Address, usedZKCounters state.ZKCounters, reservedZKCounters state.ZKCounters) { + _m.Called(txHash, from, usedZKCounters, reservedZKCounters) } // NewWorkerMock creates a new instance of WorkerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
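The interface and mock updates above make the worker carry used and reserved ZK counters separately (NewTxTracker, UpdateTxZKCounters), and the worker.go changes later in this diff gate admission on the reserved set: AddTxTracker calls IsWithinConstraints(tx.ReservedZKCounters) and GetBestFittingTx subtracts the tx's ReservedZKCounters and Bytes. The short sketch below uses an illustrative stand-in for the constraints config rather than the real state.BatchConstraintsCfg; it shows why gating on reserved counters matters: a tx whose used counters are within the limits can still be unprovable if its worst-case reserved counters exceed them.

package main

import "fmt"

// batchConstraints is an illustrative stand-in for state.BatchConstraintsCfg,
// reduced to a single counter.
type batchConstraints struct {
	MaxSteps uint32
}

func (c batchConstraints) isWithinConstraints(steps uint32) bool {
	return steps <= c.MaxSteps
}

func main() {
	constraints := batchConstraints{MaxSteps: 1000000}

	usedSteps := uint32(800000)      // steps the executor actually consumed
	reservedSteps := uint32(1200000) // worst-case steps the executor had to reserve

	// Gating on used counters alone would accept the tx...
	fmt.Println(constraints.isWithinConstraints(usedSteps)) // true
	// ...but the reserved counters show it can never fit in any batch,
	// so the worker marks it invalid up front (node-level OOC).
	fmt.Println(constraints.isWithinConstraints(reservedSteps)) // false
}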
diff --git a/sequencer/sequencer.go b/sequencer/sequencer.go index 95b9794dcd..d53dab0c7d 100644 --- a/sequencer/sequencer.go +++ b/sequencer/sequencer.go @@ -211,7 +211,7 @@ func (s *Sequencer) loadFromPool(ctx context.Context) { } func (s *Sequencer) addTxToWorker(ctx context.Context, tx pool.Transaction) error { - txTracker, err := s.worker.NewTxTracker(tx.Transaction, tx.ZKCounters, tx.IP) + txTracker, err := s.worker.NewTxTracker(tx.Transaction, tx.ZKCounters, tx.ReservedZKCounters, tx.IP) if err != nil { return err } diff --git a/sequencer/txtracker.go b/sequencer/txtracker.go index 4f85ef947f..bf280d036e 100644 --- a/sequencer/txtracker.go +++ b/sequencer/txtracker.go @@ -11,29 +11,31 @@ import ( // TxTracker is a struct that contains all the tx data needed to be managed by the worker type TxTracker struct { - Hash common.Hash - HashStr string - From common.Address - FromStr string - Nonce uint64 - Gas uint64 // To check if it fits into a batch - GasPrice *big.Int - Cost *big.Int // Cost = Amount + Benefit - BatchResources state.BatchResources // To check if it fits into a batch - RawTx []byte - ReceivedAt time.Time // To check if it has been in the txSortedList for too long - IP string // IP of the tx sender - FailedReason *string // FailedReason is the reason why the tx failed, if it failed - EffectiveGasPrice *big.Int - EGPPercentage byte - IsLastExecution bool - EGPLog state.EffectiveGasPriceLog - L1GasPrice uint64 - L2GasPrice uint64 + Hash common.Hash + HashStr string + From common.Address + FromStr string + Nonce uint64 + Gas uint64 // To check if it fits into a batch + GasPrice *big.Int + Cost *big.Int // Cost = Amount + Benefit + Bytes uint64 + UsedZKCounters state.ZKCounters + ReservedZKCounters state.ZKCounters + RawTx []byte + ReceivedAt time.Time // To check if it has been in the txSortedList for too long + IP string // IP of the tx sender + FailedReason *string // FailedReason is the reason why the tx failed, if it failed + EffectiveGasPrice *big.Int + EGPPercentage byte + IsLastExecution bool + EGPLog state.EffectiveGasPriceLog + L1GasPrice uint64 + L2GasPrice uint64 } // newTxTracker creates and inti a TxTracker -func newTxTracker(tx types.Transaction, counters state.ZKCounters, ip string) (*TxTracker, error) { +func newTxTracker(tx types.Transaction, usedZKCounters state.ZKCounters, reservedZKCounters state.ZKCounters, ip string) (*TxTracker, error) { addr, err := state.GetSender(tx) if err != nil { return nil, err @@ -45,22 +47,21 @@ func newTxTracker(tx types.Transaction, counters state.ZKCounters, ip string) (* } txTracker := &TxTracker{ - Hash: tx.Hash(), - HashStr: tx.Hash().String(), - From: addr, - FromStr: addr.String(), - Nonce: tx.Nonce(), - Gas: tx.Gas(), - GasPrice: tx.GasPrice(), - Cost: tx.Cost(), - BatchResources: state.BatchResources{ - Bytes: uint64(len(rawTx)) + state.EfficiencyPercentageByteLength, - UsedZKCounters: counters, - }, - RawTx: rawTx, - ReceivedAt: time.Now(), - IP: ip, - EffectiveGasPrice: new(big.Int).SetUint64(0), + Hash: tx.Hash(), + HashStr: tx.Hash().String(), + From: addr, + FromStr: addr.String(), + Nonce: tx.Nonce(), + Gas: tx.Gas(), + GasPrice: tx.GasPrice(), + Cost: tx.Cost(), + Bytes: uint64(len(rawTx)) + state.EfficiencyPercentageByteLength, + UsedZKCounters: usedZKCounters, + ReservedZKCounters: reservedZKCounters, + RawTx: rawTx, + ReceivedAt: time.Now(), + IP: ip, + EffectiveGasPrice: new(big.Int).SetUint64(0), EGPLog: state.EffectiveGasPriceLog{ ValueFinal: new(big.Int).SetUint64(0), ValueFirst: 
new(big.Int).SetUint64(0), @@ -74,7 +75,8 @@ func newTxTracker(tx types.Transaction, counters state.ZKCounters, ip string) (* return txTracker, nil } -// updateZKCounters updates the counters of the tx -func (tx *TxTracker) updateZKCounters(counters state.ZKCounters) { - tx.BatchResources.UsedZKCounters = counters +// updateZKCounters updates the used and reserved ZKCounters of the tx +func (tx *TxTracker) updateZKCounters(usedZKCounters state.ZKCounters, reservedZKCounters state.ZKCounters) { + tx.UsedZKCounters = usedZKCounters + tx.ReservedZKCounters = reservedZKCounters } diff --git a/sequencer/worker.go b/sequencer/worker.go index 919fe50132..d068788bf5 100644 --- a/sequencer/worker.go +++ b/sequencer/worker.go @@ -37,8 +37,8 @@ func NewWorker(state stateInterface, constraints state.BatchConstraintsCfg) *Wor } // NewTxTracker creates and inits a TxTracker -func (w *Worker) NewTxTracker(tx types.Transaction, counters state.ZKCounters, ip string) (*TxTracker, error) { - return newTxTracker(tx, counters, ip) +func (w *Worker) NewTxTracker(tx types.Transaction, usedZKCounters state.ZKCounters, reservedZKCounters state.ZKCounters, ip string) (*TxTracker, error) { + return newTxTracker(tx, usedZKCounters, reservedZKCounters, ip) } // AddTxTracker adds a new Tx to the Worker @@ -51,8 +51,8 @@ func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (replacedTx *T return nil, pool.ErrInvalidIP } - // Make sure the transaction's batch resources are within the constraints. - if !w.batchConstraints.IsWithinConstraints(tx.BatchResources.UsedZKCounters) { + // Make sure the transaction's reserved ZKCounters are within the constraints. + if !w.batchConstraints.IsWithinConstraints(tx.ReservedZKCounters) { log.Errorf("outOfCounters error (node level) for tx %s", tx.Hash.String()) w.workerMutex.Unlock() return nil, pool.ErrOutOfCounters @@ -219,25 +219,26 @@ func (w *Worker) DeleteForcedTx(txHash common.Hash, addr common.Address) { } // UpdateTxZKCounters updates the ZKCounter of a tx -func (w *Worker) UpdateTxZKCounters(txHash common.Hash, addr common.Address, counters state.ZKCounters) { +func (w *Worker) UpdateTxZKCounters(txHash common.Hash, addr common.Address, usedZKCounters state.ZKCounters, reservedZKCounters state.ZKCounters) { w.workerMutex.Lock() defer w.workerMutex.Unlock() log.Infof("update ZK counters for tx %s addr %s", txHash.String(), addr.String()) - log.Debugf("counters.CumulativeGasUsed: %d", counters.GasUsed) - log.Debugf("counters.UsedKeccakHashes: %d", counters.KeccakHashes) - log.Debugf("counters.UsedPoseidonHashes: %d", counters.PoseidonHashes) - log.Debugf("counters.UsedPoseidonPaddings: %d", counters.PoseidonPaddings) - log.Debugf("counters.UsedMemAligns: %d", counters.MemAligns) - log.Debugf("counters.UsedArithmetics: %d", counters.Arithmetics) - log.Debugf("counters.UsedBinaries: %d", counters.Binaries) - log.Debugf("counters.UsedSteps: %d", counters.Steps) - log.Debugf("counters.UsedSha256Hashes_V2: %d", counters.Sha256Hashes_V2) + // TODO: log in a single line, log also reserved resources + log.Debugf("counters.CumulativeGasUsed: %d", usedZKCounters.GasUsed) + log.Debugf("counters.UsedKeccakHashes: %d", usedZKCounters.KeccakHashes) + log.Debugf("counters.UsedPoseidonHashes: %d", usedZKCounters.PoseidonHashes) + log.Debugf("counters.UsedPoseidonPaddings: %d", usedZKCounters.PoseidonPaddings) + log.Debugf("counters.UsedMemAligns: %d", usedZKCounters.MemAligns) + log.Debugf("counters.UsedArithmetics: %d", usedZKCounters.Arithmetics) + 
log.Debugf("counters.UsedBinaries: %d", usedZKCounters.Binaries) + log.Debugf("counters.UsedSteps: %d", usedZKCounters.Steps) + log.Debugf("counters.UsedSha256Hashes_V2: %d", usedZKCounters.Sha256Hashes_V2) addrQueue, found := w.pool[addr.String()] if found { - addrQueue.UpdateTxZKCounters(txHash, counters) + addrQueue.UpdateTxZKCounters(txHash, usedZKCounters, reservedZKCounters) } else { log.Warnf("addrQueue %s not found", addr.String()) } @@ -318,7 +319,7 @@ func (w *Worker) GetBestFittingTx(resources state.BatchResources) (*TxTracker, e foundMutex.RUnlock() txCandidate := w.txSortedList.getByIndex(i) - overflow, _ := bresources.Sub(txCandidate.BatchResources) + overflow, _ := bresources.Sub(state.BatchResources{ZKCounters: txCandidate.ReservedZKCounters, Bytes: txCandidate.Bytes}) if overflow { // We don't add this Tx continue diff --git a/sequencer/worker_test.go b/sequencer/worker_test.go index 0acbe9c798..688576fa7b 100644 --- a/sequencer/worker_test.go +++ b/sequencer/worker_test.go @@ -37,7 +37,7 @@ type workerAddTxTestCase struct { txHash common.Hash nonce uint64 cost *big.Int - counters state.ZKCounters + reservedZKCounters state.ZKCounters usedBytes uint64 gasPrice *big.Int expectedTxSortedList []common.Hash @@ -62,9 +62,9 @@ func processWorkerAddTxTestCases(ctx context.Context, t *testing.T, worker *Work tx.FromStr = testCase.from.String() tx.Nonce = testCase.nonce tx.Cost = testCase.cost - tx.BatchResources.Bytes = testCase.usedBytes + tx.Bytes = testCase.usedBytes tx.GasPrice = testCase.gasPrice - tx.updateZKCounters(testCase.counters) + tx.updateZKCounters(testCase.reservedZKCounters, testCase.reservedZKCounters) if testCase.ip == "" { // A random valid IP Address tx.IP = validIP @@ -118,7 +118,6 @@ func TestWorkerAddTx(t *testing.T) { { name: "Adding from:0x01, tx:0x01/gp:10", from: common.Address{1}, txHash: common.Hash{1}, nonce: 1, gasPrice: new(big.Int).SetInt64(10), cost: new(big.Int).SetInt64(5), - counters: state.ZKCounters{GasUsed: 1, KeccakHashes: 1, PoseidonHashes: 1, PoseidonPaddings: 1, MemAligns: 1, Arithmetics: 1, Binaries: 1, Steps: 1, Sha256Hashes_V2: 1}, usedBytes: 1, expectedTxSortedList: []common.Hash{ {1}, @@ -127,25 +126,22 @@ func TestWorkerAddTx(t *testing.T) { { name: "Adding from:0x02, tx:0x02/gp:4", from: common.Address{2}, txHash: common.Hash{2}, nonce: 1, gasPrice: new(big.Int).SetInt64(4), cost: new(big.Int).SetInt64(5), - counters: state.ZKCounters{GasUsed: 1, KeccakHashes: 1, PoseidonHashes: 1, PoseidonPaddings: 1, MemAligns: 1, Arithmetics: 1, Binaries: 1, Steps: 1, Sha256Hashes_V2: 1}, usedBytes: 1, expectedTxSortedList: []common.Hash{ {1}, {2}, }, }, { - name: "Readding from:0x02, tx:0x02/gp:20", from: common.Address{2}, txHash: common.Hash{2}, nonce: 1, gasPrice: new(big.Int).SetInt64(20), + name: "Adding from:0x02, tx:0x02/gp:20", from: common.Address{2}, txHash: common.Hash{2}, nonce: 1, gasPrice: new(big.Int).SetInt64(20), cost: new(big.Int).SetInt64(5), - counters: state.ZKCounters{GasUsed: 5, KeccakHashes: 5, PoseidonHashes: 5, PoseidonPaddings: 5, MemAligns: 5, Arithmetics: 5, Binaries: 5, Steps: 5, Sha256Hashes_V2: 5}, usedBytes: 5, expectedTxSortedList: []common.Hash{ {2}, {1}, }, }, { - name: "Readding from:0x03, tx:0x03/gp:25", from: common.Address{3}, txHash: common.Hash{3}, nonce: 1, gasPrice: new(big.Int).SetInt64(25), + name: "Adding from:0x03, tx:0x03/gp:25", from: common.Address{3}, txHash: common.Hash{3}, nonce: 1, gasPrice: new(big.Int).SetInt64(25), cost: new(big.Int).SetInt64(5), - counters: state.ZKCounters{GasUsed: 
diff --git a/sequencer/worker_test.go b/sequencer/worker_test.go
index 0acbe9c798..688576fa7b 100644
--- a/sequencer/worker_test.go
+++ b/sequencer/worker_test.go
@@ -37,7 +37,7 @@ type workerAddTxTestCase struct {
 	txHash               common.Hash
 	nonce                uint64
 	cost                 *big.Int
-	counters             state.ZKCounters
+	reservedZKCounters   state.ZKCounters
 	usedBytes            uint64
 	gasPrice             *big.Int
 	expectedTxSortedList []common.Hash
@@ -62,9 +62,9 @@ func processWorkerAddTxTestCases(ctx context.Context, t *testing.T, worker *Work
 	tx.FromStr = testCase.from.String()
 	tx.Nonce = testCase.nonce
 	tx.Cost = testCase.cost
-	tx.BatchResources.Bytes = testCase.usedBytes
+	tx.Bytes = testCase.usedBytes
 	tx.GasPrice = testCase.gasPrice
-	tx.updateZKCounters(testCase.counters)
+	tx.updateZKCounters(testCase.reservedZKCounters, testCase.reservedZKCounters)
 	if testCase.ip == "" {
 		// A random valid IP Address
 		tx.IP = validIP
@@ -118,7 +118,6 @@ func TestWorkerAddTx(t *testing.T) {
 		{
 			name: "Adding from:0x01, tx:0x01/gp:10", from: common.Address{1}, txHash: common.Hash{1}, nonce: 1, gasPrice: new(big.Int).SetInt64(10),
 			cost: new(big.Int).SetInt64(5),
-			counters:  state.ZKCounters{GasUsed: 1, KeccakHashes: 1, PoseidonHashes: 1, PoseidonPaddings: 1, MemAligns: 1, Arithmetics: 1, Binaries: 1, Steps: 1, Sha256Hashes_V2: 1},
 			usedBytes: 1,
 			expectedTxSortedList: []common.Hash{
 				{1},
@@ -127,25 +126,22 @@ func TestWorkerAddTx(t *testing.T) {
 		{
 			name: "Adding from:0x02, tx:0x02/gp:4", from: common.Address{2}, txHash: common.Hash{2}, nonce: 1, gasPrice: new(big.Int).SetInt64(4),
 			cost: new(big.Int).SetInt64(5),
-			counters:  state.ZKCounters{GasUsed: 1, KeccakHashes: 1, PoseidonHashes: 1, PoseidonPaddings: 1, MemAligns: 1, Arithmetics: 1, Binaries: 1, Steps: 1, Sha256Hashes_V2: 1},
 			usedBytes: 1,
 			expectedTxSortedList: []common.Hash{
 				{1}, {2},
 			},
 		},
 		{
-			name: "Readding from:0x02, tx:0x02/gp:20", from: common.Address{2}, txHash: common.Hash{2}, nonce: 1, gasPrice: new(big.Int).SetInt64(20),
+			name: "Adding from:0x02, tx:0x02/gp:20", from: common.Address{2}, txHash: common.Hash{2}, nonce: 1, gasPrice: new(big.Int).SetInt64(20),
 			cost: new(big.Int).SetInt64(5),
-			counters:  state.ZKCounters{GasUsed: 5, KeccakHashes: 5, PoseidonHashes: 5, PoseidonPaddings: 5, MemAligns: 5, Arithmetics: 5, Binaries: 5, Steps: 5, Sha256Hashes_V2: 5},
 			usedBytes: 5,
 			expectedTxSortedList: []common.Hash{
 				{2}, {1},
 			},
 		},
 		{
-			name: "Readding from:0x03, tx:0x03/gp:25", from: common.Address{3}, txHash: common.Hash{3}, nonce: 1, gasPrice: new(big.Int).SetInt64(25),
+			name: "Adding from:0x03, tx:0x03/gp:25", from: common.Address{3}, txHash: common.Hash{3}, nonce: 1, gasPrice: new(big.Int).SetInt64(25),
 			cost: new(big.Int).SetInt64(5),
-			counters:  state.ZKCounters{GasUsed: 2, KeccakHashes: 2, PoseidonHashes: 2, PoseidonPaddings: 2, MemAligns: 2, Arithmetics: 2, Binaries: 2, Steps: 2, Sha256Hashes_V2: 2},
 			usedBytes: 2,
 			expectedTxSortedList: []common.Hash{
 				{3}, {2}, {1},
@@ -153,7 +149,6 @@ func TestWorkerAddTx(t *testing.T) {
 		},
 		{
 			name: "Invalid IP address", from: common.Address{5}, txHash: common.Hash{5}, nonce: 1,
-			counters:  state.ZKCounters{GasUsed: 1, KeccakHashes: 1, PoseidonHashes: 1, PoseidonPaddings: 1, MemAligns: 1, Arithmetics: 1, Binaries: 1, Steps: 1, Sha256Hashes_V2: 2},
 			usedBytes: 1,
 			ip:        "invalid IP",
 			expectedErr: pool.ErrInvalidIP,
@@ -162,8 +157,8 @@
 			name: "Out Of Counters Err", from: common.Address{5}, txHash: common.Hash{5}, nonce: 1,
 			cost: new(big.Int).SetInt64(5),
-			// Here, we intentionally set the counters such that they violate the constraints
-			counters: state.ZKCounters{
+			// Here, we intentionally set the reserved counters such that they violate the constraints
+			reservedZKCounters: state.ZKCounters{
 				GasUsed:        worker.batchConstraints.MaxCumulativeGasUsed + 1,
 				KeccakHashes:   worker.batchConstraints.MaxKeccakHashes + 1,
 				PoseidonHashes: worker.batchConstraints.MaxPoseidonHashes + 1,
@@ -180,7 +175,6 @@ func TestWorkerAddTx(t *testing.T) {
 		{
 			name: "Adding from:0x04, tx:0x04/gp:100", from: common.Address{4}, txHash: common.Hash{4}, nonce: 1, gasPrice: new(big.Int).SetInt64(100),
 			cost: new(big.Int).SetInt64(5),
-			counters:  state.ZKCounters{GasUsed: 1, KeccakHashes: 1, PoseidonHashes: 1, PoseidonPaddings: 1, MemAligns: 1, Arithmetics: 1, Binaries: 1, Steps: 1, Sha256Hashes_V2: 1},
 			usedBytes: 1,
 			expectedTxSortedList: []common.Hash{
 				{4}, {3}, {2}, {1},
@@ -195,8 +189,8 @@ func TestWorkerGetBestTx(t *testing.T) {
 	var nilErr error
 
 	rc := state.BatchResources{
-		UsedZKCounters: state.ZKCounters{GasUsed: 10, KeccakHashes: 10, PoseidonHashes: 10, PoseidonPaddings: 10, MemAligns: 10, Arithmetics: 10, Binaries: 10, Steps: 10, Sha256Hashes_V2: 10},
-		Bytes:          10,
+		ZKCounters: state.ZKCounters{GasUsed: 10, KeccakHashes: 10, PoseidonHashes: 10, PoseidonPaddings: 10, MemAligns: 10, Arithmetics: 10, Binaries: 10, Steps: 10, Sha256Hashes_V2: 10},
+		Bytes:      10,
 	}
 
 	stateMock := NewStateMock(t)
@@ -221,36 +215,36 @@ func TestWorkerGetBestTx(t *testing.T) {
 	addTxsTC := []workerAddTxTestCase{
 		{
 			name: "Adding from:0x01, tx:0x01/gp:10", from: common.Address{1}, txHash: common.Hash{1}, nonce: 1, gasPrice: new(big.Int).SetInt64(10),
-			cost:      new(big.Int).SetInt64(5),
-			counters:  state.ZKCounters{GasUsed: 1, KeccakHashes: 1, PoseidonHashes: 1, PoseidonPaddings: 1, MemAligns: 1, Arithmetics: 1, Binaries: 1, Steps: 1, Sha256Hashes_V2: 1},
-			usedBytes: 1,
+			cost:               new(big.Int).SetInt64(5),
+			reservedZKCounters: state.ZKCounters{GasUsed: 1, KeccakHashes: 1, PoseidonHashes: 1, PoseidonPaddings: 1, MemAligns: 1, Arithmetics: 1, Binaries: 1, Steps: 1, Sha256Hashes_V2: 1},
+			usedBytes:          1,
 			expectedTxSortedList: []common.Hash{
 				{1},
 			},
 		},
 		{
 			name: "Adding from:0x02, tx:0x02/gp:12", from: common.Address{2}, txHash: common.Hash{2}, nonce: 1, gasPrice: new(big.Int).SetInt64(12),
-			cost:      new(big.Int).SetInt64(5),
-			counters:  state.ZKCounters{GasUsed: 5, KeccakHashes: 5, PoseidonHashes: 5, PoseidonPaddings: 5, MemAligns: 5, Arithmetics: 5, Binaries: 5, Steps: 5, Sha256Hashes_V2: 5},
-			usedBytes: 5,
+			cost:               new(big.Int).SetInt64(5),
+			reservedZKCounters: state.ZKCounters{GasUsed: 5, KeccakHashes: 5, PoseidonHashes: 5, PoseidonPaddings: 5, MemAligns: 5, Arithmetics: 5, Binaries: 5, Steps: 5, Sha256Hashes_V2: 5},
+			usedBytes:          5,
 			expectedTxSortedList: []common.Hash{
 				{2}, {1},
 			},
 		},
 		{
 			name: "Readding from:0x03, tx:0x03/gp:25", from: common.Address{3}, txHash: common.Hash{3}, nonce: 1, gasPrice: new(big.Int).SetInt64(25),
-			cost:      new(big.Int).SetInt64(5),
-			counters:  state.ZKCounters{GasUsed: 2, KeccakHashes: 2, PoseidonHashes: 2, PoseidonPaddings: 2, MemAligns: 2, Arithmetics: 2, Binaries: 2, Steps: 2, Sha256Hashes_V2: 2},
-			usedBytes: 2,
+			cost:               new(big.Int).SetInt64(5),
+			reservedZKCounters: state.ZKCounters{GasUsed: 2, KeccakHashes: 2, PoseidonHashes: 2, PoseidonPaddings: 2, MemAligns: 2, Arithmetics: 2, Binaries: 2, Steps: 2, Sha256Hashes_V2: 2},
+			usedBytes:          2,
 			expectedTxSortedList: []common.Hash{
 				{3}, {2}, {1},
 			},
 		},
 		{
 			name: "Adding from:0x04, tx:0x04/gp:100", from: common.Address{4}, txHash: common.Hash{4}, nonce: 1, gasPrice: new(big.Int).SetInt64(100),
-			cost:      new(big.Int).SetInt64(5),
-			counters:  state.ZKCounters{GasUsed: 4, KeccakHashes: 4, PoseidonHashes: 4, PoseidonPaddings: 4, MemAligns: 4, Arithmetics: 4, Binaries: 4, Steps: 4, Sha256Hashes_V2: 4},
-			usedBytes: 4,
+			cost:               new(big.Int).SetInt64(5),
+			reservedZKCounters: state.ZKCounters{GasUsed: 4, KeccakHashes: 4, PoseidonHashes: 4, PoseidonPaddings: 4, MemAligns: 4, Arithmetics: 4, Binaries: 4, Steps: 4, Sha256Hashes_V2: 4},
+			usedBytes:          4,
 			expectedTxSortedList: []common.Hash{
 				{4}, {3}, {2}, {1},
 			},
@@ -271,7 +265,7 @@ func TestWorkerGetBestTx(t *testing.T) {
 			if tx.HashStr != expectedGetBestTx[ct].String() {
 				t.Fatalf("Error GetBestFittingTx(%d). Expected=%s, Actual=%s", ct, expectedGetBestTx[ct].String(), tx.HashStr)
 			}
-			overflow, _ := rc.Sub(tx.BatchResources)
+			overflow, _ := rc.Sub(state.BatchResources{ZKCounters: tx.ReservedZKCounters, Bytes: tx.Bytes})
 			assert.Equal(t, false, overflow)
 
 			touch := make(map[common.Address]*state.InfoReadWrite)
diff --git a/state/pgstatestorage/pgstatestorage_test.go b/state/pgstatestorage/pgstatestorage_test.go
index 3433619603..8c0b553514 100644
--- a/state/pgstatestorage/pgstatestorage_test.go
+++ b/state/pgstatestorage/pgstatestorage_test.go
@@ -55,7 +55,7 @@ var (
 	mtDBServiceClient                  hashdb.HashDBServiceClient
 	executorClientConn, mtDBClientConn *grpc.ClientConn
 	batchResources                     = state.BatchResources{
-		UsedZKCounters: state.ZKCounters{
+		ZKCounters: state.ZKCounters{
 			KeccakHashes: 1,
 		},
 		Bytes: 1,
diff --git a/state/types.go b/state/types.go
index d5c0da1c3a..ea7a0ae289 100644
--- a/state/types.go
+++ b/state/types.go
@@ -180,9 +180,41 @@ func (z *ZKCounters) SumUp(other ZKCounters) {
 	z.Sha256Hashes_V2 += other.Sha256Hashes_V2
 }
 
-// Sub subtract zk counters with passed zk counters (not safe). if there is a counter underflow it returns true and the name of the counter that caused the overflow
+// Fits checks if the other zk counters fit in the zk counters. If there is a counter underflow it returns false and the name of the counter that caused the underflow
+func (z *ZKCounters) Fits(other ZKCounters) (bool, string) {
+	if other.GasUsed > z.GasUsed {
+		return false, "CumulativeGas"
+	}
+	if other.KeccakHashes > z.KeccakHashes {
+		return false, "KeccakHashes"
+	}
+	if other.PoseidonHashes > z.PoseidonHashes {
+		return false, "PoseidonHashes"
+	}
+	if other.PoseidonPaddings > z.PoseidonPaddings {
+		return false, "PoseidonPaddings"
+	}
+	if other.MemAligns > z.MemAligns {
+		return false, "UsedMemAligns"
+	}
+	if other.Arithmetics > z.Arithmetics {
+		return false, "UsedArithmetics"
+	}
+	if other.Binaries > z.Binaries {
+		return false, "UsedBinaries"
+	}
+	if other.Steps > z.Steps {
+		return false, "UsedSteps"
+	}
+	if other.Sha256Hashes_V2 > z.Sha256Hashes_V2 {
+		return false, "UsedSha256Hashes_V2"
+	}
+
+	return true, ""
+}
+
+// Sub subtracts the passed zk counters from the zk counters (not safe). If there is a counter underflow it returns true and the name of the counter that caused the underflow
 func (z *ZKCounters) Sub(other ZKCounters) (bool, string) {
-	// ZKCounters
 	if other.GasUsed > z.GasUsed {
 		return true, "CumulativeGas"
 	}
@@ -224,21 +256,28 @@ func (z *ZKCounters) Sub(other ZKCounters) (bool, string) {
 	return false, ""
 }
 
-// BatchResources is a struct that contains the ZKEVM resources used by a batch/tx
+// BatchResources is a struct that contains the limited resources of a batch
 type BatchResources struct {
-	UsedZKCounters ZKCounters
-	Bytes          uint64
+	ZKCounters ZKCounters
+	Bytes      uint64
+}
+
+// Fits checks if the other batch resources fit in the batch resources. If there is a resource underflow it returns false and the name of the resource that caused the underflow
+func (r *BatchResources) Fits(other BatchResources) (bool, string) {
+	if other.Bytes > r.Bytes {
+		return false, "Bytes"
+	}
+	return r.ZKCounters.Fits(other.ZKCounters)
 }
 
-// Sub subtracts the batch resources from other. if there is a resource underflow it returns true and the name of the resource that caused the overflow
+// Sub subtracts the "other" batch resources from the batch resources. If there is a resource overflow it returns true and the name of the resource that caused the overflow
 func (r *BatchResources) Sub(other BatchResources) (bool, string) {
-	// Bytes
 	if other.Bytes > r.Bytes {
 		return true, "Bytes"
 	}
 	bytesBackup := r.Bytes
 	r.Bytes -= other.Bytes
-	exhausted, resourceName := r.UsedZKCounters.Sub(other.UsedZKCounters)
+	exhausted, resourceName := r.ZKCounters.Sub(other.ZKCounters)
 	if exhausted {
 		r.Bytes = bytesBackup
 		return exhausted, resourceName
@@ -250,7 +289,7 @@ func (r *BatchResources) Sub(other BatchResources) (bool, string) {
 // SumUp sum ups the batch resources from other
 func (r *BatchResources) SumUp(other BatchResources) {
 	r.Bytes += other.Bytes
-	r.UsedZKCounters.SumUp(other.UsedZKCounters)
+	r.ZKCounters.SumUp(other.ZKCounters)
 }
 
 // InfoReadWrite has information about modified addresses during the execution
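// Illustrative sketch (not part of the patch): minimal stand-ins for the new
// state.BatchResources helpers above, showing the intended difference between
// Fits (a read-only check) and Sub (a subtraction that reports overflow and
// leaves the receiver unchanged on failure). Only Bytes and a single counter
// are modeled; the names and values here are hypothetical.
package main

import "fmt"

type resources struct {
	Steps uint64
	Bytes uint64
}

// fits reports whether "other" fits in r without modifying r.
func (r *resources) fits(other resources) (bool, string) {
	if other.Bytes > r.Bytes {
		return false, "Bytes"
	}
	if other.Steps > r.Steps {
		return false, "Steps"
	}
	return true, ""
}

// sub subtracts "other" from r; on overflow it leaves r untouched and returns
// true plus the offending resource name, mirroring BatchResources.Sub.
func (r *resources) sub(other resources) (bool, string) {
	if ok, name := r.fits(other); !ok {
		return true, name
	}
	r.Bytes -= other.Bytes
	r.Steps -= other.Steps
	return false, ""
}

func main() {
	remaining := resources{Steps: 100, Bytes: 50}
	tx := resources{Steps: 120, Bytes: 10}

	if ok, name := remaining.fits(tx); !ok {
		fmt.Println("tx does not fit, exhausted resource:", name) // Steps
	}
	overflow, name := remaining.sub(tx)
	fmt.Println("overflow:", overflow, "resource:", name, "remaining unchanged:", remaining)
}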
diff --git a/test/docker-compose.yml b/test/docker-compose.yml
index 2b006678cb..f9c8ef62bd 100644
--- a/test/docker-compose.yml
+++ b/test/docker-compose.yml
@@ -513,7 +513,7 @@ services:
 
   zkevm-prover:
     container_name: zkevm-prover
-    image: hermeznetwork/zkevm-prover:v5.0.0-RC5
+    image: hermeznetwork/zkevm-prover:v5.0.0-RC8
     ports:
       - 50061:50061 # MT
       - 50071:50071 # Executor
@@ -602,7 +602,7 @@ services:
 
   zkevm-permissionless-prover:
     container_name: zkevm-permissionless-prover
-    image: hermeznetwork/zkevm-prover:v5.0.0-RC5
+    image: hermeznetwork/zkevm-prover:v5.0.0-RC8
     ports:
       # - 50058:50058 # Prover
       - 50059:50052 # Mock prover
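// Illustrative sketch (not part of the patch): how a GetBestFittingTx-style
// selection loop can use *reserved* counters for the fit check, as in the
// sequencer/worker.go change earlier in this diff. Types, field names, and
// values below are simplified and hypothetical.
package main

import "fmt"

type tx struct {
	hash          string
	gasPrice      uint64
	reservedSteps uint64
	bytes         uint64
}

type remaining struct {
	steps uint64
	bytes uint64
}

// bestFitting returns the first tx (assumed sorted by gas price, descending)
// whose reserved resources fit in what is left of the batch; candidates whose
// worst-case usage overflows the remaining resources are skipped.
func bestFitting(sorted []tx, rem remaining) *tx {
	for i := range sorted {
		c := sorted[i]
		if c.reservedSteps <= rem.steps && c.bytes <= rem.bytes {
			return &sorted[i]
		}
		// Reserved resources overflow the remaining batch resources: skip this tx.
	}
	return nil
}

func main() {
	rem := remaining{steps: 100, bytes: 50}
	sorted := []tx{
		{hash: "0x04", gasPrice: 100, reservedSteps: 150, bytes: 10}, // too big, skipped
		{hash: "0x03", gasPrice: 25, reservedSteps: 80, bytes: 10},   // fits
	}
	if best := bestFitting(sorted, rem); best != nil {
		fmt.Println("selected", best.hash)
	}
}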