diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6f921abbf0..6c5f5f57b9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -132,8 +132,7 @@ jobs: skip-pkg-cache: true - name: Custom Lint run: | - go run ./linter/koanf ./... - go run ./linter/pointercheck ./... + go run ./linters ./... - name: Set environment variables run: | diff --git a/.github/workflows/release-ci.yml b/.github/workflows/release-ci.yml new file mode 100644 index 0000000000..5282510e87 --- /dev/null +++ b/.github/workflows/release-ci.yml @@ -0,0 +1,30 @@ +name: Release CI +run-name: Release CI triggered from @${{ github.actor }} of ${{ github.head_ref }} + +on: + workflow_dispatch: + +jobs: + build_and_run: + runs-on: ubuntu-8 + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + driver-opts: network=host + + - name: Cache Docker layers + uses: actions/cache@v3 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ hashFiles('Dockerfile') }} + restore-keys: ${{ runner.os }}-buildx- + + - name: Startup Nitro testnode + run: ./scripts/startup-testnode.bash diff --git a/Dockerfile b/Dockerfile index e347110d84..642905a525 100644 --- a/Dockerfile +++ b/Dockerfile @@ -171,6 +171,7 @@ RUN ./download-machine.sh consensus-v10.2 0x0754e09320c381566cc0449904c377a52bd3 RUN ./download-machine.sh consensus-v10.3 0xf559b6d4fa869472dabce70fe1c15221bdda837533dfd891916836975b434dec RUN ./download-machine.sh consensus-v11 0xf4389b835497a910d7ba3ebfb77aa93da985634f3c052de1290360635be40c4a RUN ./download-machine.sh consensus-v11.1 0x68e4fe5023f792d4ef584796c84d710303a5e12ea02d6e37e2b5e9c4332507c4 +RUN ./download-machine.sh consensus-v20 0x8b104a2e80ac6165dc58b9048de12f301d70b02a0ab51396c22b4b4b802a16a4 FROM golang:1.20-bookworm as node-builder WORKDIR /workspace @@ -207,7 +208,7 @@ FROM debian:bookworm-slim as nitro-fuzzer COPY --from=fuzz-builder /workspace/fuzzers/*.fuzz /usr/local/bin/ COPY ./scripts/fuzz.bash /usr/local/bin RUN mkdir /fuzzcache -ENTRYPOINT [ "/usr/local/bin/fuzz.bash", "--binary-path", "/usr/local/bin/", "--fuzzcache-path", "/fuzzcache" ] +ENTRYPOINT [ "/usr/local/bin/fuzz.bash", "FuzzStateTransition", "--binary-path", "/usr/local/bin/", "--fuzzcache-path", "/fuzzcache" ] FROM debian:bookworm-slim as nitro-node-slim WORKDIR /home/user diff --git a/Makefile b/Makefile index a96546af4f..17cf642143 100644 --- a/Makefile +++ b/Makefile @@ -319,8 +319,7 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro # strategic rules to minimize dependency building .make/lint: $(DEP_PREDICATE) build-node-deps $(ORDER_ONLY_PREDICATE) .make - go run ./linter/koanf ./... - go run ./linter/pointercheck ./... + go run ./linters ./... 
golangci-lint run --fix yarn --cwd contracts solhint @touch $@ diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index e3af0b2afb..14d5affa08 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -41,6 +41,7 @@ import ( "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/das" + "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util" "github.com/offchainlabs/nitro/util/arbmath" @@ -73,22 +74,24 @@ type batchPosterPosition struct { type BatchPoster struct { stopwaiter.StopWaiter - l1Reader *headerreader.HeaderReader - inbox *InboxTracker - streamer *TransactionStreamer - config BatchPosterConfigFetcher - seqInbox *bridgegen.SequencerInbox - bridge *bridgegen.Bridge - syncMonitor *SyncMonitor - seqInboxABI *abi.ABI - seqInboxAddr common.Address - bridgeAddr common.Address - gasRefunderAddr common.Address - building *buildingBatch - daWriter das.DataAvailabilityServiceWriter - dataPoster *dataposter.DataPoster - redisLock *redislock.Simple - messagesPerBatch *arbmath.MovingAverage[uint64] + l1Reader *headerreader.HeaderReader + inbox *InboxTracker + streamer *TransactionStreamer + arbOSVersionGetter execution.FullExecutionClient + config BatchPosterConfigFetcher + seqInbox *bridgegen.SequencerInbox + bridge *bridgegen.Bridge + syncMonitor *SyncMonitor + seqInboxABI *abi.ABI + seqInboxAddr common.Address + bridgeAddr common.Address + gasRefunderAddr common.Address + building *buildingBatch + daWriter das.DataAvailabilityServiceWriter + dataPoster *dataposter.DataPoster + redisLock *redislock.Simple + messagesPerBatch *arbmath.MovingAverage[uint64] + non4844BatchCount int // Count of consecutive non-4844 batches posted // This is an atomic variable that should only be accessed atomically. // An estimate of the number of batches we want to post but haven't yet. // This doesn't include batches which we don't want to post yet due to the L1 bounds. 
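For orientation, the two new BatchPoster fields above (arbOSVersionGetter and non4844BatchCount) feed the blob-versus-calldata decision made later in maybePostSequencerBatch. A condensed sketch of that decision follows; it is illustrative only, and the per-byte blob and calldata fees are assumed to be computed from the latest parent-chain header as in the hunks further down.

package sketch

import "math/big"

// shouldUse4844 condenses the gating added to maybePostSequencerBatch below.
func shouldUse4844(arbOSVersion uint64, ignoreBlobPrice bool, backlog uint64,
	non4844BatchCount int, blobFeePerByte, calldataFeePerByte *big.Int) bool {
	if arbOSVersion < 20 {
		return false // blob batches are only considered once ArbOS 20 is active
	}
	if ignoreBlobPrice {
		return true
	}
	// Hysteresis: with a backlog, keep posting calldata batches until 16 of them
	// have gone out, so the poster does not flip between tx types (the geth
	// txpool rejects mixed tx types per account with "address already reserved").
	if backlog != 0 && non4844BatchCount != 0 && non4844BatchCount <= 16 {
		return false
	}
	// Otherwise post blobs only when they are cheaper per byte than calldata.
	return blobFeePerByte.Cmp(calldataFeePerByte) < 0
}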
@@ -136,7 +139,7 @@ type BatchPosterConfig struct { RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"` Post4844Blobs bool `koanf:"post-4844-blobs" reload:"hot"` - ForcePost4844Blobs bool `koanf:"force-post-4844-blobs" reload:"hot"` + IgnoreBlobPrice bool `koanf:"ignore-blob-price" reload:"hot"` ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` @@ -186,7 +189,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".gas-refunder-address", DefaultBatchPosterConfig.GasRefunderAddress, "The gas refunder contract address (optional)") f.Uint64(prefix+".extra-batch-gas", DefaultBatchPosterConfig.ExtraBatchGas, "use this much more gas than estimation says is necessary to post batches") f.Bool(prefix+".post-4844-blobs", DefaultBatchPosterConfig.Post4844Blobs, "if the parent chain supports 4844 blobs and they're well priced, post EIP-4844 blobs") - f.Bool(prefix+".force-post-4844-blobs", DefaultBatchPosterConfig.ForcePost4844Blobs, "if the parent chain supports 4844 blobs and post-4844-blobs is true, post 4844 blobs even if it's not price efficient") + f.Bool(prefix+".ignore-blob-price", DefaultBatchPosterConfig.IgnoreBlobPrice, "if the parent chain supports 4844 blobs and ignore-blob-price is true, post 4844 blobs even if it's not price efficient") f.String(prefix+".redis-url", DefaultBatchPosterConfig.RedisUrl, "if non-empty, the Redis URL to store queued transactions in") f.String(prefix+".l1-block-bound", DefaultBatchPosterConfig.L1BlockBound, "only post messages to batches when they're within the max future block/timestamp as of this L1 block tag (\"safe\", \"finalized\", \"latest\", or \"ignore\" to ignore this check)") f.Duration(prefix+".l1-block-bound-bypass", DefaultBatchPosterConfig.L1BlockBoundBypass, "post batches even if not within the layer 1 future bounds if we're within this margin of the max delay") @@ -202,7 +205,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ // This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go MaxSize: 100000, // TODO: is 1000 bytes an appropriate margin for error vs blob space efficiency? 
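For reference, the Max4844BatchSize default just below factors the previously inlined 254 * params.BlobTxFieldElementsPerBlob / 8 term out into blobs.BlobEncodableData. A rough sketch of the arithmetic, assuming the go-ethereum blob parameters in effect at the time of this change (4096 field elements per blob, 1<<17 blob gas per blob, 786432 max blob gas per block):

package sketch

const (
	blobEncodableData = 254 * 4096 / 8                         // 130048 usable bytes per blob
	blobsPerBlock     = 786432 / (1 << 17)                     // 6 blobs per parent-chain block
	max4844BatchSize  = blobEncodableData*blobsPerBlock - 1000 // 779288 bytes
)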
- Max4844BatchSize: (254 * params.BlobTxFieldElementsPerBlob / 8 * (params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob)) - 1000, + Max4844BatchSize: blobs.BlobEncodableData*(params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) - 1000, PollInterval: time.Second * 10, ErrorDelay: time.Second * 10, MaxDelay: time.Hour, @@ -212,7 +215,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ GasRefunderAddress: "", ExtraBatchGas: 50_000, Post4844Blobs: false, - ForcePost4844Blobs: false, + IgnoreBlobPrice: false, DataPoster: dataposter.DefaultDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", @@ -242,7 +245,7 @@ var TestBatchPosterConfig = BatchPosterConfig{ GasRefunderAddress: "", ExtraBatchGas: 10_000, Post4844Blobs: true, - ForcePost4844Blobs: false, + IgnoreBlobPrice: false, DataPoster: dataposter.TestDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", @@ -255,6 +258,7 @@ type BatchPosterOpts struct { L1Reader *headerreader.HeaderReader Inbox *InboxTracker Streamer *TransactionStreamer + VersionGetter execution.FullExecutionClient SyncMonitor *SyncMonitor Config BatchPosterConfigFetcher DeployInfo *chaininfo.RollupAddresses @@ -293,19 +297,20 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e return nil, err } b := &BatchPoster{ - l1Reader: opts.L1Reader, - inbox: opts.Inbox, - streamer: opts.Streamer, - syncMonitor: opts.SyncMonitor, - config: opts.Config, - bridge: bridge, - seqInbox: seqInbox, - seqInboxABI: seqInboxABI, - seqInboxAddr: opts.DeployInfo.SequencerInbox, - gasRefunderAddr: opts.Config().gasRefunder, - bridgeAddr: opts.DeployInfo.Bridge, - daWriter: opts.DAWriter, - redisLock: redisLock, + l1Reader: opts.L1Reader, + inbox: opts.Inbox, + streamer: opts.Streamer, + arbOSVersionGetter: opts.VersionGetter, + syncMonitor: opts.SyncMonitor, + config: opts.Config, + bridge: bridge, + seqInbox: seqInbox, + seqInboxABI: seqInboxABI, + seqInboxAddr: opts.DeployInfo.SequencerInbox, + gasRefunderAddr: opts.Config().gasRefunder, + bridgeAddr: opts.DeployInfo.Bridge, + daWriter: opts.DAWriter, + redisLock: redisLock, } b.messagesPerBatch, err = arbmath.NewMovingAverage[uint64](20) if err != nil { @@ -947,7 +952,6 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if dbBatchCount > batchPosition.NextSeqNum { return false, fmt.Errorf("attempting to post batch %v, but the local inbox tracker database already has %v batches", batchPosition.NextSeqNum, dbBatchCount) } - if b.building == nil || b.building.startMsgCount != batchPosition.MessageCount { latestHeader, err := b.l1Reader.LastHeader(ctx) if err != nil { @@ -956,17 +960,34 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) var use4844 bool config := b.config() if config.Post4844Blobs && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { - if config.ForcePost4844Blobs { - use4844 = true - } else { - blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) - blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) - blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) - - calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) - use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + arbOSVersion, err := b.arbOSVersionGetter.ArbOSVersionForMessageNumber(arbutil.MessageIndex(arbmath.SaturatingUSub(uint64(batchPosition.MessageCount), 1))) + if err != nil { 
+ return false, err + } + if arbOSVersion >= 20 { + if config.IgnoreBlobPrice { + use4844 = true + } else { + backlog := atomic.LoadUint64(&b.backlog) + // Logic to prevent switching from non-4844 batches to 4844 batches too often, + // so that blocks can be filled efficiently. The geth txpool rejects txs for + // accounts that already have the other type of txs in the pool with + // "address already reserved". This logic makes sure that, if there is a backlog, + // enough non-4844 batches have been posted to fill a block before switching. + if backlog == 0 || + b.non4844BatchCount == 0 || + b.non4844BatchCount > 16 { + blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) + blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) + blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) + + calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) + use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + } + } } } + b.building = &buildingBatch{ segments: newBatchSegments(batchPosition.DelayedMessageCount, b.config(), b.GetBacklogEstimate(), use4844), msgCount: batchPosition.MessageCount, @@ -1198,9 +1219,15 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) "totalSegments", len(b.building.segments.rawSegments), "numBlobs", len(kzgBlobs), ) + recentlyHitL1Bounds := time.Since(b.lastHitL1Bounds) < config.PollInterval*3 postedMessages := b.building.msgCount - batchPosition.MessageCount b.messagesPerBatch.Update(uint64(postedMessages)) + if b.building.use4844 { + b.non4844BatchCount = 0 + } else { + b.non4844BatchCount++ + } unpostedMessages := msgCount - b.building.msgCount messagesPerBatch := b.messagesPerBatch.Average() if messagesPerBatch == 0 { @@ -1340,3 +1367,56 @@ func (b *BatchPoster) StopAndWait() { b.dataPoster.StopAndWait() b.redisLock.StopAndWait() } + +type BoolRing struct { + buffer []bool + bufferPosition int +} + +func NewBoolRing(size int) *BoolRing { + return &BoolRing{ + buffer: make([]bool, 0, size), + } +} + +func (b *BoolRing) Update(value bool) { + period := cap(b.buffer) + if period == 0 { + return + } + if len(b.buffer) < period { + b.buffer = append(b.buffer, value) + } else { + b.buffer[b.bufferPosition] = value + } + b.bufferPosition = (b.bufferPosition + 1) % period +} + +func (b *BoolRing) Empty() bool { + return len(b.buffer) == 0 +} + +// Peek returns the most recently inserted value. +// Assumes not empty, check Empty() first +func (b *BoolRing) Peek() bool { + lastPosition := b.bufferPosition - 1 + if lastPosition < 0 { + // This is the case where we have wrapped around, since Peek() shouldn't + // be called without checking Empty(), so we can just use capacity. + lastPosition = cap(b.buffer) - 1 + } + return b.buffer[lastPosition] +} + +// All returns true if the BoolRing is full and all values equal value.
+func (b *BoolRing) All(value bool) bool { + if len(b.buffer) < cap(b.buffer) { + return false + } + for _, v := range b.buffer { + if v != value { + return false + } + } + return true +} diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 1415f78140..c106df08fa 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -31,10 +31,10 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/go-redis/redis/v8" "github.com/holiman/uint256" "github.com/offchainlabs/nitro/arbnode/dataposter/dbstorage" + "github.com/offchainlabs/nitro/arbnode/dataposter/externalsigner" "github.com/offchainlabs/nitro/arbnode/dataposter/noop" "github.com/offchainlabs/nitro/arbnode/dataposter/slice" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" @@ -42,6 +42,7 @@ import ( "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/util/headerreader" + "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/util/stopwaiter" "github.com/spf13/pflag" @@ -58,17 +59,18 @@ import ( // DataPoster must be RLP serializable and deserializable type DataPoster struct { stopwaiter.StopWaiter - headerReader *headerreader.HeaderReader - client arbutil.L1Interface - auth *bind.TransactOpts - signer signerFn - config ConfigFetcher - usingNoOpStorage bool - replacementTimes []time.Duration - metadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error) - extraBacklog func() uint64 - parentChainID *big.Int - parentChainID256 *uint256.Int + headerReader *headerreader.HeaderReader + client arbutil.L1Interface + auth *bind.TransactOpts + signer signerFn + config ConfigFetcher + usingNoOpStorage bool + replacementTimes []time.Duration + blobTxReplacementTimes []time.Duration + metadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error) + extraBacklog func() uint64 + parentChainID *big.Int + parentChainID256 *uint256.Int // These fields are protected by the mutex. 
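Stepping back to the BoolRing helper introduced at the end of batch_poster.go above, a minimal usage sketch (hypothetical values; the type is not referenced by the hunks shown here):

package arbnode

import "fmt"

func boolRingDemo() {
	ring := NewBoolRing(3)
	for _, posted4844 := range []bool{true, false, true, true} {
		ring.Update(posted4844) // the fourth value wraps around and overwrites the first
	}
	fmt.Println(ring.Empty())   // false
	fmt.Println(ring.Peek())    // true: the most recently inserted value
	fmt.Println(ring.All(true)) // false: the ring is full but still holds one false
}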
// TODO: factor out these fields into separate structure, since now one @@ -129,6 +131,10 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro if err != nil { return nil, err } + blobTxReplacementTimes, err := parseReplacementTimes(cfg.BlobTxReplacementTimes) + if err != nil { + return nil, err + } useNoOpStorage := cfg.UseNoOpStorage if opts.HeaderReader.IsParentChainArbitrum() && !cfg.UseNoOpStorage { useNoOpStorage = true @@ -172,15 +178,16 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro signer: func(_ context.Context, addr common.Address, tx *types.Transaction) (*types.Transaction, error) { return opts.Auth.Signer(addr, tx) }, - config: opts.Config, - usingNoOpStorage: useNoOpStorage, - replacementTimes: replacementTimes, - metadataRetriever: opts.MetadataRetriever, - queue: queue, - errorCount: make(map[uint64]int), - maxFeeCapExpression: expression, - extraBacklog: opts.ExtraBacklog, - parentChainID: opts.ParentChainID, + config: opts.Config, + usingNoOpStorage: useNoOpStorage, + replacementTimes: replacementTimes, + blobTxReplacementTimes: blobTxReplacementTimes, + metadataRetriever: opts.MetadataRetriever, + queue: queue, + errorCount: make(map[uint64]int), + maxFeeCapExpression: expression, + extraBacklog: opts.ExtraBacklog, + parentChainID: opts.ParentChainID, } var overflow bool dp.parentChainID256, overflow = uint256.FromBig(opts.ParentChainID) @@ -244,35 +251,6 @@ func rpcClient(ctx context.Context, opts *ExternalSignerCfg) (*rpc.Client, error ) } -// txToSendTxArgs converts transaction to SendTxArgs. This is needed for -// external signer to specify From field. -func txToSendTxArgs(addr common.Address, tx *types.Transaction) (*apitypes.SendTxArgs, error) { - var to *common.MixedcaseAddress - if tx.To() != nil { - to = new(common.MixedcaseAddress) - *to = common.NewMixedcaseAddress(*tx.To()) - } - data := (hexutil.Bytes)(tx.Data()) - val := (*hexutil.Big)(tx.Value()) - if val == nil { - val = (*hexutil.Big)(big.NewInt(0)) - } - al := tx.AccessList() - return &apitypes.SendTxArgs{ - From: common.NewMixedcaseAddress(addr), - To: to, - Gas: hexutil.Uint64(tx.Gas()), - GasPrice: (*hexutil.Big)(tx.GasPrice()), - MaxFeePerGas: (*hexutil.Big)(tx.GasFeeCap()), - MaxPriorityFeePerGas: (*hexutil.Big)(tx.GasTipCap()), - Value: *val, - Nonce: hexutil.Uint64(tx.Nonce()), - Data: &data, - AccessList: &al, - ChainID: (*hexutil.Big)(tx.ChainId()), - }, nil -} - // externalSigner returns signer function and ethereum address of the signer. // Returns an error if address isn't specified or if it can't connect to the // signer RPC server. @@ -291,7 +269,7 @@ func externalSigner(ctx context.Context, opts *ExternalSignerCfg) (signerFn, com // RLP encoded transaction object. 
// https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_signtransaction var data hexutil.Bytes - args, err := txToSendTxArgs(addr, tx) + args, err := externalsigner.TxToSignTxArgs(addr, tx) if err != nil { return nil, fmt.Errorf("error converting transaction to sendTxArgs: %w", err) } @@ -322,14 +300,15 @@ func (p *DataPoster) MaxMempoolTransactions() uint64 { if p.usingNoOpStorage { return 1 } - return p.config().MaxMempoolTransactions + config := p.config() + return arbmath.MinInt(config.MaxMempoolTransactions, config.MaxMempoolWeight) } var ErrExceedsMaxMempoolSize = errors.New("posting this transaction will exceed max mempool size") // Does basic check whether posting transaction with specified nonce would // result in exceeding maximum queue length or maximum transactions in mempool. -func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64) error { +func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64, thisWeight uint64) error { cfg := p.config() // If the queue has reached configured max size, don't post a transaction. if cfg.MaxQueuedTransactions > 0 { @@ -352,6 +331,43 @@ func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64) err return fmt.Errorf("%w: transaction nonce: %d, unconfirmed nonce: %d, max mempool size: %d", ErrExceedsMaxMempoolSize, nextNonce, unconfirmedNonce, cfg.MaxMempoolTransactions) } } + // Check that posting a new transaction won't exceed maximum pending + // weight in mempool. + if cfg.MaxMempoolWeight > 0 { + unconfirmedNonce, err := p.client.NonceAt(ctx, p.Sender(), nil) + if err != nil { + return fmt.Errorf("getting nonce of a dataposter sender: %w", err) + } + if unconfirmedNonce > nextNonce { + return fmt.Errorf("latest on-chain nonce %v is greater than to next nonce %v", unconfirmedNonce, nextNonce) + } + + var confirmedWeight uint64 + if unconfirmedNonce > 0 { + confirmedMeta, err := p.queue.Get(ctx, unconfirmedNonce-1) + if err != nil { + return err + } + if confirmedMeta != nil { + confirmedWeight = confirmedMeta.CumulativeWeight() + } + } + previousTxMeta, err := p.queue.FetchLast(ctx) + if err != nil { + return err + } + var previousTxCumulativeWeight uint64 + if previousTxMeta != nil { + previousTxCumulativeWeight = previousTxMeta.CumulativeWeight() + } + previousTxCumulativeWeight = arbmath.MaxInt(previousTxCumulativeWeight, confirmedWeight) + newCumulativeWeight := previousTxCumulativeWeight + thisWeight + + weightDiff := arbmath.MinInt(newCumulativeWeight-confirmedWeight, (nextNonce-unconfirmedNonce)*params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) + if weightDiff > cfg.MaxMempoolWeight { + return fmt.Errorf("%w: transaction nonce: %d, transaction cumulative weight: %d, unconfirmed nonce: %d, confirmed weight: %d, new mempool weight: %d, max mempool weight: %d", ErrExceedsMaxMempoolSize, nextNonce, newCumulativeWeight, unconfirmedNonce, confirmedWeight, weightDiff, cfg.MaxMempoolWeight) + } + } return nil } @@ -360,41 +376,41 @@ func (p *DataPoster) waitForL1Finality() bool { } // Requires the caller hold the mutex. -// Returns the next nonce, its metadata if stored, a bool indicating if the metadata is present, and an error. +// Returns the next nonce, its metadata if stored, a bool indicating if the metadata is present, the cumulative weight, and an error if present. // Unlike GetNextNonceAndMeta, this does not call the metadataRetriever if the metadata is not stored in the queue. 
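To make the new weight accounting concrete: every queued transaction contributes max(1, number of blobs) to a cumulative weight, and canPostWithNonce above refuses to post once the pending weight, additionally bounded by blobs-per-block times the nonce gap, would exceed MaxMempoolWeight. A simplified sketch using the new default of 18 (the real check works on stored cumulative weights rather than a running total):

package sketch

const maxMempoolWeight = 18 // DefaultDataPosterConfig.MaxMempoolWeight in this change

func canQueue(pendingWeight, numBlobs uint64) bool {
	weight := numBlobs
	if weight == 0 {
		weight = 1 // calldata-only transactions count as weight 1
	}
	return pendingWeight+weight <= maxMempoolWeight
}

With these defaults, three pending 6-blob batches (weight 18) exhaust the budget, while up to 18 calldata-only batches fit, subject to MaxMempoolTransactions.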
-func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context) (uint64, []byte, bool, error) { +func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context, thisWeight uint64) (uint64, []byte, bool, uint64, error) { // Ensure latest finalized block state is available. blockNum, err := p.client.BlockNumber(ctx) if err != nil { - return 0, nil, false, err + return 0, nil, false, 0, err } lastQueueItem, err := p.queue.FetchLast(ctx) if err != nil { - return 0, nil, false, fmt.Errorf("fetching last element from queue: %w", err) + return 0, nil, false, 0, fmt.Errorf("fetching last element from queue: %w", err) } if lastQueueItem != nil { nextNonce := lastQueueItem.FullTx.Nonce() + 1 - if err := p.canPostWithNonce(ctx, nextNonce); err != nil { - return 0, nil, false, err + if err := p.canPostWithNonce(ctx, nextNonce, thisWeight); err != nil { + return 0, nil, false, 0, err } - return nextNonce, lastQueueItem.Meta, true, nil + return nextNonce, lastQueueItem.Meta, true, lastQueueItem.CumulativeWeight(), nil } if err := p.updateNonce(ctx); err != nil { if !p.queue.IsPersistent() && p.waitForL1Finality() { - return 0, nil, false, fmt.Errorf("error getting latest finalized nonce (and queue is not persistent): %w", err) + return 0, nil, false, 0, fmt.Errorf("error getting latest finalized nonce (and queue is not persistent): %w", err) } // Fall back to using a recent block to get the nonce. This is safe because there's nothing in the queue. nonceQueryBlock := arbmath.UintToBig(arbmath.SaturatingUSub(blockNum, 1)) log.Warn("failed to update nonce with queue empty; falling back to using a recent block", "recentBlock", nonceQueryBlock, "err", err) nonce, err := p.client.NonceAt(ctx, p.Sender(), nonceQueryBlock) if err != nil { - return 0, nil, false, fmt.Errorf("failed to get nonce at block %v: %w", nonceQueryBlock, err) + return 0, nil, false, 0, fmt.Errorf("failed to get nonce at block %v: %w", nonceQueryBlock, err) } p.lastBlock = nonceQueryBlock p.nonce = nonce } - return p.nonce, nil, false, nil + return p.nonce, nil, false, p.nonce, nil } // GetNextNonceAndMeta retrieves generates next nonce, validates that a @@ -403,7 +419,7 @@ func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context) (uint64, []by func (p *DataPoster) GetNextNonceAndMeta(ctx context.Context) (uint64, []byte, error) { p.mutex.Lock() defer p.mutex.Unlock() - nonce, meta, hasMeta, err := p.getNextNonceAndMaybeMeta(ctx) + nonce, meta, hasMeta, _, err := p.getNextNonceAndMaybeMeta(ctx, 1) if err != nil { return 0, nil, err } @@ -413,7 +429,8 @@ func (p *DataPoster) GetNextNonceAndMeta(ctx context.Context) (uint64, []byte, e return nonce, meta, err } -const minRbfIncrease = arbmath.OneInBips * 11 / 10 +const minNonBlobRbfIncrease = arbmath.OneInBips * 11 / 10 +const minBlobRbfIncrease = arbmath.OneInBips * 2 // evalMaxFeeCapExpr uses MaxFeeCapFormula from config to calculate the expression's result by plugging in appropriate parameter values // backlogOfBatches should already include extraBacklog @@ -452,7 +469,7 @@ func (p *DataPoster) evalMaxFeeCapExpr(backlogOfBatches uint64, elapsed time.Dur var big4 = big.NewInt(4) // The dataPosterBacklog argument should *not* include extraBacklog (it's added in in this function) -func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit uint64, numBlobs int, lastFeeCap *big.Int, lastTipCap *big.Int, dataCreatedAt time.Time, dataPosterBacklog uint64) (*big.Int, *big.Int, *big.Int, error) { +func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce 
uint64, gasLimit uint64, numBlobs uint64, lastTx *types.Transaction, dataCreatedAt time.Time, dataPosterBacklog uint64) (*big.Int, *big.Int, *big.Int, error) { config := p.config() dataPosterBacklog += p.extraBacklog() latestHeader, err := p.headerReader.LastHeader(ctx) @@ -462,10 +479,9 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u if latestHeader.BaseFee == nil { return nil, nil, nil, fmt.Errorf("latest parent chain block %v missing BaseFee (either the parent chain does not have EIP-1559 or the parent chain node is not synced)", latestHeader.Number) } - newBlobFeeCap := big.NewInt(0) + currentBlobFee := big.NewInt(0) if latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { - newBlobFeeCap = eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) - newBlobFeeCap.Mul(newBlobFeeCap, common.Big2) + currentBlobFee = eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) } else if numBlobs > 0 { return nil, nil, nil, fmt.Errorf( "latest parent chain block %v missing ExcessBlobGas or BlobGasUsed but blobs were specified in data poster transaction "+ @@ -478,106 +494,163 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u if err != nil { return nil, nil, nil, fmt.Errorf("failed to get latest nonce %v blocks ago (block %v): %w", config.NonceRbfSoftConfs, softConfBlock, err) } - newFeeCap := new(big.Int).Mul(latestHeader.BaseFee, common.Big2) - newFeeCap = arbmath.BigMax(newFeeCap, arbmath.FloatToBig(config.MinFeeCapGwei*params.GWei)) - newTipCap, err := p.client.SuggestGasTipCap(ctx) + suggestedTip, err := p.client.SuggestGasTipCap(ctx) if err != nil { return nil, nil, nil, err } - newTipCap = arbmath.BigMax(newTipCap, arbmath.FloatToBig(config.MinTipCapGwei*params.GWei)) - newTipCap = arbmath.BigMin(newTipCap, arbmath.FloatToBig(config.MaxTipCapGwei*params.GWei)) - - hugeTipIncrease := false - if lastTipCap != nil { - newTipCap = arbmath.BigMax(newTipCap, arbmath.BigMulByBips(lastTipCap, minRbfIncrease)) - // hugeTipIncrease is true if the new tip cap is at least 10x the last tip cap - hugeTipIncrease = lastTipCap.Sign() == 0 || arbmath.BigDiv(newTipCap, lastTipCap).Cmp(big.NewInt(10)) >= 0 + minTipCapGwei, maxTipCapGwei, minRbfIncrease := config.MinTipCapGwei, config.MaxTipCapGwei, minNonBlobRbfIncrease + if numBlobs > 0 { + minTipCapGwei, maxTipCapGwei, minRbfIncrease = config.MinBlobTxTipCapGwei, config.MaxBlobTxTipCapGwei, minBlobRbfIncrease } + newTipCap := suggestedTip + newTipCap = arbmath.BigMax(newTipCap, arbmath.FloatToBig(minTipCapGwei*params.GWei)) + newTipCap = arbmath.BigMin(newTipCap, arbmath.FloatToBig(maxTipCapGwei*params.GWei)) - newFeeCap.Add(newFeeCap, newTipCap) - if lastFeeCap != nil && hugeTipIncrease { - log.Warn("data poster recommending huge tip increase", "lastTipCap", lastTipCap, "newTipCap", newTipCap) - // If we're trying to drastically increase the tip, make sure we increase the fee cap by minRbfIncrease. - newFeeCap = arbmath.BigMax(newFeeCap, arbmath.BigMulByBips(lastFeeCap, minRbfIncrease)) - } - - // TODO: if we're significantly increasing the blob fee cap, we also need to increase the fee cap my minRbfIncrease - // TODO: look more into geth's blob mempool and make sure this behavior conforms (I think minRbfIncrease might be higher there) - + // Compute the max fee with normalized gas so that blob txs aren't priced differently. 
+ // Later, split the total cost bid into blob and non-blob fee caps. elapsed := time.Since(dataCreatedAt) - maxFeeCap, err := p.evalMaxFeeCapExpr(dataPosterBacklog, elapsed) + maxNormalizedFeeCap, err := p.evalMaxFeeCapExpr(dataPosterBacklog, elapsed) if err != nil { return nil, nil, nil, err } - if arbmath.BigGreaterThan(newFeeCap, maxFeeCap) { - log.Warn( - "reducing proposed fee cap to current maximum", - "proposedFeeCap", newFeeCap, - "maxFeeCap", maxFeeCap, - "elapsed", elapsed, - ) - newFeeCap = maxFeeCap - } + normalizedGas := gasLimit + numBlobs*blobs.BlobEncodableData*params.TxDataNonZeroGasEIP2028 + targetMaxCost := arbmath.BigMulByUint(maxNormalizedFeeCap, normalizedGas) - // TODO: also have an expression limiting the max blob fee cap + maxMempoolWeight := arbmath.MinInt(config.MaxMempoolWeight, config.MaxMempoolTransactions) latestBalance := p.balance balanceForTx := new(big.Int).Set(latestBalance) + weight := arbmath.MaxInt(1, numBlobs) + weightRemaining := weight + if config.AllocateMempoolBalance && !p.usingNoOpStorage { - // We split the transactions into three groups: - // - The first transaction gets 1/2 of the balance. - // - The first half of transactions get 1/3 of the balance split among them. - // - The remaining transactions get the remaining 1/6 of the balance split among them. + // We split the transaction weight into three groups: + // - The first weight point gets 1/2 of the balance. + // - The first half of the weight gets 1/3 of the balance split among them. + // - The remaining weight get the remaining 1/6 of the balance split among them. // This helps ensure batch posting is reliable under a variety of fee conditions. // With noop storage, we don't try to replace-by-fee, so we don't need to worry about this. - balanceForTx.Div(balanceForTx, common.Big2) - if nonce != softConfNonce && config.MaxMempoolTransactions > 1 { + balancePerWeight := new(big.Int).Div(balanceForTx, common.Big2) + balanceForTx = big.NewInt(0) + if nonce == softConfNonce || maxMempoolWeight == 1 { + balanceForTx.Add(balanceForTx, balancePerWeight) + weightRemaining -= 1 + } + if weightRemaining > 0 { // Compared to dividing the remaining transactions by balance equally, // the first half of transactions should get a 4/3 weight, // and the remaining half should get a 2/3 weight. // This makes sure the average weight is 1, and the first half of transactions // have twice the weight of the second half of transactions. // The +1 and -1 here are to account for the first transaction being handled separately. - if nonce > softConfNonce && nonce < softConfNonce+1+(config.MaxMempoolTransactions-1)/2 { - balanceForTx.Mul(balanceForTx, big4) + if nonce > softConfNonce && nonce < softConfNonce+1+(maxMempoolWeight-1)/2 { + balancePerWeight.Mul(balancePerWeight, big4) } else { - balanceForTx.Mul(balanceForTx, common.Big2) + balancePerWeight.Mul(balancePerWeight, common.Big2) } - balanceForTx.Div(balanceForTx, common.Big3) + balancePerWeight.Div(balancePerWeight, common.Big3) // After weighting, split the balance between each of the transactions // other than the first tx which already got half. 
// balanceForTx /= config.MaxMempoolTransactions-1 - balanceForTx.Div(balanceForTx, arbmath.UintToBig(config.MaxMempoolTransactions-1)) + balancePerWeight.Div(balancePerWeight, arbmath.UintToBig(maxMempoolWeight-1)) + balanceForTx.Add(balanceForTx, arbmath.BigMulByUint(balancePerWeight, weight)) } } - // TODO: take into account blob costs - balanceFeeCap := arbmath.BigDivByUint(balanceForTx, gasLimit) - if arbmath.BigGreaterThan(newFeeCap, balanceFeeCap) { + + if arbmath.BigGreaterThan(targetMaxCost, balanceForTx) { log.Warn( "lack of L1 balance prevents posting transaction with desired fee cap", "balance", latestBalance, - "maxTransactions", config.MaxMempoolTransactions, + "weight", weight, + "maxMempoolWeight", maxMempoolWeight, "balanceForTransaction", balanceForTx, "gasLimit", gasLimit, - "desiredFeeCap", newFeeCap, - "balanceFeeCap", balanceFeeCap, + "targetMaxCost", targetMaxCost, "nonce", nonce, "softConfNonce", softConfNonce, ) - newFeeCap = balanceFeeCap - } - - if arbmath.BigGreaterThan(newTipCap, newFeeCap) { - log.Warn( - "reducing new tip cap to new fee cap", + targetMaxCost = balanceForTx + } + + if lastTx != nil { + // Replace by fee rules require that the tip cap is increased + newTipCap = arbmath.BigMax(newTipCap, arbmath.BigMulByBips(lastTx.GasTipCap(), minRbfIncrease)) + } + + // Divide the targetMaxCost into blob and non-blob costs. + currentNonBlobFee := arbmath.BigAdd(latestHeader.BaseFee, newTipCap) + blobGasUsed := params.BlobTxBlobGasPerBlob * numBlobs + currentBlobCost := arbmath.BigMulByUint(currentBlobFee, blobGasUsed) + currentNonBlobCost := arbmath.BigMulByUint(currentNonBlobFee, gasLimit) + newBlobFeeCap := arbmath.BigMul(targetMaxCost, currentBlobFee) + newBlobFeeCap.Div(newBlobFeeCap, arbmath.BigAdd(currentBlobCost, currentNonBlobCost)) + if lastTx != nil && lastTx.BlobGasFeeCap() != nil { + newBlobFeeCap = arbmath.BigMax(newBlobFeeCap, arbmath.BigMulByBips(lastTx.BlobGasFeeCap(), minRbfIncrease)) + } + targetBlobCost := arbmath.BigMulByUint(newBlobFeeCap, blobGasUsed) + targetNonBlobCost := arbmath.BigSub(targetMaxCost, targetBlobCost) + newBaseFeeCap := arbmath.BigDivByUint(targetNonBlobCost, gasLimit) + if lastTx != nil && numBlobs > 0 && arbmath.BigDivToBips(newBaseFeeCap, lastTx.GasFeeCap()) < minRbfIncrease { + // Increase the non-blob fee cap to the minimum rbf increase + newBaseFeeCap = arbmath.BigMulByBips(lastTx.GasFeeCap(), minRbfIncrease) + newNonBlobCost := arbmath.BigMulByUint(newBaseFeeCap, gasLimit) + // Increasing the non-blob fee cap requires lowering the blob fee cap to compensate + baseFeeCostIncrease := arbmath.BigSub(newNonBlobCost, targetNonBlobCost) + newBlobCost := arbmath.BigSub(targetBlobCost, baseFeeCostIncrease) + newBlobFeeCap = arbmath.BigDivByUint(newBlobCost, blobGasUsed) + } + + if arbmath.BigGreaterThan(newTipCap, newBaseFeeCap) { + log.Info( + "reducing new tip cap to new basefee cap", "proposedTipCap", newTipCap, - "newFeeCap", newFeeCap, + "newBasefeeCap", newBaseFeeCap, ) - newTipCap = new(big.Int).Set(newFeeCap) + newTipCap = new(big.Int).Set(newBaseFeeCap) + } + + logFields := []any{ + "targetMaxCost", targetMaxCost, + "elapsed", elapsed, + "dataPosterBacklog", dataPosterBacklog, + "nonce", nonce, + "isReplacing", lastTx != nil, + "balanceForTx", balanceForTx, + "currentBaseFee", latestHeader.BaseFee, + "newBasefeeCap", newBaseFeeCap, + "suggestedTip", suggestedTip, + "newTipCap", newTipCap, + "currentBlobFee", currentBlobFee, + "newBlobFeeCap", newBlobFeeCap, + } + + log.Debug("calculated data poster fee and tip 
caps", logFields...) + + if newBaseFeeCap.Sign() < 0 || newTipCap.Sign() < 0 || newBlobFeeCap.Sign() < 0 { + msg := "can't meet data poster fee cap obligations with current target max cost" + log.Info(msg, logFields...) + if lastTx != nil { + // wait until we have a higher target max cost to replace by fee + return lastTx.GasFeeCap(), lastTx.GasTipCap(), lastTx.BlobGasFeeCap(), nil + } else { + return nil, nil, nil, errors.New(msg) + } + } + + if lastTx != nil && (arbmath.BigLessThan(newBaseFeeCap, currentNonBlobFee) || (numBlobs > 0 && arbmath.BigLessThan(newBlobFeeCap, currentBlobFee))) { + // Make sure our replace by fee can meet the current parent chain fee demands. + // Without this check, we'd blindly increase each fee component by the min rbf amount each time, + // without looking at which component(s) actually need increased. + // E.g. instead of 2x basefee and 2x blobfee, we might actually want to 4x basefee and 2x blobfee. + // This check lets us hold off on the rbf until we are actually meet the current fee requirements, + // which lets us move in a particular direction (biasing towards either basefee or blobfee). + log.Info("can't meet current parent chain fees with current target max cost", logFields...) + // wait until we have a higher target max cost to replace by fee + return lastTx.GasFeeCap(), lastTx.GasTipCap(), lastTx.BlobGasFeeCap(), nil } - return newFeeCap, newTipCap, newBlobFeeCap, nil + return newBaseFeeCap, newTipCap, newBlobFeeCap, nil } func (p *DataPoster) PostSimpleTransaction(ctx context.Context, nonce uint64, to common.Address, calldata []byte, gasLimit uint64, value *big.Int) (*types.Transaction, error) { @@ -588,7 +661,11 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim p.mutex.Lock() defer p.mutex.Unlock() - expectedNonce, _, _, err := p.getNextNonceAndMaybeMeta(ctx) + var weight uint64 = 1 + if len(kzgBlobs) > 0 { + weight = uint64(len(kzgBlobs)) + } + expectedNonce, _, _, lastCumulativeWeight, err := p.getNextNonceAndMaybeMeta(ctx, weight) if err != nil { return nil, err } @@ -601,14 +678,16 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim return nil, fmt.Errorf("failed to update data poster balance: %w", err) } - feeCap, tipCap, blobFeeCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, len(kzgBlobs), nil, nil, dataCreatedAt, 0) + feeCap, tipCap, blobFeeCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, uint64(len(kzgBlobs)), nil, dataCreatedAt, 0) if err != nil { return nil, err } var deprecatedData types.DynamicFeeTx var inner types.TxData + replacementTimes := p.replacementTimes if len(kzgBlobs) > 0 { + replacementTimes = p.blobTxReplacementTimes value256, overflow := uint256.FromBig(value) if overflow { return nil, fmt.Errorf("blob transaction callvalue %v overflows uint256", value) @@ -662,13 +741,15 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim if err != nil { return nil, fmt.Errorf("signing transaction: %w", err) } + cumulativeWeight := lastCumulativeWeight + weight queuedTx := storage.QueuedTransaction{ - DeprecatedData: deprecatedData, - FullTx: fullTx, - Meta: meta, - Sent: false, - Created: dataCreatedAt, - NextReplacement: time.Now().Add(p.replacementTimes[0]), + DeprecatedData: deprecatedData, + FullTx: fullTx, + Meta: meta, + Sent: false, + Created: dataCreatedAt, + NextReplacement: time.Now().Add(replacementTimes[0]), + StoredCumulativeWeight: &cumulativeWeight, } return fullTx, p.sendTx(ctx, nil, &queuedTx) } @@ -701,17 +782,44 @@ 
func (p *DataPoster) saveTx(ctx context.Context, prevTx, newTx *storage.QueuedTr } func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransaction, newTx *storage.QueuedTransaction) error { + latestHeader, err := p.client.HeaderByNumber(ctx, nil) + if err != nil { + return err + } + var currentBlobFee *big.Int + if latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { + currentBlobFee = eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) + } + + if arbmath.BigLessThan(newTx.FullTx.GasFeeCap(), latestHeader.BaseFee) { + log.Info( + "submitting transaction with GasFeeCap less than latest basefee", + "txBasefeeCap", newTx.FullTx.GasFeeCap(), + "latestBasefee", latestHeader.BaseFee, + "elapsed", time.Since(newTx.Created), + ) + } + + if newTx.FullTx.BlobGasFeeCap() != nil && currentBlobFee != nil && arbmath.BigLessThan(newTx.FullTx.BlobGasFeeCap(), currentBlobFee) { + log.Info( + "submitting transaction with BlobGasFeeCap less than latest blobfee", + "txBlobGasFeeCap", newTx.FullTx.BlobGasFeeCap(), + "latestBlobFee", currentBlobFee, + "elapsed", time.Since(newTx.Created), + ) + } + if err := p.saveTx(ctx, prevTx, newTx); err != nil { return err } if err := p.client.SendTransaction(ctx, newTx.FullTx); err != nil { - if !strings.Contains(err.Error(), "already known") && !strings.Contains(err.Error(), "nonce too low") { - log.Warn("DataPoster failed to send transaction", "err", err, "nonce", newTx.FullTx.Nonce(), "feeCap", newTx.FullTx.GasFeeCap(), "tipCap", newTx.FullTx.GasTipCap(), "gas", newTx.FullTx.Gas()) + if !rpcclient.IsAlreadyKnownError(err) && !strings.Contains(err.Error(), "nonce too low") { + log.Warn("DataPoster failed to send transaction", "err", err, "nonce", newTx.FullTx.Nonce(), "feeCap", newTx.FullTx.GasFeeCap(), "tipCap", newTx.FullTx.GasTipCap(), "blobFeeCap", newTx.FullTx.BlobGasFeeCap(), "gas", newTx.FullTx.Gas()) return err } log.Info("DataPoster transaction already known", "err", err, "nonce", newTx.FullTx.Nonce(), "hash", newTx.FullTx.Hash()) } else { - log.Info("DataPoster sent transaction", "nonce", newTx.FullTx.Nonce(), "hash", newTx.FullTx.Hash(), "feeCap", newTx.FullTx.GasFeeCap(), "tipCap", newTx.FullTx.GasTipCap(), "gas", newTx.FullTx.Gas()) + log.Info("DataPoster sent transaction", "nonce", newTx.FullTx.Nonce(), "hash", newTx.FullTx.Hash(), "feeCap", newTx.FullTx.GasFeeCap(), "tipCap", newTx.FullTx.GasTipCap(), "blobFeeCap", newTx.FullTx.BlobGasFeeCap(), "gas", newTx.FullTx.Gas()) } newerTx := *newTx newerTx.Sent = true @@ -754,16 +862,20 @@ func updateGasCaps(tx *types.Transaction, newFeeCap, newTipCap, newBlobFeeCap *b } // The mutex must be held by the caller. 
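Before the replaceTx changes below, it may help to restate the fee-splitting scheme that feeAndTipCaps now uses. The sketch uses plain float64 instead of big.Int and omits the tip-cap clamping, the replace-by-fee floors (1.1x for calldata transactions, 2x for blob transactions), and the balance-based cap; the constants are the assumed go-ethereum/nitro values named in the comments.

package sketch

// Assumed values: params.BlobTxBlobGasPerBlob = 131072,
// blobs.BlobEncodableData = 130048, params.TxDataNonZeroGasEIP2028 = 16.
const (
	blobGasPerBlob     = 131072.0
	bytesPerBlob       = 130048.0
	calldataGasPerByte = 16.0
)

// splitFeeCaps mirrors, in simplified form, how feeAndTipCaps turns one
// normalized price into separate execution-gas and blob-gas fee caps.
func splitFeeCaps(maxNormalizedFeeCap, baseFee, tipCap, blobFee, gasLimit, numBlobs float64) (baseFeeCap, blobFeeCap float64) {
	// Blob bytes are priced as if they were calldata, so blob and calldata
	// batches bid against the same overall budget.
	normalizedGas := gasLimit + numBlobs*bytesPerBlob*calldataGasPerByte
	targetMaxCost := maxNormalizedFeeCap * normalizedGas

	// Split that budget between the two fee markets in proportion to what each
	// component costs at current prices.
	blobGas := numBlobs * blobGasPerBlob
	currentBlobCost := blobFee * blobGas
	currentNonBlobCost := (baseFee + tipCap) * gasLimit
	blobFeeCap = targetMaxCost * blobFee / (currentBlobCost + currentNonBlobCost)
	baseFeeCap = (targetMaxCost - blobFeeCap*blobGas) / gasLimit
	return baseFeeCap, blobFeeCap
}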
-func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransaction, backlogOfBatches uint64) error { - newFeeCap, newTipCap, newBlobFeeCap, err := p.feeAndTipCaps(ctx, prevTx.FullTx.Nonce(), prevTx.FullTx.Gas(), len(prevTx.FullTx.BlobHashes()), prevTx.FullTx.GasFeeCap(), prevTx.FullTx.GasTipCap(), prevTx.Created, backlogOfBatches) +func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransaction, backlogWeight uint64) error { + newFeeCap, newTipCap, newBlobFeeCap, err := p.feeAndTipCaps(ctx, prevTx.FullTx.Nonce(), prevTx.FullTx.Gas(), uint64(len(prevTx.FullTx.BlobHashes())), prevTx.FullTx, prevTx.Created, backlogWeight) if err != nil { return err } - minNewFeeCap := arbmath.BigMulByBips(prevTx.FullTx.GasFeeCap(), minRbfIncrease) + minRbfIncrease := minNonBlobRbfIncrease + if len(prevTx.FullTx.BlobHashes()) > 0 { + minRbfIncrease = minBlobRbfIncrease + } + newTx := *prevTx - // TODO: also look at the blob fee cap - if newFeeCap.Cmp(minNewFeeCap) < 0 { + if arbmath.BigDivToBips(newFeeCap, prevTx.FullTx.GasFeeCap()) < minRbfIncrease || + (prevTx.FullTx.BlobGasFeeCap() != nil && arbmath.BigDivToBips(newBlobFeeCap, prevTx.FullTx.BlobGasFeeCap()) < minRbfIncrease) { log.Debug( "no need to replace by fee transaction", "nonce", prevTx.FullTx.Nonce(), @@ -771,13 +883,20 @@ func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransa "recommendedFeeCap", newFeeCap, "lastTipCap", prevTx.FullTx.GasTipCap(), "recommendedTipCap", newTipCap, + "lastBlobFeeCap", prevTx.FullTx.BlobGasFeeCap(), + "recommendedBlobFeeCap", newBlobFeeCap, ) newTx.NextReplacement = time.Now().Add(time.Minute) return p.sendTx(ctx, prevTx, &newTx) } + replacementTimes := p.replacementTimes + if len(prevTx.FullTx.BlobHashes()) > 0 { + replacementTimes = p.blobTxReplacementTimes + } + elapsed := time.Since(prevTx.Created) - for _, replacement := range p.replacementTimes { + for _, replacement := range replacementTimes { if elapsed >= replacement { continue } @@ -877,7 +996,7 @@ func (p *DataPoster) maybeLogError(err error, tx *storage.QueuedTransaction, msg } else { delete(p.errorCount, nonce) } - logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.FullTx.GasFeeCap(), "tipCap", tx.FullTx.GasTipCap(), "gas", tx.FullTx.Gas()) + logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.FullTx.GasFeeCap(), "tipCap", tx.FullTx.GasTipCap(), "blobFeeCap", tx.FullTx.BlobGasFeeCap(), "gas", tx.FullTx.Gas()) } const minWait = time.Second * 10 @@ -899,7 +1018,7 @@ func (p *DataPoster) Start(ctxIn context.Context) { log.Warn("failed to update tx poster nonce", "err", err) } now := time.Now() - nextCheck := now.Add(p.replacementTimes[0]) + nextCheck := now.Add(arbmath.MinInt(p.replacementTimes[0], p.blobTxReplacementTimes[0])) maxTxsToRbf := p.config().MaxMempoolTransactions if maxTxsToRbf == 0 { maxTxsToRbf = 512 @@ -917,12 +1036,23 @@ func (p *DataPoster) Start(ctxIn context.Context) { log.Error("Failed to fetch tx queue contents", "err", err) return minWait } - for index, tx := range queueContents { - backlogOfBatches := len(queueContents) - index - 1 + latestQueued, err := p.queue.FetchLast(ctx) + if err != nil { + log.Error("Failed to fetch lastest queued tx", "err", err) + return minWait + } + var latestCumulativeWeight, latestNonce uint64 + if latestQueued != nil { + latestCumulativeWeight = latestQueued.CumulativeWeight() + latestNonce = latestQueued.FullTx.Nonce() + } + for _, tx := range queueContents { replacing := false if now.After(tx.NextReplacement) { replacing = true - 
err := p.replaceTx(ctx, tx, uint64(backlogOfBatches)) + nonceBacklog := arbmath.SaturatingUSub(latestNonce, tx.FullTx.Nonce()) + weightBacklog := arbmath.SaturatingUSub(latestCumulativeWeight, tx.CumulativeWeight()) + err := p.replaceTx(ctx, tx, arbmath.MaxInt(nonceBacklog, weightBacklog)) p.maybeLogError(err, tx, "failed to replace-by-fee transaction") } if nextCheck.After(tx.NextReplacement) { @@ -957,7 +1087,9 @@ func (p *DataPoster) Start(ctxIn context.Context) { type QueueStorage interface { // Returns at most maxResults items starting from specified index. FetchContents(ctx context.Context, startingIndex uint64, maxResults uint64) ([]*storage.QueuedTransaction, error) - // Returns item with the biggest index. + // Returns the item at index, or nil if not found. + Get(ctx context.Context, index uint64) (*storage.QueuedTransaction, error) + // Returns item with the biggest index, or nil if the queue is empty. FetchLast(ctx context.Context) (*storage.QueuedTransaction, error) // Prunes items up to (excluding) specified index. Prune(ctx context.Context, until uint64) error @@ -970,18 +1102,21 @@ type QueueStorage interface { } type DataPosterConfig struct { - RedisSigner signature.SimpleHmacConfig `koanf:"redis-signer"` - ReplacementTimes string `koanf:"replacement-times"` + RedisSigner signature.SimpleHmacConfig `koanf:"redis-signer"` + ReplacementTimes string `koanf:"replacement-times"` + BlobTxReplacementTimes string `koanf:"blob-tx-replacement-times"` // This is forcibly disabled if the parent chain is an Arbitrum chain, // so you should probably use DataPoster's waitForL1Finality method instead of reading this field directly. WaitForL1Finality bool `koanf:"wait-for-l1-finality" reload:"hot"` MaxMempoolTransactions uint64 `koanf:"max-mempool-transactions" reload:"hot"` + MaxMempoolWeight uint64 `koanf:"max-mempool-weight" reload:"hot"` MaxQueuedTransactions int `koanf:"max-queued-transactions" reload:"hot"` TargetPriceGwei float64 `koanf:"target-price-gwei" reload:"hot"` UrgencyGwei float64 `koanf:"urgency-gwei" reload:"hot"` - MinFeeCapGwei float64 `koanf:"min-fee-cap-gwei" reload:"hot"` MinTipCapGwei float64 `koanf:"min-tip-cap-gwei" reload:"hot"` + MinBlobTxTipCapGwei float64 `koanf:"min-blob-tx-tip-cap-gwei" reload:"hot"` MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"` + MaxBlobTxTipCapGwei float64 `koanf:"max-blob-tx-tip-cap-gwei" reload:"hot"` NonceRbfSoftConfs uint64 `koanf:"nonce-rbf-soft-confs" reload:"hot"` AllocateMempoolBalance bool `koanf:"allocate-mempool-balance" reload:"hot"` UseDBStorage bool `koanf:"use-db-storage"` @@ -1025,14 +1160,17 @@ type ConfigFetcher func() *DataPosterConfig func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet, defaultDataPosterConfig DataPosterConfig) { f.String(prefix+".replacement-times", defaultDataPosterConfig.ReplacementTimes, "comma-separated list of durations since first posting to attempt a replace-by-fee") + f.String(prefix+".blob-tx-replacement-times", defaultDataPosterConfig.BlobTxReplacementTimes, "comma-separated list of durations since first posting a blob transaction to attempt a replace-by-fee") f.Bool(prefix+".wait-for-l1-finality", defaultDataPosterConfig.WaitForL1Finality, "only treat a transaction as confirmed after L1 finality has been achieved (recommended)") f.Uint64(prefix+".max-mempool-transactions", defaultDataPosterConfig.MaxMempoolTransactions, "the maximum number of transactions to have queued in the mempool at once (0 = unlimited)") + f.Uint64(prefix+".max-mempool-weight", 
defaultDataPosterConfig.MaxMempoolWeight, "the maximum total weight (weight = max(1, tx.blobs)) to have queued in the mempool at once (0 = unlimited)") f.Int(prefix+".max-queued-transactions", defaultDataPosterConfig.MaxQueuedTransactions, "the maximum number of unconfirmed transactions to track at once (0 = unlimited)") f.Float64(prefix+".target-price-gwei", defaultDataPosterConfig.TargetPriceGwei, "the target price to use for maximum fee cap calculation") f.Float64(prefix+".urgency-gwei", defaultDataPosterConfig.UrgencyGwei, "the urgency to use for maximum fee cap calculation") - f.Float64(prefix+".min-fee-cap-gwei", defaultDataPosterConfig.MinFeeCapGwei, "the minimum fee cap to post transactions at") f.Float64(prefix+".min-tip-cap-gwei", defaultDataPosterConfig.MinTipCapGwei, "the minimum tip cap to post transactions at") + f.Float64(prefix+".min-blob-tx-tip-cap-gwei", defaultDataPosterConfig.MinBlobTxTipCapGwei, "the minimum tip cap to post EIP-4844 blob carrying transactions at") f.Float64(prefix+".max-tip-cap-gwei", defaultDataPosterConfig.MaxTipCapGwei, "the maximum tip cap to post transactions at") + f.Float64(prefix+".max-blob-tx-tip-cap-gwei", defaultDataPosterConfig.MaxBlobTxTipCapGwei, "the maximum tip cap to post EIP-4844 blob carrying transactions at") f.Uint64(prefix+".nonce-rbf-soft-confs", defaultDataPosterConfig.NonceRbfSoftConfs, "the maximum probable reorg depth, used to determine when a transaction will no longer likely need replaced-by-fee") f.Bool(prefix+".allocate-mempool-balance", defaultDataPosterConfig.AllocateMempoolBalance, "if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance") f.Bool(prefix+".use-db-storage", defaultDataPosterConfig.UseDBStorage, "uses database storage when enabled") @@ -1064,12 +1202,16 @@ func addExternalSignerOptions(prefix string, f *pflag.FlagSet) { var DefaultDataPosterConfig = DataPosterConfig{ ReplacementTimes: "5m,10m,20m,30m,1h,2h,4h,6h,8h,12h,16h,18h,20h,22h", + BlobTxReplacementTimes: "5m,10m,30m,1h,4h,8h,16h,22h", WaitForL1Finality: true, TargetPriceGwei: 60., UrgencyGwei: 2., - MaxMempoolTransactions: 20, + MaxMempoolTransactions: 18, + MaxMempoolWeight: 18, MinTipCapGwei: 0.05, + MinBlobTxTipCapGwei: 1, // default geth minimum, and relays aren't likely to accept lower values given propagation time MaxTipCapGwei: 5, + MaxBlobTxTipCapGwei: 1, // lower than normal because 4844 rbf is a minimum of a 2x NonceRbfSoftConfs: 1, AllocateMempoolBalance: true, UseDBStorage: true, @@ -1084,19 +1226,25 @@ var DefaultDataPosterConfig = DataPosterConfig{ var DefaultDataPosterConfigForValidator = func() DataPosterConfig { config := DefaultDataPosterConfig - config.MaxMempoolTransactions = 1 // the validator cannot queue transactions + // the validator cannot queue transactions + config.MaxMempoolTransactions = 1 + config.MaxMempoolWeight = 1 return config }() var TestDataPosterConfig = DataPosterConfig{ ReplacementTimes: "1s,2s,5s,10s,20s,30s,1m,5m", + BlobTxReplacementTimes: "1s,10s,30s,5m", RedisSigner: signature.TestSimpleHmacConfig, WaitForL1Finality: false, TargetPriceGwei: 60., UrgencyGwei: 2., - MaxMempoolTransactions: 20, + MaxMempoolTransactions: 18, + MaxMempoolWeight: 18, MinTipCapGwei: 0.05, + MinBlobTxTipCapGwei: 1, MaxTipCapGwei: 5, + MaxBlobTxTipCapGwei: 1, NonceRbfSoftConfs: 1, AllocateMempoolBalance: true, UseDBStorage: false, @@ -1110,6 +1258,8 @@ var TestDataPosterConfigForValidator = func() DataPosterConfig { config :=
TestDataPosterConfig - config.MaxMempoolTransactions = 1 // the validator cannot queue transactions + // the validator cannot queue transactions + config.MaxMempoolTransactions = 1 + config.MaxMempoolWeight = 1 return config }() diff --git a/arbnode/dataposter/dataposter_test.go b/arbnode/dataposter/dataposter_test.go index 3d7fa60dc7..06e3144ed1 100644 --- a/arbnode/dataposter/dataposter_test.go +++ b/arbnode/dataposter/dataposter_test.go @@ -13,6 +13,8 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" "github.com/google/go-cmp/cmp" + "github.com/holiman/uint256" + "github.com/offchainlabs/nitro/arbnode/dataposter/externalsigner" "github.com/offchainlabs/nitro/arbnode/dataposter/externalsignertest" ) @@ -66,17 +68,42 @@ func signerTestCfg(addr common.Address) (*ExternalSignerCfg, error) { }, nil } +var ( + blobTx = types.NewTx( + &types.BlobTx{ + ChainID: uint256.NewInt(1337), + Nonce: 13, + GasTipCap: uint256.NewInt(1), + GasFeeCap: uint256.NewInt(1), + Gas: 3, + To: common.Address{}, + Value: uint256.NewInt(1), + Data: []byte{0x01, 0x02, 0x03}, + BlobHashes: []common.Hash{ + common.BigToHash(big.NewInt(1)), + common.BigToHash(big.NewInt(2)), + common.BigToHash(big.NewInt(3)), + }, + Sidecar: &types.BlobTxSidecar{}, + }, + ) + dynamicFeeTx = types.NewTx( + &types.DynamicFeeTx{ + Nonce: 13, + GasTipCap: big.NewInt(1), + GasFeeCap: big.NewInt(1), + Gas: 3, + To: nil, + Value: big.NewInt(1), + Data: []byte{0x01, 0x02, 0x03}, + }, + ) +) + func TestExternalSigner(t *testing.T) { - ctx := context.Background() - httpSrv, srv := externalsignertest.NewServer(ctx, t) - t.Cleanup(func() { - if err := httpSrv.Shutdown(ctx); err != nil { - t.Fatalf("Error shutting down http server: %v", err) - } - }) + httpSrv, srv := externalsignertest.NewServer(t) cert, key := "./testdata/localhost.crt", "./testdata/localhost.key" go func() { - fmt.Println("Server is listening on port 1234...") if err := httpSrv.ListenAndServeTLS(cert, key); err != nil && err != http.ErrServerClosed { t.Errorf("ListenAndServeTLS() unexpected error: %v", err) return @@ -86,35 +113,48 @@ func TestExternalSigner(t *testing.T) { if err != nil { t.Fatalf("Error getting signer test config: %v", err) } + ctx := context.Background() signer, addr, err := externalSigner(ctx, signerCfg) if err != nil { t.Fatalf("Error getting external signer: %v", err) } - tx := types.NewTx( - &types.DynamicFeeTx{ - Nonce: 13, - GasTipCap: big.NewInt(1), - GasFeeCap: big.NewInt(1), - Gas: 3, - To: nil, - Value: big.NewInt(1), - Data: []byte{0x01, 0x02, 0x03}, + + for _, tc := range []struct { + desc string + tx *types.Transaction + }{ + { + desc: "blob transaction", + tx: blobTx, }, - ) - got, err := signer(ctx, addr, tx) - if err != nil { - t.Fatalf("Error signing transaction with external signer: %v", err) - } - args, err := txToSendTxArgs(addr, tx) - if err != nil { - t.Fatalf("Error converting transaction to sendTxArgs: %v", err) - } - want, err := srv.SignerFn(addr, args.ToTransaction()) - if err != nil { - t.Fatalf("Error signing transaction: %v", err) - } - if diff := cmp.Diff(want.Hash(), got.Hash()); diff != "" { - t.Errorf("Signing transaction: unexpected diff: %v\n", diff) + { + desc: "dynamic fee transaction", + tx: dynamicFeeTx, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + { + got, err := signer(ctx, addr, tc.tx) + if err != nil { + t.Fatalf("Error signing transaction with external signer: %v", err) + } + args, err := externalsigner.TxToSignTxArgs(addr, tc.tx) + if err != nil { + 
t.Fatalf("Error converting transaction to sendTxArgs: %v", err) + } + want, err := srv.SignerFn(addr, args.ToTransaction()) + if err != nil { + t.Fatalf("Error signing transaction: %v", err) + } + if diff := cmp.Diff(want.Hash(), got.Hash()); diff != "" { + t.Errorf("Signing transaction: unexpected diff: %v\n", diff) + } + hasher := types.LatestSignerForChainID(tc.tx.ChainId()) + if h, g := hasher.Hash(tc.tx), hasher.Hash(got); h != g { + t.Errorf("Signed transaction hash: %v differs from initial transaction hash: %v", g, h) + } + } + }) } } diff --git a/arbnode/dataposter/dbstorage/storage.go b/arbnode/dataposter/dbstorage/storage.go index 473bfa2c3b..2cfda5d779 100644 --- a/arbnode/dataposter/dbstorage/storage.go +++ b/arbnode/dataposter/dbstorage/storage.go @@ -58,6 +58,18 @@ func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResu return res, it.Error() } +func (s *Storage) Get(_ context.Context, index uint64) (*storage.QueuedTransaction, error) { + key := idxToKey(index) + value, err := s.db.Get(key) + if err != nil { + if errors.Is(err, leveldb.ErrNotFound) { + return nil, nil + } + return nil, err + } + return s.encDec().Decode(value) +} + func (s *Storage) lastItemIdx(context.Context) ([]byte, error) { return s.db.Get(lastItemIdxKey) } diff --git a/arbnode/dataposter/externalsigner/externalsigner.go b/arbnode/dataposter/externalsigner/externalsigner.go new file mode 100644 index 0000000000..10d9754cdf --- /dev/null +++ b/arbnode/dataposter/externalsigner/externalsigner.go @@ -0,0 +1,115 @@ +package externalsigner + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/signer/core/apitypes" + "github.com/holiman/uint256" +) + +type SignTxArgs struct { + *apitypes.SendTxArgs + + // Feilds for BlobTx type transactions. + BlobFeeCap *hexutil.Big `json:"maxFeePerBlobGas"` + BlobHashes []common.Hash `json:"blobVersionedHashes,omitempty"` + + // Blob sidecar fields for BlobTx type transactions. + // These are optional if BlobHashes are already present, since these + // are not included in the hash/signature. + Blobs []kzg4844.Blob `json:"blobs"` + Commitments []kzg4844.Commitment `json:"commitments"` + Proofs []kzg4844.Proof `json:"proofs"` +} + +func (a *SignTxArgs) ToTransaction() *types.Transaction { + if !a.isEIP4844() { + return a.SendTxArgs.ToTransaction() + } + to := common.Address{} + if a.To != nil { + to = a.To.Address() + } + var input []byte + if a.Input != nil { + input = *a.Input + } else if a.Data != nil { + input = *a.Data + } + al := types.AccessList{} + if a.AccessList != nil { + al = *a.AccessList + } + return types.NewTx(&types.BlobTx{ + To: to, + Nonce: uint64(a.SendTxArgs.Nonce), + Gas: uint64(a.Gas), + GasFeeCap: uint256.NewInt(a.MaxFeePerGas.ToInt().Uint64()), + GasTipCap: uint256.NewInt(a.MaxPriorityFeePerGas.ToInt().Uint64()), + Value: uint256.NewInt(a.Value.ToInt().Uint64()), + Data: input, + AccessList: al, + BlobFeeCap: uint256.NewInt(a.BlobFeeCap.ToInt().Uint64()), + BlobHashes: a.BlobHashes, + Sidecar: &types.BlobTxSidecar{ + Blobs: a.Blobs, + Commitments: a.Commitments, + Proofs: a.Proofs, + }, + ChainID: uint256.NewInt(a.ChainID.ToInt().Uint64()), + }) +} + +func (a *SignTxArgs) isEIP4844() bool { + return a.BlobHashes != nil || a.BlobFeeCap != nil +} + +// TxToSignTxArgs converts transaction to SendTxArgs. 
This is needed for +// the external signer to specify the From field. +func TxToSignTxArgs(addr common.Address, tx *types.Transaction) (*SignTxArgs, error) { + var to *common.MixedcaseAddress + if tx.To() != nil { + to = new(common.MixedcaseAddress) + *to = common.NewMixedcaseAddress(*tx.To()) + } + data := (hexutil.Bytes)(tx.Data()) + val := (*hexutil.Big)(tx.Value()) + if val == nil { + val = (*hexutil.Big)(big.NewInt(0)) + } + al := tx.AccessList() + var ( + blobs []kzg4844.Blob + commitments []kzg4844.Commitment + proofs []kzg4844.Proof + ) + if tx.BlobTxSidecar() != nil { + blobs = tx.BlobTxSidecar().Blobs + commitments = tx.BlobTxSidecar().Commitments + proofs = tx.BlobTxSidecar().Proofs + } + return &SignTxArgs{ + SendTxArgs: &apitypes.SendTxArgs{ + From: common.NewMixedcaseAddress(addr), + To: to, + Gas: hexutil.Uint64(tx.Gas()), + GasPrice: (*hexutil.Big)(tx.GasPrice()), + MaxFeePerGas: (*hexutil.Big)(tx.GasFeeCap()), + MaxPriorityFeePerGas: (*hexutil.Big)(tx.GasTipCap()), + Value: *val, + Nonce: hexutil.Uint64(tx.Nonce()), + Data: &data, + AccessList: &al, + ChainID: (*hexutil.Big)(tx.ChainId()), + }, + BlobFeeCap: (*hexutil.Big)(tx.BlobGasFeeCap()), + BlobHashes: tx.BlobHashes(), + Blobs: blobs, + Commitments: commitments, + Proofs: proofs, + }, nil +} diff --git a/arbnode/dataposter/externalsigner/externalsigner_test.go b/arbnode/dataposter/externalsigner/externalsigner_test.go new file mode 100644 index 0000000000..abd5acedcf --- /dev/null +++ b/arbnode/dataposter/externalsigner/externalsigner_test.go @@ -0,0 +1,74 @@ +package externalsigner + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/holiman/uint256" +) + +var ( + blobTx = types.NewTx( + &types.BlobTx{ + ChainID: uint256.NewInt(1337), + Nonce: 13, + GasTipCap: uint256.NewInt(1), + GasFeeCap: uint256.NewInt(1), + Gas: 3, + To: common.Address{}, + Value: uint256.NewInt(1), + Data: []byte{0x01, 0x02, 0x03}, + BlobHashes: []common.Hash{ + common.BigToHash(big.NewInt(1)), + common.BigToHash(big.NewInt(2)), + common.BigToHash(big.NewInt(3)), + }, + Sidecar: &types.BlobTxSidecar{}, + }, + ) + dynamicFeeTx = types.NewTx( + &types.DynamicFeeTx{ + ChainID: big.NewInt(1337), + Nonce: 13, + GasTipCap: big.NewInt(1), + GasFeeCap: big.NewInt(1), + Gas: 3, + To: nil, + Value: big.NewInt(1), + Data: []byte{0x01, 0x02, 0x03}, + }, + ) +) + +// TestToTranssaction tests that a transaction converted to SignTxArgs and then +// back to Transaction results in the same hash.
+func TestToTranssaction(t *testing.T) { + for _, tc := range []struct { + desc string + tx *types.Transaction + }{ + { + desc: "blob transaction", + tx: blobTx, + }, + { + desc: "dynamic fee transaction", + tx: dynamicFeeTx, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + signTxArgs, err := TxToSignTxArgs(common.Address{}, tc.tx) + if err != nil { + t.Fatalf("TxToSignTxArgs() unexpected error: %v", err) + } + got := signTxArgs.ToTransaction() + hasher := types.LatestSignerForChainID(nil) + if h, g := hasher.Hash(tc.tx), hasher.Hash(got); h != g { + t.Errorf("ToTransaction() got hash: %v want: %v", g, h) + } + }) + } + +} diff --git a/arbnode/dataposter/externalsignertest/externalsignertest.go b/arbnode/dataposter/externalsignertest/externalsignertest.go index 7d15515feb..73a5760fbe 100644 --- a/arbnode/dataposter/externalsignertest/externalsignertest.go +++ b/arbnode/dataposter/externalsignertest/externalsignertest.go @@ -19,7 +19,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/signer/core/apitypes" + "github.com/offchainlabs/nitro/arbnode/dataposter/externalsigner" ) var ( @@ -71,13 +71,14 @@ func CertPaths() (*CertAbsPaths, error) { }, nil } -func NewServer(ctx context.Context, t *testing.T) (*http.Server, *SignerAPI) { +func NewServer(t *testing.T) (*http.Server, *SignerAPI) { rpcServer := rpc.NewServer() signer, address, err := setupAccount("/tmp/keystore") if err != nil { t.Fatalf("Error setting up account: %v", err) } t.Cleanup(func() { os.RemoveAll("/tmp/keystore") }) + s := &SignerAPI{SignerFn: signer, Address: address} if err := rpcServer.RegisterName("test", s); err != nil { t.Fatalf("Failed to register EthSigningAPI, error: %v", err) @@ -107,6 +108,12 @@ func NewServer(ctx context.Context, t *testing.T) (*http.Server, *SignerAPI) { }, } + t.Cleanup(func() { + if err := httpServer.Close(); err != nil { + t.Fatalf("Error shutting down http server: %v", err) + } + }) + return httpServer, s } @@ -137,7 +144,7 @@ type SignerAPI struct { Address common.Address } -func (a *SignerAPI) SignTransaction(ctx context.Context, req *apitypes.SendTxArgs) (hexutil.Bytes, error) { +func (a *SignerAPI) SignTransaction(ctx context.Context, req *externalsigner.SignTxArgs) (hexutil.Bytes, error) { if req == nil { return nil, fmt.Errorf("nil request") } diff --git a/arbnode/dataposter/noop/storage.go b/arbnode/dataposter/noop/storage.go index b3947bcaa0..c90e36b067 100644 --- a/arbnode/dataposter/noop/storage.go +++ b/arbnode/dataposter/noop/storage.go @@ -16,6 +16,10 @@ func (s *Storage) FetchContents(_ context.Context, _, _ uint64) ([]*storage.Queu return nil, nil } +func (s *Storage) Get(_ context.Context, _ uint64) (*storage.QueuedTransaction, error) { + return nil, nil +} + func (s *Storage) FetchLast(ctx context.Context) (*storage.QueuedTransaction, error) { return nil, nil } diff --git a/arbnode/dataposter/redis/redisstorage.go b/arbnode/dataposter/redis/redisstorage.go index f2393611b2..8b6dcf65ac 100644 --- a/arbnode/dataposter/redis/redisstorage.go +++ b/arbnode/dataposter/redis/redisstorage.go @@ -78,6 +78,20 @@ func (s *Storage) FetchContents(ctx context.Context, startingIndex uint64, maxRe return items, nil } +func (s *Storage) Get(ctx context.Context, index uint64) (*storage.QueuedTransaction, error) { + contents, err := s.FetchContents(ctx, index, 1) + if err != nil { + return nil, err + } + if len(contents) == 0 { + return nil, nil + } else if 
len(contents) == 1 { + return contents[0], nil + } else { + return nil, fmt.Errorf("expected only one return value for Get but got %v", len(contents)) + } +} + func (s *Storage) FetchLast(ctx context.Context) (*storage.QueuedTransaction, error) { query := redis.ZRangeArgs{ Key: s.key, diff --git a/arbnode/dataposter/slice/slicestorage.go b/arbnode/dataposter/slice/slicestorage.go index dbd7a3ea5e..69de7564a3 100644 --- a/arbnode/dataposter/slice/slicestorage.go +++ b/arbnode/dataposter/slice/slicestorage.go @@ -45,6 +45,13 @@ func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResu return res, nil } +func (s *Storage) Get(_ context.Context, index uint64) (*storage.QueuedTransaction, error) { + if index >= s.firstNonce+uint64(len(s.queue)) || index < s.firstNonce { + return nil, nil + } + return s.encDec().Decode(s.queue[index-s.firstNonce]) +} + func (s *Storage) FetchLast(context.Context) (*storage.QueuedTransaction, error) { if len(s.queue) == 0 { return nil, nil diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go index 9586b9c9a9..8e5a7e1798 100644 --- a/arbnode/dataposter/storage/storage.go +++ b/arbnode/dataposter/storage/storage.go @@ -26,31 +26,42 @@ var ( ) type QueuedTransaction struct { - FullTx *types.Transaction - DeprecatedData types.DynamicFeeTx // FullTx should be used instead - Meta []byte - Sent bool - Created time.Time // may be earlier than the tx was given to the tx poster - NextReplacement time.Time + FullTx *types.Transaction + DeprecatedData types.DynamicFeeTx // FullTx should be used instead + Meta []byte + Sent bool + Created time.Time // may be earlier than the tx was given to the tx poster + NextReplacement time.Time + StoredCumulativeWeight *uint64 +} + +// CumulativeWeight returns a rough estimate of the total number of batches submitted at this point, not guaranteed to be exact +func (t *QueuedTransaction) CumulativeWeight() uint64 { + if t.StoredCumulativeWeight != nil { + return *t.StoredCumulativeWeight + } + return t.FullTx.Nonce() } type queuedTransactionForEncoding struct { - FullTx *types.Transaction - Data types.DynamicFeeTx - Meta []byte - Sent bool - Created RlpTime - NextReplacement RlpTime + FullTx *types.Transaction + Data types.DynamicFeeTx + Meta []byte + Sent bool + Created RlpTime + NextReplacement RlpTime + StoredCumulativeWeight *uint64 `rlp:"optional"` } func (qt *QueuedTransaction) EncodeRLP(w io.Writer) error { return rlp.Encode(w, queuedTransactionForEncoding{ - FullTx: qt.FullTx, - Data: qt.DeprecatedData, - Meta: qt.Meta, - Sent: qt.Sent, - Created: (RlpTime)(qt.Created), - NextReplacement: (RlpTime)(qt.NextReplacement), + FullTx: qt.FullTx, + Data: qt.DeprecatedData, + Meta: qt.Meta, + Sent: qt.Sent, + Created: (RlpTime)(qt.Created), + NextReplacement: (RlpTime)(qt.NextReplacement), + StoredCumulativeWeight: qt.StoredCumulativeWeight, }) } @@ -65,6 +76,7 @@ func (qt *QueuedTransaction) DecodeRLP(s *rlp.Stream) error { qt.Sent = qtEnc.Sent qt.Created = time.Time(qtEnc.Created) qt.NextReplacement = time.Time(qtEnc.NextReplacement) + qt.StoredCumulativeWeight = qtEnc.StoredCumulativeWeight return nil } diff --git a/arbnode/delayed.go b/arbnode/delayed.go index 498aa0475f..c166aa2b90 100644 --- a/arbnode/delayed.go +++ b/arbnode/delayed.go @@ -7,6 +7,7 @@ import ( "bytes" "context" "errors" + "fmt" "math/big" "sort" @@ -14,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" 
"github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -28,6 +30,7 @@ var messageDeliveredID common.Hash var inboxMessageDeliveredID common.Hash var inboxMessageFromOriginID common.Hash var l2MessageFromOriginCallABI abi.Method +var delayedInboxAccsCallABI abi.Method func init() { parsedIBridgeABI, err := bridgegen.IBridgeMetaData.GetAbi() @@ -35,6 +38,7 @@ func init() { panic(err) } messageDeliveredID = parsedIBridgeABI.Events["MessageDelivered"].ID + delayedInboxAccsCallABI = parsedIBridgeABI.Methods["delayedInboxAccs"] parsedIMessageProviderABI, err := bridgegen.IDelayedMessageProviderMetaData.GetAbi() if err != nil { @@ -95,12 +99,39 @@ func (b *DelayedBridge) GetMessageCount(ctx context.Context, blockNumber *big.In return bigRes.Uint64(), nil } -func (b *DelayedBridge) GetAccumulator(ctx context.Context, sequenceNumber uint64, blockNumber *big.Int) (common.Hash, error) { - opts := &bind.CallOpts{ - Context: ctx, - BlockNumber: blockNumber, +// Uses blockHash if nonzero, otherwise uses blockNumber +func (b *DelayedBridge) GetAccumulator(ctx context.Context, sequenceNumber uint64, blockNumber *big.Int, blockHash common.Hash) (common.Hash, error) { + calldata := append([]byte{}, delayedInboxAccsCallABI.ID...) + inputs, err := delayedInboxAccsCallABI.Inputs.Pack(arbmath.UintToBig(sequenceNumber)) + if err != nil { + return common.Hash{}, err + } + calldata = append(calldata, inputs...) + msg := ethereum.CallMsg{ + To: &b.address, + Data: calldata, + } + var result hexutil.Bytes + if blockHash != (common.Hash{}) { + result, err = b.client.CallContractAtHash(ctx, msg, blockHash) + } else { + result, err = b.client.CallContract(ctx, msg, blockNumber) + } + if err != nil { + return common.Hash{}, err + } + values, err := delayedInboxAccsCallABI.Outputs.Unpack(result) + if err != nil { + return common.Hash{}, err + } + if len(values) != 1 { + return common.Hash{}, fmt.Errorf("expected 1 return value from %v, got %v", delayedInboxAccsCallABI.Name, len(values)) + } + hash, ok := values[0].([32]byte) + if !ok { + return common.Hash{}, fmt.Errorf("expected [32]uint8 return value from %v, got %T", delayedInboxAccsCallABI.Name, values[0]) } - return b.con.DelayedInboxAccs(opts, new(big.Int).SetUint64(sequenceNumber)) + return hash, nil } type DelayedInboxMessage struct { @@ -190,10 +221,10 @@ func (b *DelayedBridge) logsToDeliveredMessages(ctx context.Context, logs []type msgKey := common.BigToHash(parsedLog.MessageIndex) data, ok := messageData[msgKey] if !ok { - return nil, errors.New("message not found") + return nil, fmt.Errorf("message %v data not found", parsedLog.MessageIndex) } if crypto.Keccak256Hash(data) != parsedLog.MessageDataHash { - return nil, errors.New("found message data with mismatched hash") + return nil, fmt.Errorf("found message %v data with mismatched hash", parsedLog.MessageIndex) } requestId := common.BigToHash(parsedLog.MessageIndex) diff --git a/arbnode/delayed_sequencer.go b/arbnode/delayed_sequencer.go index f1b912e0f7..8cbb094c16 100644 --- a/arbnode/delayed_sequencer.go +++ b/arbnode/delayed_sequencer.go @@ -100,16 +100,20 @@ func (d *DelayedSequencer) sequenceWithoutLockout(ctx context.Context, lastBlock } var finalized uint64 + var finalizedHash common.Hash if config.UseMergeFinality && headerreader.HeaderIndicatesFinalitySupport(lastBlockHeader) { + var header *types.Header var err error if 
config.RequireFullFinality { - finalized, err = d.l1Reader.LatestFinalizedBlockNr(ctx) + header, err = d.l1Reader.LatestFinalizedBlockHeader(ctx) } else { - finalized, err = d.l1Reader.LatestSafeBlockNr(ctx) + header, err = d.l1Reader.LatestSafeBlockHeader(ctx) } if err != nil { return err } + finalized = header.Number.Uint64() + finalizedHash = header.Hash() } else { currentNum := lastBlockHeader.Number.Int64() if currentNum < config.FinalizeDistance { @@ -167,7 +171,7 @@ func (d *DelayedSequencer) sequenceWithoutLockout(ctx context.Context, lastBlock // Sequence the delayed messages, if any if len(messages) > 0 { - delayedBridgeAcc, err := d.bridge.GetAccumulator(ctx, pos-1, new(big.Int).SetUint64(finalized)) + delayedBridgeAcc, err := d.bridge.GetAccumulator(ctx, pos-1, new(big.Int).SetUint64(finalized), finalizedHash) if err != nil { return err } diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index 5fca3c7eea..72881b52fd 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -32,6 +32,7 @@ type InboxReaderConfig struct { DefaultBlocksToRead uint64 `koanf:"default-blocks-to-read" reload:"hot"` TargetMessagesRead uint64 `koanf:"target-messages-read" reload:"hot"` MaxBlocksToRead uint64 `koanf:"max-blocks-to-read" reload:"hot"` + ReadMode string `koanf:"read-mode" reload:"hot"` } type InboxReaderConfigFetcher func() *InboxReaderConfig @@ -40,6 +41,10 @@ func (c *InboxReaderConfig) Validate() error { if c.MaxBlocksToRead == 0 || c.MaxBlocksToRead < c.DefaultBlocksToRead { return errors.New("inbox reader max-blocks-to-read cannot be zero or less than default-blocks-to-read") } + c.ReadMode = strings.ToLower(c.ReadMode) + if c.ReadMode != "latest" && c.ReadMode != "safe" && c.ReadMode != "finalized" { + return fmt.Errorf("inbox reader read-mode is invalid, want: latest or safe or finalized, got: %s", c.ReadMode) + } return nil } @@ -51,6 +56,7 @@ func InboxReaderConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".default-blocks-to-read", DefaultInboxReaderConfig.DefaultBlocksToRead, "the default number of blocks to read at once (will vary based on traffic by default)") f.Uint64(prefix+".target-messages-read", DefaultInboxReaderConfig.TargetMessagesRead, "if adjust-blocks-to-read is enabled, the target number of messages to read at once") f.Uint64(prefix+".max-blocks-to-read", DefaultInboxReaderConfig.MaxBlocksToRead, "if adjust-blocks-to-read is enabled, the maximum number of blocks to read at once") + f.String(prefix+".read-mode", DefaultInboxReaderConfig.ReadMode, "mode to only read latest or safe or finalized L1 blocks. Enabling safe or finalized disables feed input and output. Defaults to latest. 
Takes string input, valid strings- latest, safe, finalized") } var DefaultInboxReaderConfig = InboxReaderConfig{ @@ -61,6 +67,7 @@ var DefaultInboxReaderConfig = InboxReaderConfig{ DefaultBlocksToRead: 100, TargetMessagesRead: 500, MaxBlocksToRead: 2000, + ReadMode: "latest", } var TestInboxReaderConfig = InboxReaderConfig{ @@ -71,6 +78,7 @@ var TestInboxReaderConfig = InboxReaderConfig{ DefaultBlocksToRead: 100, TargetMessagesRead: 500, MaxBlocksToRead: 2000, + ReadMode: "latest", } type InboxReader struct { @@ -219,6 +227,7 @@ func (r *InboxReader) CaughtUp() chan struct{} { } func (r *InboxReader) run(ctx context.Context, hadError bool) error { + readMode := r.config().ReadMode from, err := r.getNextBlockToRead() if err != nil { return err @@ -239,38 +248,71 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } defer storeSeenBatchCount() // in case of error for { - - latestHeader, err := r.l1Reader.LastHeader(ctx) - if err != nil { - return err - } config := r.config() - currentHeight := latestHeader.Number - - neededBlockAdvance := config.DelayBlocks + arbmath.SaturatingUSub(config.MinBlocksToRead, 1) - neededBlockHeight := arbmath.BigAddByUint(from, neededBlockAdvance) - checkDelayTimer := time.NewTimer(config.CheckDelay) - WaitForHeight: - for arbmath.BigLessThan(currentHeight, neededBlockHeight) { - select { - case latestHeader = <-newHeaders: - if latestHeader == nil { - // shutting down + currentHeight := big.NewInt(0) + if readMode != "latest" { + var blockNum uint64 + fetchLatestSafeOrFinalized := func() { + if readMode == "safe" { + blockNum, err = r.l1Reader.LatestSafeBlockNr(ctx) + } else { + blockNum, err = r.l1Reader.LatestFinalizedBlockNr(ctx) + } + } + fetchLatestSafeOrFinalized() + if err != nil || blockNum == 0 { + return fmt.Errorf("inboxreader running in read only %s mode and unable to fetch latest %s block. err: %w", readMode, readMode, err) + } + currentHeight.SetUint64(blockNum) + // latest block in our db is newer than the latest safe/finalized block hence reset 'from' to match the last safe/finalized block number + if from.Uint64() > currentHeight.Uint64()+1 { + from.Set(currentHeight) + } + for currentHeight.Cmp(from) <= 0 { + select { + case <-newHeaders: + fetchLatestSafeOrFinalized() + if err != nil || blockNum == 0 { + return fmt.Errorf("inboxreader waiting for recent %s block and unable to fetch its block number. 
err: %w", readMode, err) + } + currentHeight.SetUint64(blockNum) + case <-ctx.Done(): return nil } - currentHeight = new(big.Int).Set(latestHeader.Number) - case <-ctx.Done(): - return nil - case <-checkDelayTimer.C: - break WaitForHeight } - } - checkDelayTimer.Stop() + } else { - if config.DelayBlocks > 0 { - currentHeight = new(big.Int).Sub(currentHeight, new(big.Int).SetUint64(config.DelayBlocks)) - if currentHeight.Cmp(r.firstMessageBlock) < 0 { - currentHeight = new(big.Int).Set(r.firstMessageBlock) + latestHeader, err := r.l1Reader.LastHeader(ctx) + if err != nil { + return err + } + currentHeight = latestHeader.Number + + neededBlockAdvance := config.DelayBlocks + arbmath.SaturatingUSub(config.MinBlocksToRead, 1) + neededBlockHeight := arbmath.BigAddByUint(from, neededBlockAdvance) + checkDelayTimer := time.NewTimer(config.CheckDelay) + WaitForHeight: + for arbmath.BigLessThan(currentHeight, neededBlockHeight) { + select { + case latestHeader = <-newHeaders: + if latestHeader == nil { + // shutting down + return nil + } + currentHeight = new(big.Int).Set(latestHeader.Number) + case <-ctx.Done(): + return nil + case <-checkDelayTimer.C: + break WaitForHeight + } + } + checkDelayTimer.Stop() + + if config.DelayBlocks > 0 { + currentHeight = new(big.Int).Sub(currentHeight, new(big.Int).SetUint64(config.DelayBlocks)) + if currentHeight.Cmp(r.firstMessageBlock) < 0 { + currentHeight = new(big.Int).Set(r.firstMessageBlock) + } } } @@ -300,7 +342,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } if checkingDelayedCount > 0 { checkingDelayedSeqNum := checkingDelayedCount - 1 - l1DelayedAcc, err := r.delayedBridge.GetAccumulator(ctx, checkingDelayedSeqNum, currentHeight) + l1DelayedAcc, err := r.delayedBridge.GetAccumulator(ctx, checkingDelayedSeqNum, currentHeight, common.Hash{}) if err != nil { return err } @@ -359,7 +401,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { r.lastReadBatchCount = checkingBatchCount r.lastReadMutex.Unlock() storeSeenBatchCount() - if !r.caughtUp { + if !r.caughtUp && readMode == "latest" { r.caughtUp = true close(r.caughtUpChan) } @@ -408,7 +450,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { if err != nil { return err } - if !r.caughtUp && to.Cmp(currentHeight) == 0 { + if !r.caughtUp && to.Cmp(currentHeight) == 0 && readMode == "latest" { r.caughtUp = true close(r.caughtUpChan) } diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index eaf863bffc..f98f93a3eb 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -374,11 +374,11 @@ func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage, hardR } if seqNum != pos { - return errors.New("unexpected delayed sequence number") + return fmt.Errorf("unexpected delayed sequence number %v, expected %v", seqNum, pos) } if nextAcc != message.BeforeInboxAcc { - return errors.New("previous delayed accumulator mismatch") + return fmt.Errorf("previous delayed accumulator mismatch for message %v", seqNum) } nextAcc = message.AfterInboxAcc() @@ -606,8 +606,14 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L ctx: ctx, client: client, } - - multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, t.das, t.blobReader, arbstate.KeysetValidate) + var daProviders []arbstate.DataAvailabilityProvider + if t.das != nil { + daProviders = append(daProviders, arbstate.NewDAProviderDAS(t.das)) + } + if t.blobReader != nil { + daProviders = 
append(daProviders, arbstate.NewDAProviderBlobReader(t.blobReader)) + } + multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, daProviders, arbstate.KeysetValidate) batchMessageCounts := make(map[uint64]arbutil.MessageIndex) currentpos := prevbatchmeta.MessageCount + 1 for { diff --git a/arbnode/node.go b/arbnode/node.go index ea6abcce2d..ff14583d10 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -43,6 +43,7 @@ import ( "github.com/offchainlabs/nitro/util/contracts" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/redisutil" + "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/wsbroadcastserver" ) @@ -66,10 +67,10 @@ func GenerateRollupConfig(prod bool, wasmModuleRoot common.Hash, rollupOwner com // TODO could the ChainConfig be just []byte? ChainConfig: string(serializedChainConfig), SequencerInboxMaxTimeVariation: rollupgen.ISequencerInboxMaxTimeVariation{ - DelayBlocks: 60 * 60 * 24 / 15, - FutureBlocks: 12, - DelaySeconds: 60 * 60 * 24, - FutureSeconds: 60 * 60, + DelayBlocks: big.NewInt(60 * 60 * 24 / 15), + FutureBlocks: big.NewInt(12), + DelaySeconds: big.NewInt(60 * 60 * 24), + FutureSeconds: big.NewInt(60 * 60), }, } } @@ -87,7 +88,6 @@ type Config struct { Staker staker.L1ValidatorConfig `koanf:"staker" reload:"hot"` SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` - BlobClient BlobClientConfig `koanf:"blob-client"` SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` Dangerous DangerousConfig `koanf:"dangerous"` TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` @@ -105,6 +105,13 @@ func (c *Config) Validate() error { if c.DelayedSequencer.Enable && !c.Sequencer { return errors.New("cannot enable delayed sequencer without enabling sequencer") } + if c.InboxReader.ReadMode != "latest" { + if c.Sequencer { + return errors.New("cannot enable inboxreader in safe or finalized mode along with sequencer") + } + c.Feed.Output.Enable = false + c.Feed.Input.URL = []string{} + } if err := c.BlockValidator.Validate(); err != nil { return err } @@ -149,7 +156,6 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed staker.L1ValidatorConfigAddOptions(prefix+".staker", f) SeqCoordinatorConfigAddOptions(prefix+".seq-coordinator", f) das.DataAvailabilityConfigAddNodeOptions(prefix+".data-availability", f) - BlobClientAddOptions(prefix+".blob-client", f) SyncMonitorConfigAddOptions(prefix+".sync-monitor", f) DangerousConfigAddOptions(prefix+".dangerous", f) TransactionStreamerConfigAddOptions(prefix+".transaction-streamer", f) @@ -189,6 +195,7 @@ func ConfigDefaultL1Test() *Config { func ConfigDefaultL1NonSequencerTest() *Config { config := ConfigDefault + config.Dangerous = TestDangerousConfig config.ParentChainReader = headerreader.TestConfig config.InboxReader = TestInboxReaderConfig config.DelayedSequencer.Enable = false @@ -197,13 +204,14 @@ func ConfigDefaultL1NonSequencerTest() *Config { config.BlockValidator = staker.TestBlockValidatorConfig config.Staker = staker.TestL1ValidatorConfig config.Staker.Enable = false - config.BlockValidator.ValidationServer.URL = "" + config.BlockValidator.ValidationServerConfigs = []rpcclient.ClientConfig{{URL: ""}} return &config } func ConfigDefaultL2Test() *Config { config := ConfigDefault + config.Dangerous = TestDangerousConfig 
config.ParentChainReader.Enable = false config.SeqCoordinator = TestSeqCoordinatorConfig config.Feed.Input.Verify.Dangerous.AcceptMissing = true @@ -212,7 +220,7 @@ func ConfigDefaultL2Test() *Config { config.SeqCoordinator.Signer.ECDSA.Dangerous.AcceptMissing = true config.Staker = staker.TestL1ValidatorConfig config.Staker.Enable = false - config.BlockValidator.ValidationServer.URL = "" + config.BlockValidator.ValidationServerConfigs = []rpcclient.ClientConfig{{URL: ""}} config.TransactionStreamer = DefaultTransactionStreamerConfig return &config @@ -221,16 +229,25 @@ func ConfigDefaultL2Test() *Config { type DangerousConfig struct { NoL1Listener bool `koanf:"no-l1-listener"` NoSequencerCoordinator bool `koanf:"no-sequencer-coordinator"` + DisableBlobReader bool `koanf:"disable-blob-reader"` } var DefaultDangerousConfig = DangerousConfig{ NoL1Listener: false, NoSequencerCoordinator: false, + DisableBlobReader: false, +} + +var TestDangerousConfig = DangerousConfig{ + NoL1Listener: false, + NoSequencerCoordinator: false, + DisableBlobReader: true, } func DangerousConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".no-l1-listener", DefaultDangerousConfig.NoL1Listener, "DANGEROUS! disables listening to L1. To be used in test nodes only") f.Bool(prefix+".no-sequencer-coordinator", DefaultDangerousConfig.NoSequencerCoordinator, "DANGEROUS! allows sequencing without sequencer-coordinator") + f.Bool(prefix+".disable-blob-reader", DefaultDangerousConfig.DisableBlobReader, "DANGEROUS! disables the EIP-4844 blob reader, which is necessary to read batches") } type Node struct { @@ -240,6 +257,7 @@ type Node struct { L1Reader *headerreader.HeaderReader TxStreamer *TransactionStreamer DeployInfo *chaininfo.RollupAddresses + BlobReader arbstate.BlobReader InboxReader *InboxReader InboxTracker *InboxTracker DelayedSequencer *DelayedSequencer @@ -358,6 +376,7 @@ func createNodeImpl( dataSigner signature.DataSignerFunc, fatalErrChan chan error, parentChainID *big.Int, + blobReader arbstate.BlobReader, ) (*Node, error) { config := configFetcher.Get() @@ -461,6 +480,7 @@ func createNodeImpl( L1Reader: nil, TxStreamer: txStreamer, DeployInfo: nil, + BlobReader: blobReader, InboxReader: nil, InboxTracker: nil, DelayedSequencer: nil, @@ -521,14 +541,6 @@ func createNodeImpl( return nil, errors.New("a data availability service is required for this chain, but it was not configured") } - var blobReader arbstate.BlobReader - if config.BlobClient.BeaconChainUrl != "" { - blobReader, err = NewBlobClient(config.BlobClient, l1client) - if err != nil { - return nil, err - } - } - inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader) if err != nil { return nil, err @@ -540,7 +552,7 @@ func createNodeImpl( txStreamer.SetInboxReaders(inboxReader, delayedBridge) var statelessBlockValidator *staker.StatelessBlockValidator - if config.BlockValidator.ValidationServer.URL != "" { + if config.BlockValidator.ValidationServerConfigs[0].URL != "" { var hotShotReader *HotShotReader if config.BlockValidator.Espresso { addr := common.HexToAddress(config.BlockValidator.HotShotAddress) @@ -669,6 +681,7 @@ func createNodeImpl( L1Reader: l1Reader, Inbox: inboxTracker, Streamer: txStreamer, + VersionGetter: exec, SyncMonitor: syncMonitor, Config: func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster }, DeployInfo: deployInfo, @@ -694,6 +707,7 @@ func createNodeImpl( L1Reader: l1Reader, TxStreamer: txStreamer, DeployInfo: deployInfo, + BlobReader: blobReader, InboxReader: inboxReader, 
InboxTracker: inboxTracker, DelayedSequencer: delayedSequencer, @@ -733,8 +747,9 @@ func CreateNode( dataSigner signature.DataSignerFunc, fatalErrChan chan error, parentChainID *big.Int, + blobReader arbstate.BlobReader, ) (*Node, error) { - currentNode, err := createNodeImpl(ctx, stack, exec, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan, parentChainID) + currentNode, err := createNodeImpl(ctx, stack, exec, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan, parentChainID, blobReader) if err != nil { return nil, err } @@ -783,6 +798,12 @@ func (n *Node) Start(ctx context.Context) error { if err != nil { return fmt.Errorf("error starting exec client: %w", err) } + if n.BlobReader != nil { + err = n.BlobReader.Initialize(ctx) + if err != nil { + return fmt.Errorf("error initializing blob reader: %w", err) + } + } if n.InboxTracker != nil { err = n.InboxTracker.Initialize() if err != nil { @@ -823,12 +844,6 @@ func (n *Node) Start(ctx context.Context) error { if n.SeqCoordinator != nil { n.SeqCoordinator.Start(ctx) } else { - if n.DelayedSequencer != nil { - err := n.DelayedSequencer.ForceSequenceDelayed(ctx) - if err != nil { - return fmt.Errorf("error initially sequencing delayed instructions: %w", err) - } - } n.Execution.Activate() } if n.MaintenanceRunner != nil { diff --git a/arbnode/sequencer_inbox.go b/arbnode/sequencer_inbox.go index b743bf0ef9..edda4e5512 100644 --- a/arbnode/sequencer_inbox.go +++ b/arbnode/sequencer_inbox.go @@ -45,7 +45,7 @@ func init() { } batchDeliveredID = sequencerBridgeABI.Events["SequencerBatchDelivered"].ID sequencerBatchDataABI = sequencerBridgeABI.Events[sequencerBatchDataEvent] - addSequencerL2BatchFromOriginCallABI = sequencerBridgeABI.Methods["addSequencerL2BatchFromOrigin"] + addSequencerL2BatchFromOriginCallABI = sequencerBridgeABI.Methods["addSequencerL2BatchFromOrigin0"] } type SequencerInbox struct { diff --git a/arbnode/sync_monitor.go b/arbnode/sync_monitor.go index 598ea4fb34..99a66abde2 100644 --- a/arbnode/sync_monitor.go +++ b/arbnode/sync_monitor.go @@ -26,21 +26,27 @@ func NewSyncMonitor(config *SyncMonitorConfig) *SyncMonitor { } type SyncMonitorConfig struct { - BlockBuildLag uint64 `koanf:"block-build-lag"` - BlockBuildSequencerInboxLag uint64 `koanf:"block-build-sequencer-inbox-lag"` - CoordinatorMsgLag uint64 `koanf:"coordinator-msg-lag"` + BlockBuildLag uint64 `koanf:"block-build-lag"` + BlockBuildSequencerInboxLag uint64 `koanf:"block-build-sequencer-inbox-lag"` + CoordinatorMsgLag uint64 `koanf:"coordinator-msg-lag"` + SafeBlockWaitForBlockValidator bool `koanf:"safe-block-wait-for-block-validator"` + FinalizedBlockWaitForBlockValidator bool `koanf:"finalized-block-wait-for-block-validator"` } var DefaultSyncMonitorConfig = SyncMonitorConfig{ - BlockBuildLag: 20, - BlockBuildSequencerInboxLag: 0, - CoordinatorMsgLag: 15, + BlockBuildLag: 20, + BlockBuildSequencerInboxLag: 0, + CoordinatorMsgLag: 15, + SafeBlockWaitForBlockValidator: false, + FinalizedBlockWaitForBlockValidator: false, } func SyncMonitorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".block-build-lag", DefaultSyncMonitorConfig.BlockBuildLag, "allowed lag between messages read and blocks built") f.Uint64(prefix+".block-build-sequencer-inbox-lag", DefaultSyncMonitorConfig.BlockBuildSequencerInboxLag, "allowed lag between messages read from sequencer inbox and blocks built") f.Uint64(prefix+".coordinator-msg-lag", 
DefaultSyncMonitorConfig.CoordinatorMsgLag, "allowed lag between local and remote messages") + f.Bool(prefix+".safe-block-wait-for-block-validator", DefaultSyncMonitorConfig.SafeBlockWaitForBlockValidator, "wait for block validator to complete before returning safe block number") + f.Bool(prefix+".finalized-block-wait-for-block-validator", DefaultSyncMonitorConfig.FinalizedBlockWaitForBlockValidator, "wait for block validator to complete before returning finalized block number") } func (s *SyncMonitor) Initialize(inboxReader *InboxReader, txStreamer *TransactionStreamer, coordinator *SeqCoordinator, exec execution.FullExecutionClient) { @@ -153,10 +159,27 @@ func (s *SyncMonitor) SafeBlockNumber(ctx context.Context) (uint64, error) { if err != nil { return 0, err } + // If SafeBlockWaitForBlockValidator is true, we want to wait for the block validator to finish + if s.config.SafeBlockWaitForBlockValidator { + latestValidatedCount, err := s.getLatestValidatedCount() + if err != nil { + return 0, err + } + if msg > latestValidatedCount { + msg = latestValidatedCount + } + } block := s.exec.MessageIndexToBlockNumber(msg - 1) return block, nil } +func (s *SyncMonitor) getLatestValidatedCount() (arbutil.MessageIndex, error) { + if s.txStreamer.validator == nil { + return 0, errors.New("validator not set up") + } + return s.txStreamer.validator.GetValidated(), nil +} + func (s *SyncMonitor) FinalizedBlockNumber(ctx context.Context) (uint64, error) { if s.inboxReader == nil || !s.initialized { return 0, errors.New("not set up for safeblock") @@ -165,6 +188,16 @@ func (s *SyncMonitor) FinalizedBlockNumber(ctx context.Context) (uint64, error) if err != nil { return 0, err } + // If FinalizedBlockWaitForBlockValidator is true, we want to wait for the block validator to finish + if s.config.FinalizedBlockWaitForBlockValidator { + latestValidatedCount, err := s.getLatestValidatedCount() + if err != nil { + return 0, err + } + if msg > latestValidatedCount { + msg = latestValidatedCount + } + } block := s.exec.MessageIndexToBlockNumber(msg - 1) return block, nil } diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 7fdb61aba2..9e3b90532e 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -318,18 +318,10 @@ func (state *ArbosState) UpgradeArbosVersion( } // ArbOS versions 12 through 19 are left to Orbit chains for custom upgrades. case 20: - if !chainConfig.DebugMode() { - // This upgrade isn't finalized so we only want to support it for testing - return fmt.Errorf( - "the chain is upgrading to unsupported ArbOS version %v, %w", - nextArbosVersion, - ErrFatalNodeOutOfDate, - ) - } // Update Brotli compression level for fast compression from 0 to 1 ensure(state.SetBrotliCompressionLevel(1)) default: - if nextArbosVersion >= 12 && state.arbosVersion < 20 { + if nextArbosVersion >= 12 && nextArbosVersion <= 19 { // ArbOS versions 12 through 19 are left to Orbit chains for custom upgrades. } else { return fmt.Errorf( diff --git a/arbstate/das_reader.go b/arbstate/das_reader.go index 46d01b7bb1..f131a53608 100644 --- a/arbstate/das_reader.go +++ b/arbstate/das_reader.go @@ -46,10 +46,18 @@ const BlobHashesHeaderFlag byte = L1AuthenticatedMessageHeaderFlag | 0x10 // 0x5 // BrotliMessageHeaderByte indicates that the message is brotli-compressed. 
const BrotliMessageHeaderByte byte = 0 +// KnownHeaderBits is all header bits with known meaning to this nitro version +const KnownHeaderBits byte = DASMessageHeaderFlag | TreeDASMessageHeaderFlag | L1AuthenticatedMessageHeaderFlag | ZeroheavyMessageHeaderFlag | BlobHashesHeaderFlag | BrotliMessageHeaderByte + +// hasBits returns true if `checking` has all `bits` func hasBits(checking byte, bits byte) bool { return (checking & bits) == bits } +func IsL1AuthenticatedMessageHeaderByte(header byte) bool { + return hasBits(header, L1AuthenticatedMessageHeaderFlag) +} + func IsDASMessageHeaderByte(header byte) bool { return hasBits(header, DASMessageHeaderFlag) } @@ -70,6 +78,11 @@ func IsBrotliMessageHeaderByte(b uint8) bool { return b == BrotliMessageHeaderByte } +// IsKnownHeaderByte returns true if the supplied header byte has only known bits +func IsKnownHeaderByte(b uint8) bool { + return b&^KnownHeaderBits == 0 +} + type DataAvailabilityCertificate struct { KeysetHash [32]byte DataHash [32]byte diff --git a/arbstate/inbox.go b/arbstate/inbox.go index fcb1c1ebcb..3105ee92b1 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -19,6 +19,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/offchainlabs/nitro/arbcompress" + "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/arbutil" @@ -45,6 +46,7 @@ type BlobReader interface { batchBlockHash common.Hash, versionedHashes []common.Hash, ) ([]kzg4844.Blob, error) + Initialize(ctx context.Context) error } type sequencerMessage struct { @@ -61,7 +63,12 @@ const maxZeroheavyDecompressedLen = 101*MaxDecompressedLen/100 + 64 const MaxSegmentsPerSequencerMessage = 100 * 1024 const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week -func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, dasReader DataAvailabilityReader, blobReader BlobReader, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { +var ( + ErrNoBlobReader = errors.New("blob batch payload was encountered but no BlobReader was configured") + ErrInvalidBlobDataFormat = errors.New("blob batch data is not a list of hashes as expected") +) + +func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, daProviders []DataAvailabilityProvider, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { if len(data) < 40 { return nil, errors.New("sequencer message missing L1 header") } @@ -75,47 +82,47 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash } payload := data[40:] + // Stage 0: Check if our node is out of date and we don't understand this batch type + // If the parent chain sequencer inbox smart contract authenticated this batch, + // an unknown header byte must mean that this node is out of date, + // because the smart contract understands the header byte and this node doesn't. + if len(payload) > 0 && IsL1AuthenticatedMessageHeaderByte(payload[0]) && !IsKnownHeaderByte(payload[0]) { + return nil, fmt.Errorf("%w: batch has unsupported authenticated header byte 0x%02x", arbosState.ErrFatalNodeOutOfDate, payload[0]) + } + // Stage 1: Extract the payload from any data availability header. // It's important that multiple DAS strategies can't both be invoked in the same batch, // as these headers are validated by the sequencer inbox and not other DASs. 
- if len(payload) > 0 && IsDASMessageHeaderByte(payload[0]) { - if dasReader == nil { - log.Error("No DAS Reader configured, but sequencer message found with DAS header") - } else { - var err error - payload, err = RecoverPayloadFromDasBatch(ctx, batchNum, data, dasReader, nil, keysetValidationMode) - if err != nil { - return nil, err - } - if payload == nil { - return parsedMsg, nil + // We try to extract payload from the first occuring valid DA provider in the daProviders list + if len(payload) > 0 { + foundDA := false + var err error + for _, provider := range daProviders { + if provider != nil && provider.IsValidHeaderByte(payload[0]) { + payload, err = provider.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, keysetValidationMode) + if err != nil { + return nil, err + } + if payload == nil { + return parsedMsg, nil + } + foundDA = true + break } } - } else if len(payload) > 0 && IsBlobHashesHeaderByte(payload[0]) { - blobHashes := payload[1:] - if len(blobHashes)%len(common.Hash{}) != 0 { - return nil, fmt.Errorf("blob batch data is not a list of hashes as expected") - } - versionedHashes := make([]common.Hash, len(blobHashes)/len(common.Hash{})) - for i := 0; i*32 < len(blobHashes); i += 1 { - copy(versionedHashes[i][:], blobHashes[i*32:(i+1)*32]) - } - - if blobReader == nil { - return nil, errors.New("blob batch payload was encountered but no BlobReader was configured") - } - kzgBlobs, err := blobReader.GetBlobs(ctx, batchBlockHash, versionedHashes) - if err != nil { - return nil, fmt.Errorf("failed to get blobs: %w", err) - } - payload, err = blobs.DecodeBlobs(kzgBlobs) - if err != nil { - log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) - return parsedMsg, nil + if !foundDA { + if IsDASMessageHeaderByte(payload[0]) { + log.Error("No DAS Reader configured, but sequencer message found with DAS header") + } else if IsBlobHashesHeaderByte(payload[0]) { + return nil, ErrNoBlobReader + } } } + // At this point, `payload` has not been validated by the sequencer inbox at all. + // It's not safe to trust any part of the payload from this point onwards. + // Stage 2: If enabled, decode the zero heavy payload (saves gas based on calldata charging). if len(payload) > 0 && IsZeroheavyEncodedHeaderByte(payload[0]) { pl, err := io.ReadAll(io.LimitReader(zeroheavy.NewZeroheavyDecoder(bytes.NewReader(payload[1:])), int64(maxZeroheavyDecompressedLen))) @@ -271,6 +278,92 @@ func RecoverPayloadFromDasBatch( return payload, nil } +type DataAvailabilityProvider interface { + // IsValidHeaderByte returns true if the given headerByte has bits corresponding to the DA provider + IsValidHeaderByte(headerByte byte) bool + + // RecoverPayloadFromBatch fetches the underlying payload from the DA provider given the batch header information + RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + keysetValidationMode KeysetValidationMode, + ) ([]byte, error) +} + +// NewDAProviderDAS is generally meant to be only used by nitro. 
+// DA Providers should implement methods in the DataAvailabilityProvider interface independently +func NewDAProviderDAS(das DataAvailabilityReader) *dAProviderForDAS { + return &dAProviderForDAS{ + das: das, + } +} + +type dAProviderForDAS struct { + das DataAvailabilityReader +} + +func (d *dAProviderForDAS) IsValidHeaderByte(headerByte byte) bool { + return IsDASMessageHeaderByte(headerByte) +} + +func (d *dAProviderForDAS) RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + keysetValidationMode KeysetValidationMode, +) ([]byte, error) { + return RecoverPayloadFromDasBatch(ctx, batchNum, sequencerMsg, d.das, preimages, keysetValidationMode) +} + +// NewDAProviderBlobReader is generally meant to be only used by nitro. +// DA Providers should implement methods in the DataAvailabilityProvider interface independently +func NewDAProviderBlobReader(blobReader BlobReader) *dAProviderForBlobReader { + return &dAProviderForBlobReader{ + blobReader: blobReader, + } +} + +type dAProviderForBlobReader struct { + blobReader BlobReader +} + +func (b *dAProviderForBlobReader) IsValidHeaderByte(headerByte byte) bool { + return IsBlobHashesHeaderByte(headerByte) +} + +func (b *dAProviderForBlobReader) RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + keysetValidationMode KeysetValidationMode, +) ([]byte, error) { + blobHashes := sequencerMsg[41:] + if len(blobHashes)%len(common.Hash{}) != 0 { + return nil, ErrInvalidBlobDataFormat + } + versionedHashes := make([]common.Hash, len(blobHashes)/len(common.Hash{})) + for i := 0; i*32 < len(blobHashes); i += 1 { + copy(versionedHashes[i][:], blobHashes[i*32:(i+1)*32]) + } + kzgBlobs, err := b.blobReader.GetBlobs(ctx, batchBlockHash, versionedHashes) + if err != nil { + return nil, fmt.Errorf("failed to get blobs: %w", err) + } + payload, err := blobs.DecodeBlobs(kzgBlobs) + if err != nil { + log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) + return nil, nil + } + return payload, nil +} + type KeysetValidationMode uint8 const KeysetValidate KeysetValidationMode = 0 @@ -280,8 +373,7 @@ const KeysetDontValidate KeysetValidationMode = 2 type inboxMultiplexer struct { backend InboxBackend delayedMessagesRead uint64 - dasReader DataAvailabilityReader - blobReader BlobReader + daProviders []DataAvailabilityProvider cachedSequencerMessage *sequencerMessage cachedSequencerMessageNum uint64 cachedSegmentNum uint64 @@ -291,12 +383,11 @@ type inboxMultiplexer struct { keysetValidationMode KeysetValidationMode } -func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dasReader DataAvailabilityReader, blobReader BlobReader, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { +func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, daProviders []DataAvailabilityProvider, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { return &inboxMultiplexer{ backend: backend, delayedMessagesRead: delayedMessagesRead, - dasReader: dasReader, - blobReader: blobReader, + daProviders: daProviders, keysetValidationMode: keysetValidationMode, } } @@ -318,7 +409,7 @@ func (r *inboxMultiplexer) Pop(ctx context.Context) (*arbostypes.MessageWithMeta } r.cachedSequencerMessageNum = 
r.backend.GetSequencerInboxPosition() var err error - r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.dasReader, r.blobReader, r.keysetValidationMode) + r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.daProviders, r.keysetValidationMode) if err != nil { return nil, err } diff --git a/arbstate/inbox_fuzz_test.go b/arbstate/inbox_fuzz_test.go index dcf43fd0da..b34c02534b 100644 --- a/arbstate/inbox_fuzz_test.go +++ b/arbstate/inbox_fuzz_test.go @@ -67,7 +67,7 @@ func FuzzInboxMultiplexer(f *testing.F) { delayedMessage: delayedMsg, positionWithinMessage: 0, } - multiplexer := NewInboxMultiplexer(backend, 0, nil, nil, KeysetValidate) + multiplexer := NewInboxMultiplexer(backend, 0, nil, KeysetValidate) _, err := multiplexer.Pop(context.TODO()) if err != nil { panic(err) diff --git a/arbutil/wait_for_l1.go b/arbutil/wait_for_l1.go index 9fb2cd10f8..180ce1c67e 100644 --- a/arbutil/wait_for_l1.go +++ b/arbutil/wait_for_l1.go @@ -24,6 +24,7 @@ type L1Interface interface { ethereum.TransactionReader TransactionSender(ctx context.Context, tx *types.Transaction, block common.Hash, index uint) (common.Address, error) BlockNumber(ctx context.Context) (uint64, error) + CallContractAtHash(ctx context.Context, msg ethereum.CallMsg, blockHash common.Hash) ([]byte, error) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) ChainID(ctx context.Context) (*big.Int, error) Client() rpc.ClientInterface diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go new file mode 100644 index 0000000000..bb6de00cad --- /dev/null +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -0,0 +1,179 @@ +package blocksreexecutor + +import ( + "context" + "errors" + "fmt" + "math/rand" + "runtime" + "strings" + + "github.com/ethereum/go-ethereum/arbitrum" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/stopwaiter" + flag "github.com/spf13/pflag" +) + +type Config struct { + Enable bool `koanf:"enable"` + Mode string `koanf:"mode"` + StartBlock uint64 `koanf:"start-block"` + EndBlock uint64 `koanf:"end-block"` + Room int `koanf:"room"` + BlocksPerThread uint64 `koanf:"blocks-per-thread"` +} + +func (c *Config) Validate() error { + c.Mode = strings.ToLower(c.Mode) + if c.Enable && c.Mode != "random" && c.Mode != "full" { + return errors.New("invalid mode for blocks re-execution") + } + if c.EndBlock < c.StartBlock { + return errors.New("invalid block range for blocks re-execution") + } + if c.Room == 0 { + return errors.New("room for blocks re-execution cannot be zero") + } + return nil +} + +var DefaultConfig = Config{ + Enable: false, + Mode: "random", + Room: runtime.NumCPU(), + BlocksPerThread: 10000, +} + +var TestConfig = Config{ + Enable: true, + Mode: "full", + Room: runtime.NumCPU(), + BlocksPerThread: 10, +} + +func ConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Bool(prefix+".enable", DefaultConfig.Enable, "enables re-execution of a range of blocks against historic state") + f.String(prefix+".mode", DefaultConfig.Mode, "mode to run the blocks-reexecutor on. Valid modes full and random. full - execute all the blocks in the given range. 
random - execute a random sample range of blocks within a given range") + f.Uint64(prefix+".start-block", DefaultConfig.StartBlock, "first block number of the block range for re-execution") + f.Uint64(prefix+".end-block", DefaultConfig.EndBlock, "last block number of the block range for re-execution") + f.Int(prefix+".room", DefaultConfig.Room, "number of threads to parallelize blocks re-execution") + f.Uint64(prefix+".blocks-per-thread", DefaultConfig.BlocksPerThread, "minimum number of blocks to execute per thread. When mode is random this acts as the size of random block range sample") +} + +type BlocksReExecutor struct { + stopwaiter.StopWaiter + config *Config + blockchain *core.BlockChain + stateFor arbitrum.StateForHeaderFunction + done chan struct{} + fatalErrChan chan error + startBlock uint64 + currentBlock uint64 +} + +func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *BlocksReExecutor { + start := c.StartBlock + end := c.EndBlock + chainStart := blockchain.Config().ArbitrumChainParams.GenesisBlockNum + chainEnd := blockchain.CurrentBlock().Number.Uint64() + if start == 0 && end == 0 { + start = chainStart + end = chainEnd + } + if start < chainStart { + log.Warn("state reexecutor's start block number is lower than genesis, resetting to genesis") + start = chainStart + } + if end > chainEnd { + log.Warn("state reexecutor's end block number is greater than latest, resetting to latest") + end = chainEnd + } + if c.Mode == "random" && end != start { + if c.BlocksPerThread > end-start { + c.BlocksPerThread = end - start + } + start += uint64(rand.Intn(int(end - start - c.BlocksPerThread + 1))) + end = start + c.BlocksPerThread + } + // inclusive of block reexecution [start, end] + if start > 0 { + start-- + } + return &BlocksReExecutor{ + config: c, + blockchain: blockchain, + currentBlock: end, + startBlock: start, + done: make(chan struct{}, c.Room), + fatalErrChan: fatalErrChan, + stateFor: func(header *types.Header) (*state.StateDB, arbitrum.StateReleaseFunc, error) { + state, err := blockchain.StateAt(header.Root) + return state, arbitrum.NoopStateRelease, err + }, + } +} + +// LaunchBlocksReExecution launches the thread to apply blocks of range [currentBlock-s.config.BlocksPerThread, currentBlock] to the last available valid state +func (s *BlocksReExecutor) LaunchBlocksReExecution(ctx context.Context, currentBlock uint64) uint64 { + start := arbmath.SaturatingUSub(currentBlock, s.config.BlocksPerThread) + if start < s.startBlock { + start = s.startBlock + } + // we don't use state release pattern here + // TODO do we want to use release pattern here?
+ startState, startHeader, _, err := arbitrum.FindLastAvailableState(ctx, s.blockchain, s.stateFor, s.blockchain.GetHeaderByNumber(start), nil, -1) + if err != nil { + s.fatalErrChan <- fmt.Errorf("blocksReExecutor failed to get last available state while searching for state at %d, err: %w", start, err) + return s.startBlock + } + start = startHeader.Number.Uint64() + s.LaunchThread(func(ctx context.Context) { + _, err := arbitrum.AdvanceStateUpToBlock(ctx, s.blockchain, startState, s.blockchain.GetHeaderByNumber(currentBlock), startHeader, nil) + if err != nil { + s.fatalErrChan <- fmt.Errorf("blocksReExecutor errored advancing state from block %d to block %d, err: %w", start, currentBlock, err) + } else { + log.Info("Successfully reexecuted blocks against historic state", "stateAt", start, "startBlock", start+1, "endBlock", currentBlock) + } + s.done <- struct{}{} + }) + return start +} + +func (s *BlocksReExecutor) Impl(ctx context.Context) { + var threadsLaunched uint64 + end := s.currentBlock + for i := 0; i < s.config.Room && s.currentBlock > s.startBlock; i++ { + threadsLaunched++ + s.currentBlock = s.LaunchBlocksReExecution(ctx, s.currentBlock) + } + for { + select { + case <-s.done: + if s.currentBlock > s.startBlock { + s.currentBlock = s.LaunchBlocksReExecution(ctx, s.currentBlock) + } else { + threadsLaunched-- + } + + case <-ctx.Done(): + return + } + if threadsLaunched == 0 { + break + } + } + log.Info("BlocksReExecutor successfully completed re-execution of blocks against historic state", "stateAt", s.startBlock, "startBlock", s.startBlock+1, "endBlock", end) +} + +func (s *BlocksReExecutor) Start(ctx context.Context) { + s.StopWaiter.Start(ctx, s) + s.LaunchThread(s.Impl) +} + +func (s *BlocksReExecutor) StopAndWait() { + s.StopWaiter.StopAndWait() +} diff --git a/cmd/conf/chain.go b/cmd/conf/chain.go index e9ec2af0c1..531945b4d6 100644 --- a/cmd/conf/chain.go +++ b/cmd/conf/chain.go @@ -7,14 +7,16 @@ import ( "time" "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/rpcclient" flag "github.com/spf13/pflag" ) -type L1Config struct { - ID uint64 `koanf:"id"` - Connection rpcclient.ClientConfig `koanf:"connection" reload:"hot"` - Wallet genericconf.WalletConfig `koanf:"wallet"` +type ParentChainConfig struct { + ID uint64 `koanf:"id"` + Connection rpcclient.ClientConfig `koanf:"connection" reload:"hot"` + Wallet genericconf.WalletConfig `koanf:"wallet"` + BlobClient headerreader.BlobClientConfig `koanf:"blob-client"` } var L1ConnectionConfigDefault = rpcclient.ClientConfig{ @@ -25,10 +27,11 @@ var L1ConnectionConfigDefault = rpcclient.ClientConfig{ ArgLogLimit: 2048, } -var L1ConfigDefault = L1Config{ +var L1ConfigDefault = ParentChainConfig{ ID: 0, Connection: L1ConnectionConfigDefault, Wallet: DefaultL1WalletConfig, + BlobClient: headerreader.DefaultBlobClientConfig, } var DefaultL1WalletConfig = genericconf.WalletConfig{ @@ -43,13 +46,14 @@ func L1ConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".id", L1ConfigDefault.ID, "if set other than 0, will be used to validate database and L1 connection") rpcclient.RPCClientAddOptions(prefix+".connection", f, &L1ConfigDefault.Connection) genericconf.WalletConfigAddOptions(prefix+".wallet", f, L1ConfigDefault.Wallet.Pathname) + headerreader.BlobClientAddOptions(prefix+".blob-client", f) } -func (c *L1Config) ResolveDirectoryNames(chain string) { +func (c *ParentChainConfig) ResolveDirectoryNames(chain string) { 
c.Wallet.ResolveDirectoryNames(chain) } -func (c *L1Config) Validate() error { +func (c *ParentChainConfig) Validate() error { return c.Connection.Validate() } diff --git a/cmd/conf/init.go b/cmd/conf/init.go index bebf1955b7..8a6c5096fb 100644 --- a/cmd/conf/init.go +++ b/cmd/conf/init.go @@ -3,41 +3,44 @@ package conf import ( "time" + "github.com/ethereum/go-ethereum/log" "github.com/spf13/pflag" ) type InitConfig struct { - Force bool `koanf:"force"` - Url string `koanf:"url"` - DownloadPath string `koanf:"download-path"` - DownloadPoll time.Duration `koanf:"download-poll"` - DevInit bool `koanf:"dev-init"` - DevInitAddress string `koanf:"dev-init-address"` - DevInitBlockNum uint64 `koanf:"dev-init-blocknum"` - Empty bool `koanf:"empty"` - AccountsPerSync uint `koanf:"accounts-per-sync"` - ImportFile string `koanf:"import-file"` - ThenQuit bool `koanf:"then-quit"` - Prune string `koanf:"prune"` - PruneBloomSize uint64 `koanf:"prune-bloom-size"` - ResetToMessage int64 `koanf:"reset-to-message"` + Force bool `koanf:"force"` + Url string `koanf:"url"` + DownloadPath string `koanf:"download-path"` + DownloadPoll time.Duration `koanf:"download-poll"` + DevInit bool `koanf:"dev-init"` + DevInitAddress string `koanf:"dev-init-address"` + DevInitBlockNum uint64 `koanf:"dev-init-blocknum"` + Empty bool `koanf:"empty"` + AccountsPerSync uint `koanf:"accounts-per-sync"` + ImportFile string `koanf:"import-file"` + ThenQuit bool `koanf:"then-quit"` + Prune string `koanf:"prune"` + PruneBloomSize uint64 `koanf:"prune-bloom-size"` + ResetToMessage int64 `koanf:"reset-to-message"` + RecreateMissingStateFrom uint64 `koanf:"recreate-missing-state-from"` } var InitConfigDefault = InitConfig{ - Force: false, - Url: "", - DownloadPath: "/tmp/", - DownloadPoll: time.Minute, - DevInit: false, - DevInitAddress: "", - DevInitBlockNum: 0, - Empty: false, - ImportFile: "", - AccountsPerSync: 100000, - ThenQuit: false, - Prune: "", - PruneBloomSize: 2048, - ResetToMessage: -1, + Force: false, + Url: "", + DownloadPath: "/tmp/", + DownloadPoll: time.Minute, + DevInit: false, + DevInitAddress: "", + DevInitBlockNum: 0, + Empty: false, + ImportFile: "", + AccountsPerSync: 100000, + ThenQuit: false, + Prune: "", + PruneBloomSize: 2048, + ResetToMessage: -1, + RecreateMissingStateFrom: 0, // 0 = disabled } func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { @@ -55,4 +58,12 @@ func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".prune", InitConfigDefault.Prune, "pruning for a given use: \"full\" for full nodes serving RPC requests, or \"validator\" for validators") f.Uint64(prefix+".prune-bloom-size", InitConfigDefault.PruneBloomSize, "the amount of memory in megabytes to use for the pruning bloom filter (higher values prune better)") f.Int64(prefix+".reset-to-message", InitConfigDefault.ResetToMessage, "forces a reset to an old message height. 
Also set max-reorg-resequence-depth=0 to force re-reading messages") + f.Uint64(prefix+".recreate-missing-state-from", InitConfigDefault.RecreateMissingStateFrom, "block number to start recreating missing states from (0 = disabled)") +} + +func (c *InitConfig) Validate() error { + if c.Force && c.RecreateMissingStateFrom > 0 { + log.Warn("force init enabled, recreate-missing-state-from will have no effect") + } + return nil } diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index 8c2d3fe963..1fd85f22cc 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -10,6 +10,7 @@ import ( "fmt" "math/big" "os" + "strings" "time" "github.com/offchainlabs/nitro/cmd/chaininfo" @@ -41,6 +42,8 @@ func main() { deployAccount := flag.String("l1DeployAccount", "", "l1 seq account to use (default is first account in keystore)") ownerAddressString := flag.String("ownerAddress", "", "the rollup owner's address") sequencerAddressString := flag.String("sequencerAddress", "", "the sequencer's address") + batchPostersString := flag.String("batchPosters", "", "the comma separated array of addresses of batch posters. Defaults to sequencer address") + batchPosterManagerAddressString := flag.String("batchPosterManger", "", "the batch poster manger's address. Defaults to owner address") nativeTokenAddressString := flag.String("nativeTokenAddress", "0x0000000000000000000000000000000000000000", "address of the ERC20 token which is used as native L2 currency") maxDataSizeUint := flag.Uint64("maxDataSize", 117964, "maximum data size of a batch or a cross-chain message (default = 90% of Geth's 128KB tx size limit)") loserEscrowAddressString := flag.String("loserEscrowAddress", "", "the address which half of challenge loser's funds accumulate at") @@ -57,6 +60,7 @@ func main() { txTimeout := flag.Duration("txtimeout", 10*time.Minute, "Timeout when waiting for a transaction to be included in a block") prod := flag.Bool("prod", false, "Whether to configure the rollup for production or testing") hotshotAddr := flag.String("hotshot", "", "the address of hotshot contract in L1") + isUsingFeeToken := flag.Bool("isUsingFeeToken", false, "true if the chain uses custom fee token") flag.Parse() l1ChainId := new(big.Int).SetUint64(*l1ChainIdUint) maxDataSize := new(big.Int).SetUint64(*maxDataSizeUint) @@ -93,15 +97,47 @@ func main() { if !common.IsHexAddress(*sequencerAddressString) && len(*sequencerAddressString) > 0 { panic("specified sequencer address is invalid") } + sequencerAddress := common.HexToAddress(*sequencerAddressString) + if !common.IsHexAddress(*ownerAddressString) { panic("please specify a valid rollup owner address") } + ownerAddress := common.HexToAddress(*ownerAddressString) + if *prod && !common.IsHexAddress(*loserEscrowAddressString) { panic("please specify a valid loser escrow address") } - sequencerAddress := common.HexToAddress(*sequencerAddressString) - ownerAddress := common.HexToAddress(*ownerAddressString) + var batchPosters []common.Address + if len(*batchPostersString) > 0 { + batchPostersArr := strings.Split(*batchPostersString, ",") + for _, address := range batchPostersArr { + if !common.IsHexAddress(address) { + log.Error("invalid address in batch posters array", "address", address) + continue + } + batchPosters = append(batchPosters, common.HexToAddress(address)) + } + if len(batchPosters) != len(batchPostersArr) { + panic("found at least one invalid address in batch posters array") + } + } + if len(batchPosters) == 0 { + log.Info("batch posters array was empty, defaulting to 
sequencer address") + batchPosters = append(batchPosters, sequencerAddress) + } + + var batchPosterManagerAddress common.Address + if common.IsHexAddress(*batchPosterManagerAddressString) { + batchPosterManagerAddress = common.HexToAddress(*batchPosterManagerAddressString) + } else { + if len(*batchPosterManagerAddressString) > 0 { + panic("please specify a valid batch poster manager address") + } + log.Info("batch poster manager address was empty, defaulting to owner address") + batchPosterManagerAddress = ownerAddress + } + loserEscrowAddress := common.HexToAddress(*loserEscrowAddressString) if sequencerAddress != (common.Address{}) && ownerAddress != l1TransactionOpts.From { panic("cannot specify sequencer address if owner is not deployer") @@ -148,12 +184,14 @@ func main() { ctx, l1Reader, l1TransactionOpts, - sequencerAddress, + batchPosters, + batchPosterManagerAddress, *authorizevalidators, arbnode.GenerateRollupConfig(*prod, moduleRoot, ownerAddress, &chainConfig, chainConfigJson, loserEscrowAddress), nativeToken, maxDataSize, hotshot, + *isUsingFeeToken, ) if err != nil { flag.Usage() diff --git a/cmd/genericconf/server.go b/cmd/genericconf/server.go index 3da027ab27..7550791d6d 100644 --- a/cmd/genericconf/server.go +++ b/cmd/genericconf/server.go @@ -8,7 +8,9 @@ import ( flag "github.com/spf13/pflag" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p/enode" ) type HTTPConfig struct { @@ -185,6 +187,65 @@ func AuthRPCConfigAddOptions(prefix string, f *flag.FlagSet) { f.StringSlice(prefix+".api", AuthRPCConfigDefault.API, "APIs offered over the AUTH-RPC interface") } +type P2PConfig struct { + ListenAddr string `koanf:"listen-addr"` + NoDial bool `koanf:"no-dial"` + NoDiscovery bool `koanf:"no-discovery"` + MaxPeers int `koanf:"max-peers"` + DiscoveryV5 bool `koanf:"discovery-v5"` + DiscoveryV4 bool `koanf:"discovery-v4"` + Bootnodes []string `koanf:"bootnodes"` + BootnodesV5 []string `koanf:"bootnodes-v5"` +} + +func (p P2PConfig) Apply(stackConf *node.Config) { + stackConf.P2P.ListenAddr = p.ListenAddr + stackConf.P2P.NoDial = p.NoDial + stackConf.P2P.NoDiscovery = p.NoDiscovery + stackConf.P2P.MaxPeers = p.MaxPeers + stackConf.P2P.DiscoveryV5 = p.DiscoveryV5 + stackConf.P2P.DiscoveryV4 = p.DiscoveryV4 + stackConf.P2P.BootstrapNodes = parseBootnodes(p.Bootnodes) + stackConf.P2P.BootstrapNodesV5 = parseBootnodes(p.BootnodesV5) +} + +func parseBootnodes(urls []string) []*enode.Node { + nodes := make([]*enode.Node, 0, len(urls)) + for _, url := range urls { + if url != "" { + node, err := enode.Parse(enode.ValidSchemes, url) + if err != nil { + log.Crit("Bootstrap URL invalid", "enode", url, "err", err) + return nil + } + nodes = append(nodes, node) + } + } + return nodes +} + +var P2PConfigDefault = P2PConfig{ + ListenAddr: "", + NoDial: true, + NoDiscovery: true, + MaxPeers: 50, + DiscoveryV5: false, + DiscoveryV4: false, + Bootnodes: []string{}, + BootnodesV5: []string{}, +} + +func P2PConfigAddOptions(prefix string, f *flag.FlagSet) { + f.String(prefix+".listen-addr", P2PConfigDefault.ListenAddr, "P2P listen address") + f.Bool(prefix+".no-dial", P2PConfigDefault.NoDial, "P2P no dial") + f.Bool(prefix+".no-discovery", P2PConfigDefault.NoDiscovery, "P2P no discovery") + f.Int(prefix+".max-peers", P2PConfigDefault.MaxPeers, "P2P max peers") + f.Bool(prefix+".discovery-v5", P2PConfigDefault.DiscoveryV5, "P2P discovery v5") + f.Bool(prefix+".discovery-v4", P2PConfigDefault.DiscoveryV4, "P2P discovery v4") + 
f.StringSlice(prefix+".bootnodes", P2PConfigDefault.Bootnodes, "P2P bootnodes") + f.StringSlice(prefix+".bootnodes-v5", P2PConfigDefault.BootnodesV5, "P2P bootnodes v5") +} + type MetricsServerConfig struct { Addr string `koanf:"addr"` Port int `koanf:"port"` diff --git a/cmd/nitro-val/config.go b/cmd/nitro-val/config.go index cf10787d6d..51d3978836 100644 --- a/cmd/nitro-val/config.go +++ b/cmd/nitro-val/config.go @@ -27,6 +27,7 @@ type ValidationNodeConfig struct { HTTP genericconf.HTTPConfig `koanf:"http"` WS genericconf.WSConfig `koanf:"ws"` IPC genericconf.IPCConfig `koanf:"ipc"` + P2P genericconf.P2PConfig `koanf:"p2p"` Auth genericconf.AuthRPCConfig `koanf:"auth"` Metrics bool `koanf:"metrics"` MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` @@ -66,6 +67,7 @@ var ValidationNodeConfigDefault = ValidationNodeConfig{ HTTP: HTTPConfigDefault, WS: WSConfigDefault, IPC: IPCConfigDefault, + P2P: genericconf.P2PConfigDefault, Auth: genericconf.AuthRPCConfigDefault, Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, @@ -85,6 +87,7 @@ func ValidationNodeConfigAddOptions(f *flag.FlagSet) { genericconf.WSConfigAddOptions("ws", f) genericconf.IPCConfigAddOptions("ipc", f) genericconf.AuthRPCConfigAddOptions("auth", f) + genericconf.P2PConfigAddOptions("p2p", f) f.Bool("metrics", ValidationNodeConfigDefault.Metrics, "enable metrics") genericconf.MetricsServerAddOptions("metrics-server", f) f.Bool("pprof", ValidationNodeConfigDefault.PProf, "enable pprof") diff --git a/cmd/nitro-val/nitro_val.go b/cmd/nitro-val/nitro_val.go index 20b8b23628..fea95cbb15 100644 --- a/cmd/nitro-val/nitro_val.go +++ b/cmd/nitro-val/nitro_val.go @@ -70,9 +70,7 @@ func mainImpl() int { nodeConfig.WS.Apply(&stackConf) nodeConfig.Auth.Apply(&stackConf) nodeConfig.IPC.Apply(&stackConf) - stackConf.P2P.ListenAddr = "" - stackConf.P2P.NoDial = true - stackConf.P2P.NoDiscovery = true + nodeConfig.P2P.Apply(&stackConf) vcsRevision, strippedRevision, vcsTime := confighelpers.GetVersion() stackConf.Version = strippedRevision diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index 4cf5dcda06..ebc57b13b8 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -34,6 +34,7 @@ import ( "github.com/offchainlabs/nitro/cmd/conf" "github.com/offchainlabs/nitro/cmd/ipfshelper" "github.com/offchainlabs/nitro/cmd/pruning" + "github.com/offchainlabs/nitro/cmd/staterecovery" "github.com/offchainlabs/nitro/cmd/util" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/statetransfer" @@ -183,6 +184,13 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if err != nil { return chainDb, l2BlockChain, err } + if config.Init.RecreateMissingStateFrom > 0 { + err = staterecovery.RecreateMissingStates(chainDb, l2BlockChain, cacheConfig, config.Init.RecreateMissingStateFrom) + if err != nil { + return chainDb, l2BlockChain, fmt.Errorf("failed to recreate missing states: %w", err) + } + } + return chainDb, l2BlockChain, nil } readOnlyDb.Close() diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 45f539488d..c32d2e6c80 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -42,7 +42,9 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbnode/resourcemanager" + "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" + blocksreexecutor "github.com/offchainlabs/nitro/blocks_reexecutor" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/conf" 
"github.com/offchainlabs/nitro/cmd/genericconf" @@ -178,9 +180,7 @@ func mainImpl() int { if nodeConfig.WS.ExposeAll { stackConf.WSModules = append(stackConf.WSModules, "personal") } - stackConf.P2P.ListenAddr = "" - stackConf.P2P.NoDial = true - stackConf.P2P.NoDiscovery = true + nodeConfig.P2P.Apply(&stackConf) vcsRevision, strippedRevision, vcsTime := confighelpers.GetVersion() stackConf.Version = strippedRevision @@ -330,6 +330,8 @@ func mainImpl() int { var rollupAddrs chaininfo.RollupAddresses var l1Client *ethclient.Client + var l1Reader *headerreader.HeaderReader + var blobReader arbstate.BlobReader if nodeConfig.Node.ParentChainReader.Enable { confFetcher := func() *rpcclient.ClientConfig { return &liveNodeConfig.Get().ParentChain.Connection } rpcClient := rpcclient.NewRpcClient(confFetcher, nil) @@ -352,6 +354,22 @@ func mainImpl() int { if err != nil { log.Crit("error getting rollup addresses", "err", err) } + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) + l1Reader, err = headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }, arbSys) + if err != nil { + log.Crit("failed to get L1 headerreader", "err", err) + } + if !l1Reader.IsParentChainArbitrum() && !nodeConfig.Node.Dangerous.DisableBlobReader { + if nodeConfig.ParentChain.BlobClient.BeaconUrl == "" { + flag.Usage() + log.Crit("a beacon chain RPC URL is required to read batches, but it was not configured (CLI argument: --parent-chain.blob-client.beacon-url [URL])") + } + blobClient, err := headerreader.NewBlobClient(nodeConfig.ParentChain.BlobClient, l1Client) + if err != nil { + log.Crit("failed to initialize blob client", "err", err) + } + blobReader = blobClient + } } if nodeConfig.Node.Staker.OnlyCreateWalletContract { @@ -359,12 +377,10 @@ func mainImpl() int { flag.Usage() log.Crit("--node.validator.only-create-wallet-contract requires --node.validator.use-smart-contract-wallet") } - arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) - l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }, arbSys) - if err != nil { - log.Crit("failed to get L1 headerreader", "error", err) + if l1Reader == nil { + flag.Usage() + log.Crit("--node.validator.only-create-wallet-contract conflicts with --node.dangerous.no-l1-listener") } - // Just create validator smart wallet if needed then exit deployInfo, err := chaininfo.GetRollupAddressesConfig(nodeConfig.Chain.ID, nodeConfig.Chain.Name, combinedL2ChainInfoFile, nodeConfig.Chain.InfoJson) if err != nil { @@ -389,7 +405,7 @@ func mainImpl() int { } var sameProcessValidationNodeEnabled bool - if nodeConfig.Node.BlockValidator.Enable && (nodeConfig.Node.BlockValidator.ValidationServer.URL == "self" || nodeConfig.Node.BlockValidator.ValidationServer.URL == "self-auth") { + if nodeConfig.Node.BlockValidator.Enable && (nodeConfig.Node.BlockValidator.ValidationServerConfigs[0].URL == "self" || nodeConfig.Node.BlockValidator.ValidationServerConfigs[0].URL == "self-auth") { sameProcessValidationNodeEnabled = true valnode.EnsureValidationExposedViaAuthRPC(&stackConf) } @@ -537,6 +553,7 @@ func mainImpl() int { dataSigner, fatalErrChan, big.NewInt(int64(nodeConfig.ParentChain.ID)), + blobReader, ) if err != nil { log.Error("failed to create node", "err", err) @@ -624,6 +641,11 @@ func mainImpl() int { // remove previous deferFuncs, StopAndWait closes database and blockchain. 
deferFuncs = []func(){func() { currentNode.StopAndWait() }} } + if nodeConfig.BlocksReExecutor.Enable && l2BlockChain != nil { + blocksReExecutor := blocksreexecutor.New(&nodeConfig.BlocksReExecutor, l2BlockChain, fatalErrChan) + blocksReExecutor.Start(ctx) + deferFuncs = append(deferFuncs, func() { blocksReExecutor.StopAndWait() }) + } sigint := make(chan os.Signal, 1) signal.Notify(sigint, os.Interrupt, syscall.SIGTERM) @@ -659,51 +681,55 @@ func mainImpl() int { } type NodeConfig struct { - Conf genericconf.ConfConfig `koanf:"conf" reload:"hot"` - Node arbnode.Config `koanf:"node" reload:"hot"` - Execution gethexec.Config `koanf:"execution" reload:"hot"` - Validation valnode.Config `koanf:"validation" reload:"hot"` - ParentChain conf.L1Config `koanf:"parent-chain" reload:"hot"` - Chain conf.L2Config `koanf:"chain"` - LogLevel int `koanf:"log-level" reload:"hot"` - LogType string `koanf:"log-type" reload:"hot"` - FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"` - Persistent conf.PersistentConfig `koanf:"persistent"` - HTTP genericconf.HTTPConfig `koanf:"http"` - WS genericconf.WSConfig `koanf:"ws"` - IPC genericconf.IPCConfig `koanf:"ipc"` - Auth genericconf.AuthRPCConfig `koanf:"auth"` - GraphQL genericconf.GraphQLConfig `koanf:"graphql"` - Metrics bool `koanf:"metrics"` - MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` - PProf bool `koanf:"pprof"` - PprofCfg genericconf.PProf `koanf:"pprof-cfg"` - Init conf.InitConfig `koanf:"init"` - Rpc genericconf.RpcConfig `koanf:"rpc"` + Conf genericconf.ConfConfig `koanf:"conf" reload:"hot"` + Node arbnode.Config `koanf:"node" reload:"hot"` + Execution gethexec.Config `koanf:"execution" reload:"hot"` + Validation valnode.Config `koanf:"validation" reload:"hot"` + ParentChain conf.ParentChainConfig `koanf:"parent-chain" reload:"hot"` + Chain conf.L2Config `koanf:"chain"` + LogLevel int `koanf:"log-level" reload:"hot"` + LogType string `koanf:"log-type" reload:"hot"` + FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"` + Persistent conf.PersistentConfig `koanf:"persistent"` + HTTP genericconf.HTTPConfig `koanf:"http"` + WS genericconf.WSConfig `koanf:"ws"` + IPC genericconf.IPCConfig `koanf:"ipc"` + Auth genericconf.AuthRPCConfig `koanf:"auth"` + GraphQL genericconf.GraphQLConfig `koanf:"graphql"` + P2P genericconf.P2PConfig `koanf:"p2p"` + Metrics bool `koanf:"metrics"` + MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` + PProf bool `koanf:"pprof"` + PprofCfg genericconf.PProf `koanf:"pprof-cfg"` + Init conf.InitConfig `koanf:"init"` + Rpc genericconf.RpcConfig `koanf:"rpc"` + BlocksReExecutor blocksreexecutor.Config `koanf:"blocks-reexecutor"` } var NodeConfigDefault = NodeConfig{ - Conf: genericconf.ConfConfigDefault, - Node: arbnode.ConfigDefault, - Execution: gethexec.ConfigDefault, - Validation: valnode.DefaultValidationConfig, - ParentChain: conf.L1ConfigDefault, - Chain: conf.L2ConfigDefault, - LogLevel: int(log.LvlInfo), - LogType: "plaintext", - FileLogging: genericconf.DefaultFileLoggingConfig, - Persistent: conf.PersistentConfigDefault, - HTTP: genericconf.HTTPConfigDefault, - WS: genericconf.WSConfigDefault, - IPC: genericconf.IPCConfigDefault, - Auth: genericconf.AuthRPCConfigDefault, - GraphQL: genericconf.GraphQLConfigDefault, - Metrics: false, - MetricsServer: genericconf.MetricsServerConfigDefault, - Init: conf.InitConfigDefault, - Rpc: genericconf.DefaultRpcConfig, - PProf: false, - PprofCfg: genericconf.PProfDefault, + Conf: 
genericconf.ConfConfigDefault, + Node: arbnode.ConfigDefault, + Execution: gethexec.ConfigDefault, + Validation: valnode.DefaultValidationConfig, + ParentChain: conf.L1ConfigDefault, + Chain: conf.L2ConfigDefault, + LogLevel: int(log.LvlInfo), + LogType: "plaintext", + FileLogging: genericconf.DefaultFileLoggingConfig, + Persistent: conf.PersistentConfigDefault, + HTTP: genericconf.HTTPConfigDefault, + WS: genericconf.WSConfigDefault, + IPC: genericconf.IPCConfigDefault, + Auth: genericconf.AuthRPCConfigDefault, + GraphQL: genericconf.GraphQLConfigDefault, + P2P: genericconf.P2PConfigDefault, + Metrics: false, + MetricsServer: genericconf.MetricsServerConfigDefault, + Init: conf.InitConfigDefault, + Rpc: genericconf.DefaultRpcConfig, + PProf: false, + PprofCfg: genericconf.PProfDefault, + BlocksReExecutor: blocksreexecutor.DefaultConfig, } func NodeConfigAddOptions(f *flag.FlagSet) { @@ -721,6 +747,7 @@ func NodeConfigAddOptions(f *flag.FlagSet) { genericconf.WSConfigAddOptions("ws", f) genericconf.IPCConfigAddOptions("ipc", f) genericconf.AuthRPCConfigAddOptions("auth", f) + genericconf.P2PConfigAddOptions("p2p", f) genericconf.GraphQLConfigAddOptions("graphql", f) f.Bool("metrics", NodeConfigDefault.Metrics, "enable metrics") genericconf.MetricsServerAddOptions("metrics-server", f) @@ -729,6 +756,7 @@ func NodeConfigAddOptions(f *flag.FlagSet) { conf.InitConfigAddOptions("init", f) genericconf.RpcConfigAddOptions("rpc", f) + blocksreexecutor.ConfigAddOptions("blocks-reexecutor", f) } func (c *NodeConfig) ResolveDirectoryNames() error { @@ -781,6 +809,12 @@ func (c *NodeConfig) CanReload(new *NodeConfig) error { } func (c *NodeConfig) Validate() error { + if c.Init.RecreateMissingStateFrom > 0 && !c.Execution.Caching.Archive { + return errors.New("recreate-missing-state-from enabled for a non-archive node") + } + if err := c.Init.Validate(); err != nil { + return err + } if err := c.ParentChain.Validate(); err != nil { return err } @@ -790,6 +824,9 @@ func (c *NodeConfig) Validate() error { if err := c.Execution.Validate(); err != nil { return err } + if err := c.BlocksReExecutor.Validate(); err != nil { + return err + } return c.Persistent.Validate() } diff --git a/cmd/replay/main.go b/cmd/replay/main.go index c21530550e..49049b7cde 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -145,6 +145,10 @@ func (r *BlobPreimageReader) GetBlobs( return blobs, nil } +func (r *BlobPreimageReader) Initialize(ctx context.Context) error { + return nil +} + // To generate: // key, _ := crypto.HexToECDSA("0000000000000000000000000000000000000000000000000000000000000001") // sig, _ := crypto.Sign(make([]byte, 32), key) @@ -208,7 +212,12 @@ func main() { if backend.GetPositionWithinMessage() > 0 { keysetValidationMode = arbstate.KeysetDontValidate } - inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dasReader, &BlobPreimageReader{}, keysetValidationMode) + var daProviders []arbstate.DataAvailabilityProvider + if dasReader != nil { + daProviders = append(daProviders, arbstate.NewDAProviderDAS(dasReader)) + } + daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(&BlobPreimageReader{})) + inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, daProviders, keysetValidationMode) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) if err != nil { diff --git a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go index 
782ab3801b..e963c0e96c 100644 --- a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go +++ b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go @@ -16,6 +16,9 @@ type RedisCoordinator struct { // UpdatePriorities updates the priority list of sequencers func (rc *RedisCoordinator) UpdatePriorities(ctx context.Context, priorities []string) error { + if len(priorities) == 0 { + return rc.Client.Del(ctx, redisutil.PRIORITIES_KEY).Err() + } prioritiesString := strings.Join(priorities, ",") err := rc.Client.Set(ctx, redisutil.PRIORITIES_KEY, prioritiesString, 0).Err() if err != nil { diff --git a/cmd/staterecovery/staterecovery.go b/cmd/staterecovery/staterecovery.go new file mode 100644 index 0000000000..6390826a91 --- /dev/null +++ b/cmd/staterecovery/staterecovery.go @@ -0,0 +1,88 @@ +package staterecovery + +import ( + "fmt" + "time" + + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" +) + +func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheConfig *core.CacheConfig, startBlock uint64) error { + start := time.Now() + currentHeader := bc.CurrentBlock() + if currentHeader == nil { + return fmt.Errorf("current header is nil") + } + target := currentHeader.Number.Uint64() + current := startBlock + genesis := bc.Config().ArbitrumChainParams.GenesisBlockNum + if current < genesis+1 { + current = genesis + 1 + log.Warn("recreate-missing-states-from before genesis+1, starting from genesis+1", "configured", startBlock, "override", current) + } + previousBlock := bc.GetBlockByNumber(current - 1) + if previousBlock == nil { + return fmt.Errorf("start block parent is missing, parent block number: %d", current-1) + } + hashConfig := *hashdb.Defaults + hashConfig.CleanCacheSize = cacheConfig.TrieCleanLimit + trieConfig := &trie.Config{ + Preimages: false, + HashDB: &hashConfig, + } + database := state.NewDatabaseWithConfig(chainDb, trieConfig) + defer database.TrieDB().Close() + previousState, err := state.New(previousBlock.Root(), database, nil) + if err != nil { + return fmt.Errorf("state of start block parent is missing: %w", err) + } + // we don't need to reference states with `trie.Database.Reference` here, because: + // * either the state nodes will be read from disk and then cached in cleans cache + // * or they will be recreated, saved to disk and then also cached in cleans cache + logged := time.Unix(0, 0) + recreated := 0 + for { + currentBlock := bc.GetBlockByNumber(current) + if currentBlock == nil { + break + } + if time.Since(logged) > 1*time.Minute { + log.Info("Recreating missing states", "block", current, "target", target, "remaining", int64(target)-int64(current), "elapsed", time.Since(start), "recreated", recreated) + logged = time.Now() + } + currentState, err := state.New(currentBlock.Root(), database, nil) + if err != nil { + _, _, _, err := bc.Processor().Process(currentBlock, previousState, vm.Config{}) + if err != nil { + return fmt.Errorf("processing block %d failed: %w", current, err) + } + root, err := previousState.Commit(current, bc.Config().IsEIP158(currentBlock.Number())) + if err != nil { + return fmt.Errorf("StateDB commit failed, number %d root %v: %w", current, currentBlock.Root(), err) + } + if root.Cmp(currentBlock.Root()) != 0 { + return fmt.Errorf("reached 
different state root after processing block %d, have %v, want %v", current, root, currentBlock.Root()) + } + // commit to disk + err = database.TrieDB().Commit(root, false) + if err != nil { + return fmt.Errorf("TrieDB commit failed, number %d root %v: %w", current, root, err) + } + currentState, err = state.New(currentBlock.Root(), database, nil) + if err != nil { + return fmt.Errorf("state reset after block %d failed: %w", current, err) + } + recreated++ + } + current++ + previousState = currentState + } + log.Info("Finished recreating missing states", "elapsed", time.Since(start), "recreated", recreated) + return nil +} diff --git a/contracts b/contracts index 3193b2ee1f..e16b1c4ba7 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 3193b2ee1f5f87f3437a76b8ddc0ac8a04ec2158 +Subproject commit e16b1c4ba7325988ca096a1ae01f1ad779ff370f diff --git a/das/bigcache_storage_service.go b/das/bigcache_storage_service.go deleted file mode 100644 index f8421bed1d..0000000000 --- a/das/bigcache_storage_service.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2022, Offchain Labs, Inc. -// For license information, see https://github.com/nitro/blob/master/LICENSE - -package das - -import ( - "context" - "fmt" - "time" - - "github.com/allegro/bigcache" - "github.com/offchainlabs/nitro/arbstate" - "github.com/offchainlabs/nitro/das/dastree" - "github.com/offchainlabs/nitro/util/pretty" - flag "github.com/spf13/pflag" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" -) - -type BigCacheConfig struct { - // TODO add other config information like HardMaxCacheSize - Enable bool `koanf:"enable"` - Expiration time.Duration `koanf:"expiration"` - MaxEntriesInWindow int -} - -var DefaultBigCacheConfig = BigCacheConfig{ - Expiration: time.Hour, -} - -var TestBigCacheConfig = BigCacheConfig{ - Enable: true, - Expiration: time.Hour, - MaxEntriesInWindow: 1000, -} - -func BigCacheConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Bool(prefix+".enable", DefaultBigCacheConfig.Enable, "Enable local in-memory caching of sequencer batch data") - f.Duration(prefix+".expiration", DefaultBigCacheConfig.Expiration, "Expiration time for in-memory cached sequencer batches") -} - -type BigCacheStorageService struct { - baseStorageService StorageService - bigCacheConfig BigCacheConfig - bigCache *bigcache.BigCache -} - -func NewBigCacheStorageService(bigCacheConfig BigCacheConfig, baseStorageService StorageService) (StorageService, error) { - conf := bigcache.DefaultConfig(bigCacheConfig.Expiration) - if bigCacheConfig.MaxEntriesInWindow > 0 { - conf.MaxEntriesInWindow = bigCacheConfig.MaxEntriesInWindow - } - bigCache, err := bigcache.NewBigCache(conf) - if err != nil { - return nil, err - } - return &BigCacheStorageService{ - baseStorageService: baseStorageService, - bigCacheConfig: bigCacheConfig, - bigCache: bigCache, - }, nil -} - -func (bcs *BigCacheStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { - log.Trace("das.BigCacheStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", bcs) - - ret, err := bcs.bigCache.Get(string(key.Bytes())) - if err != nil { - ret, err = bcs.baseStorageService.GetByHash(ctx, key) - if err != nil { - return nil, err - } - - err = bcs.bigCache.Set(string(key.Bytes()), ret) - if err != nil { - return nil, err - } - return ret, err - } - - return ret, err -} - -func (bcs *BigCacheStorageService) Put(ctx context.Context, value []byte, timeout uint64) error { - logPut("das.BigCacheStorageService.Put", 
value, timeout, bcs) - err := bcs.baseStorageService.Put(ctx, value, timeout) - if err != nil { - return err - } - return bcs.bigCache.Set(string(dastree.HashBytes(value)), value) -} - -func (bcs *BigCacheStorageService) Sync(ctx context.Context) error { - return bcs.baseStorageService.Sync(ctx) -} - -func (bcs *BigCacheStorageService) Close(ctx context.Context) error { - err := bcs.bigCache.Close() - if err != nil { - return err - } - return bcs.baseStorageService.Close(ctx) -} - -func (bcs *BigCacheStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return bcs.baseStorageService.ExpirationPolicy(ctx) -} - -func (bcs *BigCacheStorageService) String() string { - return fmt.Sprintf("BigCacheStorageService(%+v)", bcs.bigCacheConfig) -} - -func (bcs *BigCacheStorageService) HealthCheck(ctx context.Context) error { - return bcs.baseStorageService.HealthCheck(ctx) -} diff --git a/das/cache_storage_service.go b/das/cache_storage_service.go new file mode 100644 index 0000000000..13bdb189d3 --- /dev/null +++ b/das/cache_storage_service.go @@ -0,0 +1,95 @@ +// Copyright 2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package das + +import ( + "context" + "fmt" + + "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/das/dastree" + "github.com/offchainlabs/nitro/util/pretty" + flag "github.com/spf13/pflag" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" + "github.com/ethereum/go-ethereum/log" +) + +type CacheConfig struct { + Enable bool `koanf:"enable"` + Capacity int `koanf:"capacity"` +} + +var DefaultCacheConfig = CacheConfig{ + Capacity: 20_000, +} + +var TestCacheConfig = CacheConfig{ + Capacity: 1_000, +} + +func CacheConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Bool(prefix+".enable", DefaultCacheConfig.Enable, "Enable local in-memory caching of sequencer batch data") + f.Int(prefix+".capacity", DefaultCacheConfig.Capacity, "Maximum number of entries (up to 64KB each) to store in the cache.") +} + +type CacheStorageService struct { + baseStorageService StorageService + cache *lru.Cache[common.Hash, []byte] +} + +func NewCacheStorageService(cacheConfig CacheConfig, baseStorageService StorageService) *CacheStorageService { + return &CacheStorageService{ + baseStorageService: baseStorageService, + cache: lru.NewCache[common.Hash, []byte](cacheConfig.Capacity), + } +} + +func (c *CacheStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) { + log.Trace("das.CacheStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", c) + + if val, wasCached := c.cache.Get(key); wasCached { + return val, nil + } + + val, err := c.baseStorageService.GetByHash(ctx, key) + if err != nil { + return nil, err + } + + c.cache.Add(key, val) + + return val, nil +} + +func (c *CacheStorageService) Put(ctx context.Context, value []byte, timeout uint64) error { + logPut("das.CacheStorageService.Put", value, timeout, c) + err := c.baseStorageService.Put(ctx, value, timeout) + if err != nil { + return err + } + c.cache.Add(common.Hash(dastree.Hash(value)), value) + return nil +} + +func (c *CacheStorageService) Sync(ctx context.Context) error { + return c.baseStorageService.Sync(ctx) +} + +func (c *CacheStorageService) Close(ctx context.Context) error { + return c.baseStorageService.Close(ctx) +} + +func (c *CacheStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { + return 
c.baseStorageService.ExpirationPolicy(ctx) +} + +func (c *CacheStorageService) String() string { + return fmt.Sprintf("CacheStorageService(size:%+v)", len(c.cache.Keys())) +} + +func (c *CacheStorageService) HealthCheck(ctx context.Context) error { + return c.baseStorageService.HealthCheck(ctx) +} diff --git a/das/bigcache_storage_service_test.go b/das/cache_storage_service_test.go similarity index 57% rename from das/bigcache_storage_service_test.go rename to das/cache_storage_service_test.go index 5fd0cf68d2..8b4203dab5 100644 --- a/das/bigcache_storage_service_test.go +++ b/das/cache_storage_service_test.go @@ -8,42 +8,32 @@ import ( "context" "errors" "testing" - "time" - "github.com/allegro/bigcache" "github.com/offchainlabs/nitro/das/dastree" ) -func TestBigCacheStorageService(t *testing.T) { +func TestCacheStorageService(t *testing.T) { ctx := context.Background() - timeout := uint64(time.Now().Add(time.Hour).Unix()) baseStorageService := NewMemoryBackedStorageService(ctx) - bigCache, err := bigcache.NewBigCache(bigcache.DefaultConfig(TestBigCacheConfig.Expiration)) - Require(t, err) - bigCacheService := &BigCacheStorageService{ - baseStorageService: baseStorageService, - bigCacheConfig: TestBigCacheConfig, - bigCache: bigCache, - } - Require(t, err) + cacheService := NewCacheStorageService(TestCacheConfig, baseStorageService) val1 := []byte("The first value") val1CorrectKey := dastree.Hash(val1) val1IncorrectKey := dastree.Hash(append(val1, 0)) - _, err = bigCacheService.GetByHash(ctx, val1CorrectKey) + _, err := cacheService.GetByHash(ctx, val1CorrectKey) if !errors.Is(err, ErrNotFound) { t.Fatal(err) } - err = bigCacheService.Put(ctx, val1, timeout) + err = cacheService.Put(ctx, val1, 1) Require(t, err) - _, err = bigCacheService.GetByHash(ctx, val1IncorrectKey) + _, err = cacheService.GetByHash(ctx, val1IncorrectKey) if !errors.Is(err, ErrNotFound) { t.Fatal(err) } - val, err := bigCacheService.GetByHash(ctx, val1CorrectKey) + val, err := cacheService.GetByHash(ctx, val1CorrectKey) Require(t, err) if !bytes.Equal(val, val1) { t.Fatal(val, val1) @@ -54,14 +44,14 @@ func TestBigCacheStorageService(t *testing.T) { val2CorrectKey := dastree.Hash(val2) val2IncorrectKey := dastree.Hash(append(val2, 0)) - err = baseStorageService.Put(ctx, val2, timeout) + err = baseStorageService.Put(ctx, val2, 1) Require(t, err) - _, err = bigCacheService.GetByHash(ctx, val2IncorrectKey) + _, err = cacheService.GetByHash(ctx, val2IncorrectKey) if !errors.Is(err, ErrNotFound) { t.Fatal(err) } - val, err = bigCacheService.GetByHash(ctx, val2CorrectKey) + val, err = cacheService.GetByHash(ctx, val2CorrectKey) Require(t, err) if !bytes.Equal(val, val2) { t.Fatal(val, val2) @@ -69,19 +59,18 @@ func TestBigCacheStorageService(t *testing.T) { // For Case where the value is present in the cache storage but not present in the base. emptyBaseStorageService := NewMemoryBackedStorageService(ctx) - bigCacheServiceWithEmptyBaseStorage := &BigCacheStorageService{ + cacheServiceWithEmptyBaseStorage := &CacheStorageService{ baseStorageService: emptyBaseStorageService, - bigCacheConfig: TestBigCacheConfig, - bigCache: bigCache, + cache: cacheService.cache, } - val, err = bigCacheServiceWithEmptyBaseStorage.GetByHash(ctx, val1CorrectKey) + val, err = cacheServiceWithEmptyBaseStorage.GetByHash(ctx, val1CorrectKey) Require(t, err) if !bytes.Equal(val, val1) { t.Fatal(val, val1) } // Closes the base storage properly. 
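// Usage sketch, mirroring the test above: wrap any das StorageService in the
// new LRU-backed CacheStorageService so repeated GetByHash lookups for the
// same batch hash are served from memory instead of hitting the base store.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/offchainlabs/nitro/das"
	"github.com/offchainlabs/nitro/das/dastree"
)

func main() {
	ctx := context.Background()
	base := das.NewMemoryBackedStorageService(ctx)
	cached := das.NewCacheStorageService(das.TestCacheConfig, base)

	payload := []byte("sequencer batch payload")
	expiry := uint64(time.Now().Add(time.Hour).Unix())
	if err := cached.Put(ctx, payload, expiry); err != nil {
		panic(err)
	}

	// A read of the same hash is answered by the LRU cache.
	got, err := cached.GetByHash(ctx, dastree.Hash(payload))
	fmt.Println(string(got), err)
}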
- err = bigCacheService.Close(ctx) + err = cacheService.Close(ctx) Require(t, err) _, err = baseStorageService.GetByHash(ctx, val1CorrectKey) if !errors.Is(err, ErrClosed) { diff --git a/das/das.go b/das/das.go index 910e511083..dd8e43a34d 100644 --- a/das/das.go +++ b/das/das.go @@ -40,8 +40,8 @@ type DataAvailabilityConfig struct { RequestTimeout time.Duration `koanf:"request-timeout"` - LocalCache BigCacheConfig `koanf:"local-cache"` - RedisCache RedisConfig `koanf:"redis-cache"` + LocalCache CacheConfig `koanf:"local-cache"` + RedisCache RedisConfig `koanf:"redis-cache"` LocalDBStorage LocalDBStorageConfig `koanf:"local-db-storage"` LocalFileStorage LocalFileStorageConfig `koanf:"local-file-storage"` @@ -109,7 +109,7 @@ func dataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet, r role) { f.Bool(prefix+".disable-signature-checking", DefaultDataAvailabilityConfig.DisableSignatureChecking, "disables signature checking on Data Availability Store requests (DANGEROUS, FOR TESTING ONLY)") // Cache options - BigCacheConfigAddOptions(prefix+".local-cache", f) + CacheConfigAddOptions(prefix+".local-cache", f) RedisConfigAddOptions(prefix+".redis-cache", f) // Storage options diff --git a/das/dasRpcServer.go b/das/dasRpcServer.go index 9fa39d1959..2f1fc1fd42 100644 --- a/das/dasRpcServer.go +++ b/das/dasRpcServer.go @@ -5,6 +5,7 @@ package das import ( "context" + "errors" "fmt" "net" "net/http" @@ -44,6 +45,9 @@ func StartDASRPCServer(ctx context.Context, addr string, portNum uint64, rpcServ } func StartDASRPCServerOnListener(ctx context.Context, listener net.Listener, rpcServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader DataAvailabilityServiceReader, daWriter DataAvailabilityServiceWriter, daHealthChecker DataAvailabilityServiceHealthChecker) (*http.Server, error) { + if daWriter == nil { + return nil, errors.New("No writer backend was configured for DAS RPC server. Has the BLS signing key been set up (--data-availability.key.key-dir or --data-availability.key.priv-key options)?") + } rpcServer := rpc.NewServer() err := rpcServer.RegisterName("das", &DASRPCServer{ daReader: daReader, diff --git a/das/db_storage_service.go b/das/db_storage_service.go index b9af530b9e..6a98e3af1d 100644 --- a/das/db_storage_service.go +++ b/das/db_storage_service.go @@ -9,7 +9,7 @@ import ( "errors" "time" - badger "github.com/dgraph-io/badger/v3" + badger "github.com/dgraph-io/badger/v4" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbstate" diff --git a/das/factory.go b/das/factory.go index 0e6b292005..d5f103e548 100644 --- a/das/factory.go +++ b/das/factory.go @@ -112,7 +112,7 @@ func WrapStorageWithCache( return nil, nil } - // Enable caches, Redis and (local) BigCache. Local is the outermost, so it will be tried first. + // Enable caches, Redis and (local) Cache. Local is the outermost, so it will be tried first. 
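// Sketch (hypothetical minimal types) of the wrapping order described by the
// factory comment below: the innermost service is the real store, Redis wraps
// it, and the local in-memory cache wraps Redis, so lookups try local first.
package main

import "fmt"

type store interface {
	Get(key string) string
}

type base struct{}

func (base) Get(key string) string { return "from base: " + key }

type redisCache struct{ inner store }

func (r redisCache) Get(key string) string { return "redis(" + r.inner.Get(key) + ")" }

type localCache struct{ inner store }

func (l localCache) Get(key string) string { return "local(" + l.inner.Get(key) + ")" }

func main() {
	var s store = base{}
	s = redisCache{inner: s} // wrapped first -> inner layer
	s = localCache{inner: s} // wrapped last  -> outermost, consulted first
	fmt.Println(s.Get("batch-hash"))
}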
var err error if config.RedisCache.Enable { storageService, err = NewRedisStorageService(config.RedisCache, storageService) @@ -130,11 +130,8 @@ func WrapStorageWithCache( } } if config.LocalCache.Enable { - storageService, err = NewBigCacheStorageService(config.LocalCache, storageService) + storageService = NewCacheStorageService(config.LocalCache, storageService) lifecycleManager.Register(storageService) - if err != nil { - return nil, err - } } return storageService, nil } diff --git a/das/syncing_fallback_storage.go b/das/syncing_fallback_storage.go index 91f2e522a7..c79cd80400 100644 --- a/das/syncing_fallback_storage.go +++ b/das/syncing_fallback_storage.go @@ -53,7 +53,7 @@ func init() { } BatchDeliveredID = sequencerInboxABI.Events[sequencerBatchDeliveredEvent].ID sequencerBatchDataABI = sequencerInboxABI.Events[sequencerBatchDataEvent] - addSequencerL2BatchFromOriginCallABI = sequencerInboxABI.Methods["addSequencerL2BatchFromOrigin"] + addSequencerL2BatchFromOriginCallABI = sequencerInboxABI.Methods["addSequencerL2BatchFromOrigin0"] } type SyncToStorageConfig struct { diff --git a/deploy/deploy.go b/deploy/deploy.go index 31d6f84f80..6752ae493e 100644 --- a/deploy/deploy.go +++ b/deploy/deploy.go @@ -31,7 +31,7 @@ func andTxSucceeded(ctx context.Context, l1Reader *headerreader.HeaderReader, tx return nil } -func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, maxDataSize *big.Int) (common.Address, error) { +func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, maxDataSize *big.Int, isUsingFeeToken bool) (common.Address, error) { client := l1Reader.Client() /// deploy eth based templates @@ -46,7 +46,7 @@ func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReade if err != nil { return common.Address{}, fmt.Errorf("blob basefee reader deploy error: %w", err) } - seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, reader4844) + seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, reader4844, isUsingFeeToken) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("sequencer inbox deploy error: %w", err) @@ -161,8 +161,8 @@ func deployChallengeFactory(ctx context.Context, l1Reader *headerreader.HeaderRe return ospEntryAddr, challengeManagerAddr, nil } -func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, maxDataSize *big.Int, hotshot common.Address) (*rollupgen.RollupCreator, common.Address, common.Address, common.Address, error) { - bridgeCreator, err := deployBridgeCreator(ctx, l1Reader, auth, maxDataSize) +func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, maxDataSize *big.Int, hotshot common.Address, isUsingFeeToken bool) (*rollupgen.RollupCreator, common.Address, common.Address, common.Address, error) { + bridgeCreator, err := deployBridgeCreator(ctx, l1Reader, auth, maxDataSize, isUsingFeeToken) if err != nil { return nil, common.Address{}, common.Address{}, common.Address{}, fmt.Errorf("bridge creator deploy error: %w", err) } @@ -234,12 +234,12 @@ func deployRollupCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return rollupCreator, rollupCreatorAddress, validatorUtils, validatorWalletCreator, nil } -func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReader, deployAuth 
*bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, config rollupgen.Config, nativeToken common.Address, maxDataSize *big.Int, hotshot common.Address) (*chaininfo.RollupAddresses, error) { +func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReader, deployAuth *bind.TransactOpts, batchPosters []common.Address, batchPosterManager common.Address, authorizeValidators uint64, config rollupgen.Config, nativeToken common.Address, maxDataSize *big.Int, hotshot common.Address, isUsingFeeToken bool) (*chaininfo.RollupAddresses, error) { if config.WasmModuleRoot == (common.Hash{}) { return nil, errors.New("no machine specified") } - rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth, maxDataSize, hotshot) + rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth, maxDataSize, hotshot, isUsingFeeToken) if err != nil { return nil, fmt.Errorf("error deploying rollup creator: %w", err) } @@ -251,12 +251,13 @@ func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReade deployParams := rollupgen.RollupCreatorRollupDeploymentParams{ Config: config, - BatchPoster: batchPoster, Validators: validatorAddrs, MaxDataSize: maxDataSize, NativeToken: nativeToken, DeployFactoriesToL2: false, MaxFeePerGasForRetryables: big.NewInt(0), // needed when utility factories are deployed + BatchPosters: batchPosters, + BatchPosterManager: batchPosterManager, } tx, err := rollupCreator.CreateRollup( diff --git a/execution/gethexec/blockchain.go b/execution/gethexec/blockchain.go index a85224b635..2a20c3da26 100644 --- a/execution/gethexec/blockchain.go +++ b/execution/gethexec/blockchain.go @@ -67,6 +67,7 @@ var DefaultCachingConfig = CachingConfig{ MaxAmountOfGasToSkipStateSaving: 0, } +// TODO remove stack from parameters as it is no longer needed here func DefaultCacheConfigFor(stack *node.Node, cachingConfig *CachingConfig) *core.CacheConfig { baseConf := ethconfig.Defaults if cachingConfig.Archive { diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 718ffba668..7a4a0e98b1 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -102,7 +102,7 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost resequencing := false defer func() { // if we are resequencing old messages - don't release the lock - // lock will be relesed by thread listening to resequenceChan + // lock will be released by thread listening to resequenceChan if !resequencing { s.createBlocksMutex.Unlock() } @@ -693,6 +693,15 @@ func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, return nil } +func (s *ExecutionEngine) ArbOSVersionForMessageNumber(messageNum arbutil.MessageIndex) (uint64, error) { + block := s.bc.GetBlockByNumber(s.MessageIndexToBlockNumber(messageNum)) + if block == nil { + return 0, fmt.Errorf("couldn't find block for message number %d", messageNum) + } + extra := types.DeserializeHeaderExtraInformation(block.Header()) + return extra.ArbOSFormatVersion, nil +} + func (s *ExecutionEngine) Start(ctx_in context.Context) { s.StopWaiter.Start(ctx_in, s) s.LaunchThread(func(ctx context.Context) { diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index db0a26e8c4..18b9b31f0a 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -50,6 +50,7 @@ type Config struct { RPC 
arbitrum.Config `koanf:"rpc"` TxLookupLimit uint64 `koanf:"tx-lookup-limit"` Dangerous DangerousConfig `koanf:"dangerous"` + EnablePrefetchBlock bool `koanf:"enable-prefetch-block"` forwardingTarget string } @@ -84,6 +85,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) { CachingConfigAddOptions(prefix+".caching", f) f.Uint64(prefix+".tx-lookup-limit", ConfigDefault.TxLookupLimit, "retain the ability to lookup transactions by hash for the past N blocks (0 = all blocks)") DangerousConfigAddOptions(prefix+".dangerous", f) + f.Bool(prefix+".enable-prefetch-block", ConfigDefault.EnablePrefetchBlock, "enable prefetching of blocks") } var ConfigDefault = Config{ @@ -98,6 +100,7 @@ var ConfigDefault = Config{ Caching: DefaultCachingConfig, Dangerous: DefaultDangerousConfig, Forwarder: DefaultNodeForwarderConfig, + EnablePrefetchBlock: true, } func ConfigDefaultNonSequencerTest() *Config { @@ -149,6 +152,9 @@ func CreateExecutionNode( ) (*ExecutionNode, error) { config := configFetcher() execEngine, err := NewExecutionEngine(l2BlockChain) + if config.EnablePrefetchBlock { + execEngine.EnablePrefetchBlock() + } if err != nil { return nil, err } @@ -340,6 +346,9 @@ func (n *ExecutionNode) SequenceDelayedMessage(message *arbostypes.L1IncomingMes func (n *ExecutionNode) ResultAtPos(pos arbutil.MessageIndex) (*execution.MessageResult, error) { return n.ExecEngine.ResultAtPos(pos) } +func (n *ExecutionNode) ArbOSVersionForMessageNumber(messageNum arbutil.MessageIndex) (uint64, error) { + return n.ExecEngine.ArbOSVersionForMessageNumber(messageNum) +} func (n *ExecutionNode) RecordBlockCreation( ctx context.Context, diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 8186045128..232fd6bb6d 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -66,7 +66,6 @@ type SequencerConfig struct { MaxTxDataSize int `koanf:"max-tx-data-size" reload:"hot"` NonceFailureCacheSize int `koanf:"nonce-failure-cache-size" reload:"hot"` NonceFailureCacheExpiry time.Duration `koanf:"nonce-failure-cache-expiry" reload:"hot"` - EnablePrefetchBlock bool `koanf:"enable-prefetch-block"` // Espresso specific flags Espresso bool `koanf:"espresso"` @@ -104,7 +103,6 @@ var DefaultSequencerConfig = SequencerConfig{ MaxTxDataSize: 95000, NonceFailureCacheSize: 1024, NonceFailureCacheExpiry: time.Second, - EnablePrefetchBlock: false, } var TestSequencerConfig = SequencerConfig{ @@ -120,7 +118,6 @@ var TestSequencerConfig = SequencerConfig{ MaxTxDataSize: 95000, NonceFailureCacheSize: 1024, NonceFailureCacheExpiry: time.Second, - EnablePrefetchBlock: false, } func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -137,7 +134,6 @@ func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".nonce-failure-cache-size", DefaultSequencerConfig.NonceFailureCacheSize, "number of transactions with too high of a nonce to keep in memory while waiting for their predecessor") f.Duration(prefix+".nonce-failure-cache-expiry", DefaultSequencerConfig.NonceFailureCacheExpiry, "maximum amount of time to wait for a predecessor before rejecting a tx with nonce too high") f.Bool(prefix+".espresso", DefaultSequencerConfig.Espresso, "if true, transactions will be fetched from the espresso sequencer network") - f.Bool(prefix+".enable-prefetch-block", DefaultSequencerConfig.EnablePrefetchBlock, "enable prefetching of blocks") f.String(prefix+".hotshot-url", DefaultSequencerConfig.HotShotUrl, "") f.Uint64(prefix+".espresso-namespace", 
DefaultSequencerConfig.EspressoNamespace, "espresso namespace that corresponds the L2 chain") @@ -339,9 +335,6 @@ func NewSequencer(execEngine *ExecutionEngine, l1Reader *headerreader.HeaderRead } s.Pause() execEngine.EnableReorgSequencing() - if config.EnablePrefetchBlock { - execEngine.EnablePrefetchBlock() - } return s, nil } diff --git a/execution/interface.go b/execution/interface.go index e59272f982..0df7a890ae 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -70,6 +70,8 @@ type FullExecutionClient interface { // TODO: only used to get safe/finalized block numbers MessageIndexToBlockNumber(messageNum arbutil.MessageIndex) uint64 + + ArbOSVersionForMessageNumber(messageNum arbutil.MessageIndex) (uint64, error) } // not implemented in execution, used as input diff --git a/go-ethereum b/go-ethereum index df4a68e8f9..160c003e5f 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit df4a68e8f919e4c577fe2c84fb52dfe9bcd488bf +Subproject commit 160c003e5f5ec3ffd24f30c805813f64354a47cc diff --git a/go.mod b/go.mod index 523b11af58..c461ed6c77 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,6 @@ require ( github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible github.com/Shopify/toxiproxy v2.1.4+incompatible github.com/alicebob/miniredis/v2 v2.21.0 - github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 github.com/andybalholm/brotli v1.0.4 github.com/aws/aws-sdk-go-v2 v1.16.4 github.com/aws/aws-sdk-go-v2/config v1.15.5 @@ -21,19 +20,18 @@ require ( github.com/cavaliergopher/grab/v3 v3.0.1 github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 github.com/codeclysm/extract/v3 v3.0.2 - github.com/dgraph-io/badger/v3 v3.2103.2 + github.com/dgraph-io/badger/v4 v4.2.0 github.com/enescakir/emoji v1.0.0 github.com/ethereum/go-ethereum v1.13.5 github.com/fatih/structtag v1.2.0 github.com/gdamore/tcell/v2 v2.6.0 - github.com/google/go-cmp v0.5.9 + github.com/google/go-cmp v0.6.0 github.com/hashicorp/golang-lru/v2 v2.0.2 github.com/holiman/uint256 v1.2.3 github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-libipfs v0.6.2 github.com/ipfs/interface-go-ipfs-core v0.11.0 github.com/ipfs/kubo v0.19.1 - github.com/jarcoal/httpmock v1.3.1 github.com/knadh/koanf v1.4.0 github.com/libp2p/go-libp2p v0.27.8 github.com/miguelmota/go-ethereum-hdwallet v0.1.2 @@ -43,9 +41,9 @@ require ( github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 github.com/spf13/pflag v1.0.5 github.com/wealdtech/go-merkletree v1.0.0 - golang.org/x/crypto v0.17.0 - golang.org/x/sys v0.15.0 - golang.org/x/term v0.15.0 + golang.org/x/crypto v0.21.0 + golang.org/x/sys v0.18.0 + golang.org/x/term v0.18.0 golang.org/x/tools v0.13.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -85,7 +83,6 @@ require ( github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/ceramicnetwork/go-dag-jose v0.1.0 // indirect github.com/cespare/cp v1.1.1 // indirect - github.com/cespare/xxhash v1.1.0 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/redact v1.1.3 // indirect @@ -102,11 +99,11 @@ require ( github.com/deckarep/golang-set/v2 v2.1.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect github.com/dgraph-io/badger v1.6.2 // indirect - github.com/dgraph-io/ristretto v0.1.0 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dop251/goja 
v0.0.0-20230806174421-c933cf95e127 // indirect - github.com/dustin/go-humanize v1.0.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/elastic/gosigar v0.14.2 // indirect github.com/ethereum/c-kzg-4844 v0.4.0 // indirect github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 // indirect @@ -124,11 +121,11 @@ require ( github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.3.0 // indirect - github.com/golang/glog v1.1.2 // indirect + github.com/golang/glog v1.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/flatbuffers v1.12.1 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/flatbuffers v23.5.26+incompatible // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b // indirect github.com/gorilla/mux v1.8.0 // indirect @@ -191,7 +188,7 @@ require ( github.com/jbenet/goprocess v0.1.4 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5 // indirect - github.com/klauspost/compress v1.16.4 // indirect + github.com/klauspost/compress v1.17.7 // indirect github.com/klauspost/cpuid/v2 v2.2.4 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/pretty v0.3.1 // indirect @@ -291,7 +288,7 @@ require ( go4.org v0.0.0-20200411211856-f5505b9728dd // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.17.0 // indirect + golang.org/x/net v0.22.0 // indirect golang.org/x/sync v0.3.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect @@ -300,7 +297,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect google.golang.org/grpc v1.59.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/square/go-jose.v2 v2.5.1 // indirect lukechampine.com/blake3 v1.1.7 // indirect nhooyr.io/websocket v1.8.7 // indirect diff --git a/go.sum b/go.sum index d1ff7b1b76..c6df12125d 100644 --- a/go.sum +++ b/go.sum @@ -51,20 +51,6 @@ github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3 github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/EspressoSystems/espresso-sequencer-go v0.0.5 h1:CCg6Z7dabUqb5kQIw9YkqucKdFdLeDTfDFPrwRs1Qn8= -github.com/EspressoSystems/espresso-sequencer-go v0.0.5/go.mod h1:9dSL1bj0l+jpgaMRmi55YeRBd3AhOZz8/HXQcQ42mRQ= -github.com/EspressoSystems/espresso-sequencer-go v0.0.6 h1:NBiqPyyqzaumyWVwvx1EgQD7rwCg3lpo3LdPysRFnVY= -github.com/EspressoSystems/espresso-sequencer-go v0.0.6/go.mod h1:9dSL1bj0l+jpgaMRmi55YeRBd3AhOZz8/HXQcQ42mRQ= -github.com/EspressoSystems/espresso-sequencer-go v0.0.7 h1:qzP7WsBEej+6mZ6UUSWQpyHRn5qC9rQ3C7B1vxdavRg= -github.com/EspressoSystems/espresso-sequencer-go v0.0.7/go.mod h1:9dSL1bj0l+jpgaMRmi55YeRBd3AhOZz8/HXQcQ42mRQ= -github.com/EspressoSystems/espresso-sequencer-go v0.0.8-0.20240223102020-14d6344d941c 
h1:uMi+cuSgTzsHUOJHERRxH/tItZT7Oi7x9Vv+fcAOv74= -github.com/EspressoSystems/espresso-sequencer-go v0.0.8-0.20240223102020-14d6344d941c/go.mod h1:9dSL1bj0l+jpgaMRmi55YeRBd3AhOZz8/HXQcQ42mRQ= -github.com/EspressoSystems/espresso-sequencer-go v0.0.8 h1:OtlZZsGKaSONOFjlU5zcPuJkNj2Y8+fK5NZ+TQr/+RU= -github.com/EspressoSystems/espresso-sequencer-go v0.0.8/go.mod h1:9dSL1bj0l+jpgaMRmi55YeRBd3AhOZz8/HXQcQ42mRQ= -github.com/EspressoSystems/espresso-sequencer-go v0.0.9 h1:KJkF79mmbzPi3qqiDdbobxURCwZbbp7YXjcB3YSC0to= -github.com/EspressoSystems/espresso-sequencer-go v0.0.9/go.mod h1:9dSL1bj0l+jpgaMRmi55YeRBd3AhOZz8/HXQcQ42mRQ= -github.com/EspressoSystems/espresso-sequencer-go v0.0.10-0.20240312032852-d14bb3206833 h1:ecP+A2qv88Cf/+vARVZuUtjRZmMw4zfEwkSNBjmQHds= -github.com/EspressoSystems/espresso-sequencer-go v0.0.10-0.20240312032852-d14bb3206833/go.mod h1:9dSL1bj0l+jpgaMRmi55YeRBd3AhOZz8/HXQcQ42mRQ= github.com/EspressoSystems/espresso-sequencer-go v0.0.10 h1:INykV9fatyUZXzgV4wyZAwBtqXCPGHgOcwaUsHsVJA4= github.com/EspressoSystems/espresso-sequencer-go v0.0.10/go.mod h1:9dSL1bj0l+jpgaMRmi55YeRBd3AhOZz8/HXQcQ42mRQ= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= @@ -73,7 +59,6 @@ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= @@ -219,7 +204,6 @@ github.com/ceramicnetwork/go-dag-jose v0.1.0 h1:yJ/HVlfKpnD3LdYP03AHyTvbm3BpPiz2 github.com/ceramicnetwork/go-dag-jose v0.1.0/go.mod h1:qYA1nYt0X8u4XoMAVoOV3upUVKtrxy/I670Dg5F0wjI= github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU= github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -255,7 +239,6 @@ github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvD github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= -github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= @@ -302,7 
+285,6 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6Uh github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= -github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= @@ -312,11 +294,11 @@ github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6ps github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= -github.com/dgraph-io/badger/v3 v3.2103.2 h1:dpyM5eCJAtQCBcMCZcT4UBZchuTJgCywerHHgmxfxM8= -github.com/dgraph-io/badger/v3 v3.2103.2/go.mod h1:RHo4/GmYcKKh5Lxu63wLEMHJ70Pac2JqZRYGhlyAo2M= +github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8BzuWsEs= +github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= -github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= @@ -335,8 +317,9 @@ github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14 github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= @@ -418,7 +401,6 @@ github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp github.com/go-logfmt/logfmt 
v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -482,8 +464,8 @@ github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoB github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -518,19 +500,18 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= -github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v23.5.26+incompatible 
h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= +github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -544,8 +525,8 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -880,8 +861,6 @@ github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQ github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= -github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= @@ -918,7 +897,6 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -952,10 +930,9 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6 github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod 
h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU= -github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= +github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= @@ -1241,7 +1218,6 @@ github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpe github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= @@ -1301,7 +1277,6 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= @@ -1443,7 +1418,6 @@ github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAv github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= -github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -1683,7 +1657,6 @@ github.com/wealdtech/go-merkletree v1.0.0/go.mod h1:cdil512d/8ZC7Kx3bfrDvGMQXB25 github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4= github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= 
-github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa h1:EyA027ZAkuaCLoxVX4r1TZMPy1d31fM6hbfQ4OU4I5o= github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= @@ -1735,7 +1708,6 @@ go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= @@ -1828,8 +1800,8 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210920023735-84f357641f63/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1927,8 +1899,8 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2047,18 +2019,19 @@ golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2267,8 +2240,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/linter/koanf/handlers.go b/linters/koanf/handlers.go similarity index 99% rename from linter/koanf/handlers.go rename to linters/koanf/handlers.go index 5826004014..5ee3b80f9f 100644 --- a/linter/koanf/handlers.go +++ b/linters/koanf/handlers.go @@ -1,4 +1,4 @@ -package main +package koanf import ( "fmt" diff --git a/linter/koanf/koanf.go b/linters/koanf/koanf.go similarity index 92% rename from linter/koanf/koanf.go rename to linters/koanf/koanf.go index d6780760e7..e53064b6b3 100644 --- a/linter/koanf/koanf.go +++ b/linters/koanf/koanf.go @@ -1,4 +1,4 @@ -package main +package koanf import ( 
"errors" @@ -8,7 +8,6 @@ import ( "reflect" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/singlechecker" ) var ( @@ -18,10 +17,6 @@ var ( errIncorrectFlag = errors.New("mismatching flag initialization") ) -func New(conf any) ([]*analysis.Analyzer, error) { - return []*analysis.Analyzer{Analyzer}, nil -} - var Analyzer = &analysis.Analyzer{ Name: "koanfcheck", Doc: "check for koanf misconfigurations", @@ -101,7 +96,3 @@ func run(dryRun bool, pass *analysis.Pass) (interface{}, error) { } return ret, nil } - -func main() { - singlechecker.Main(Analyzer) -} diff --git a/linter/koanf/koanf_test.go b/linters/koanf/koanf_test.go similarity index 95% rename from linter/koanf/koanf_test.go rename to linters/koanf/koanf_test.go index 064ae533c4..9029951dfa 100644 --- a/linter/koanf/koanf_test.go +++ b/linters/koanf/koanf_test.go @@ -1,4 +1,4 @@ -package main +package koanf import ( "errors" @@ -20,7 +20,7 @@ func testData(t *testing.T) string { t.Helper() wd, err := os.Getwd() if err != nil { - t.Fatalf("Failed to get wd: %s", err) + t.Fatalf("Failed to get working directory: %v", err) } return filepath.Join(filepath.Dir(wd), "testdata") } diff --git a/linters/linters.go b/linters/linters.go new file mode 100644 index 0000000000..a6c9f6d55e --- /dev/null +++ b/linters/linters.go @@ -0,0 +1,18 @@ +package main + +import ( + "github.com/offchainlabs/nitro/linters/koanf" + "github.com/offchainlabs/nitro/linters/pointercheck" + "github.com/offchainlabs/nitro/linters/rightshift" + "github.com/offchainlabs/nitro/linters/structinit" + "golang.org/x/tools/go/analysis/multichecker" +) + +func main() { + multichecker.Main( + koanf.Analyzer, + pointercheck.Analyzer, + rightshift.Analyzer, + structinit.Analyzer, + ) +} diff --git a/linter/pointercheck/pointer.go b/linters/pointercheck/pointercheck.go similarity index 91% rename from linter/pointercheck/pointer.go rename to linters/pointercheck/pointercheck.go index 6500b01222..682ebd9357 100644 --- a/linter/pointercheck/pointer.go +++ b/linters/pointercheck/pointercheck.go @@ -1,4 +1,4 @@ -package main +package pointercheck import ( "fmt" @@ -8,13 +8,8 @@ import ( "reflect" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/singlechecker" ) -func New(conf any) ([]*analysis.Analyzer, error) { - return []*analysis.Analyzer{Analyzer}, nil -} - var Analyzer = &analysis.Analyzer{ Name: "pointercheck", Doc: "check for pointer comparison", @@ -94,7 +89,3 @@ func ptrIdent(pass *analysis.Pass, e ast.Expr) bool { } return false } - -func main() { - singlechecker.Main(Analyzer) -} diff --git a/linter/pointercheck/pointer_test.go b/linters/pointercheck/pointercheck_test.go similarity index 88% rename from linter/pointercheck/pointer_test.go rename to linters/pointercheck/pointercheck_test.go index 290e3826de..24f4534bca 100644 --- a/linter/pointercheck/pointer_test.go +++ b/linters/pointercheck/pointercheck_test.go @@ -1,4 +1,4 @@ -package main +package pointercheck import ( "os" @@ -11,7 +11,7 @@ import ( func TestAll(t *testing.T) { wd, err := os.Getwd() if err != nil { - t.Fatalf("Failed to get wd: %s", err) + t.Fatalf("Failed to get working directory: %v", err) } testdata := filepath.Join(filepath.Dir(wd), "testdata") res := analysistest.Run(t, testdata, analyzerForTests, "pointercheck") diff --git a/linters/rightshift/rightshift.go b/linters/rightshift/rightshift.go new file mode 100644 index 0000000000..d6fcbfec6c --- /dev/null +++ b/linters/rightshift/rightshift.go @@ -0,0 +1,71 @@ +package rightshift + +import ( + "go/ast" + 
"go/token" + "reflect" + + "golang.org/x/tools/go/analysis" +) + +var Analyzer = &analysis.Analyzer{ + Name: "rightshift", + Doc: "check for 1 >> x operation", + Run: func(p *analysis.Pass) (interface{}, error) { return run(false, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +var analyzerForTests = &analysis.Analyzer{ + Name: "testrightshift", + Doc: "check for pointer comparison (for tests)", + Run: func(p *analysis.Pass) (interface{}, error) { return run(true, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +// rightShiftError indicates the position of pointer comparison. +type rightShiftError struct { + Pos token.Position + Message string +} + +// Result is returned from the checkStruct function, and holds all rightshift +// operations. +type Result struct { + Errors []rightShiftError +} + +func run(dryRun bool, pass *analysis.Pass) (interface{}, error) { + var ret Result + for _, f := range pass.Files { + ast.Inspect(f, func(node ast.Node) bool { + be, ok := node.(*ast.BinaryExpr) + if !ok { + return true + } + // Check if the expression is '1 >> x'. + if be.Op == token.SHR && isOne(be.X) { + err := rightShiftError{ + Pos: pass.Fset.Position(be.Pos()), + Message: "found rightshift ('1 >> x') expression, did you mean '1 << x' ?", + } + ret.Errors = append(ret.Errors, err) + if !dryRun { + pass.Report(analysis.Diagnostic{ + Pos: pass.Fset.File(f.Pos()).Pos(err.Pos.Offset), + Message: err.Message, + Category: "pointercheck", + }) + } + } + return true + }, + ) + } + return ret, nil +} + +// isOne checks if the expression is a constant 1. +func isOne(expr ast.Expr) bool { + bl, ok := expr.(*ast.BasicLit) + return ok && bl.Kind == token.INT && bl.Value == "1" +} diff --git a/linters/rightshift/rightshift_test.go b/linters/rightshift/rightshift_test.go new file mode 100644 index 0000000000..3640d79975 --- /dev/null +++ b/linters/rightshift/rightshift_test.go @@ -0,0 +1,36 @@ +package rightshift + +import ( + "os" + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "golang.org/x/tools/go/analysis/analysistest" +) + +func TestAll(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get working directory: %v", err) + } + testdata := filepath.Join(filepath.Dir(wd), "testdata") + res := analysistest.Run(t, testdata, analyzerForTests, "rightshift") + want := []int{6, 11, 12} + got := erroLines(res) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("analysistest.Ru() unexpected diff in error lines:\n%s\n", diff) + } +} + +func erroLines(errs []*analysistest.Result) []int { + var ret []int + for _, e := range errs { + if r, ok := e.Result.(Result); ok { + for _, err := range r.Errors { + ret = append(ret, err.Pos.Line) + } + } + } + return ret +} diff --git a/linter/structinit/structinit.go b/linters/structinit/structinit.go similarity index 93% rename from linter/structinit/structinit.go rename to linters/structinit/structinit.go index e4e65bc3fc..236b8747b2 100644 --- a/linter/structinit/structinit.go +++ b/linters/structinit/structinit.go @@ -1,4 +1,4 @@ -package main +package structinit import ( "fmt" @@ -8,7 +8,6 @@ import ( "strings" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/singlechecker" ) // Tip for linter that struct that has this comment should be included in the @@ -16,10 +15,6 @@ import ( // Note: comment should be directly line above the struct definition. 
const linterTip = "// lint:require-exhaustive-initialization" -func New(conf any) ([]*analysis.Analyzer, error) { - return []*analysis.Analyzer{Analyzer}, nil -} - // Analyzer implements struct analyzer for structs that are annotated with // `linterTip`, it checks that every instantiation initializes all the fields. var Analyzer = &analysis.Analyzer{ @@ -116,7 +111,3 @@ type position struct { fileName string line int } - -func main() { - singlechecker.Main(Analyzer) -} diff --git a/linter/structinit/structinit_test.go b/linters/structinit/structinit_test.go similarity index 89% rename from linter/structinit/structinit_test.go rename to linters/structinit/structinit_test.go index db3676e185..57dfc2b000 100644 --- a/linter/structinit/structinit_test.go +++ b/linters/structinit/structinit_test.go @@ -1,4 +1,4 @@ -package main +package structinit import ( "os" @@ -12,7 +12,7 @@ func testData(t *testing.T) string { t.Helper() wd, err := os.Getwd() if err != nil { - t.Fatalf("Failed to get wd: %s", err) + t.Fatalf("Failed to get working directory: %v", err) } return filepath.Join(filepath.Dir(wd), "testdata") } diff --git a/linter/testdata/src/koanf/a/a.go b/linters/testdata/src/koanf/a/a.go similarity index 100% rename from linter/testdata/src/koanf/a/a.go rename to linters/testdata/src/koanf/a/a.go diff --git a/linter/testdata/src/koanf/b/b.go b/linters/testdata/src/koanf/b/b.go similarity index 100% rename from linter/testdata/src/koanf/b/b.go rename to linters/testdata/src/koanf/b/b.go diff --git a/linter/testdata/src/pointercheck/pointercheck.go b/linters/testdata/src/pointercheck/pointercheck.go similarity index 100% rename from linter/testdata/src/pointercheck/pointercheck.go rename to linters/testdata/src/pointercheck/pointercheck.go diff --git a/linters/testdata/src/rightshift/rightshift.go b/linters/testdata/src/rightshift/rightshift.go new file mode 100644 index 0000000000..3ad6d95980 --- /dev/null +++ b/linters/testdata/src/rightshift/rightshift.go @@ -0,0 +1,14 @@ +package rightshift + +import "fmt" + +func doThing(v int) int { + return 1 >> v // Error: Ln: 6 +} + +func calc() { + val := 10 + fmt.Printf("%v", 1>>val) // Error: Ln 11 + _ = doThing(1 >> val) // Error: Ln 12 + fmt.Printf("%v", 1< \[--binary-path PATH\] \[--fuzzcache-path PATH\] \[--nitro-path PATH\] + echo " " $0 \ \[--binary-path PATH\] \[--fuzzcache-path PATH\] \[--nitro-path PATH\] \[--duration DURATION\] echo echo fuzzer names: echo " " FuzzPrecompiles echo " " FuzzInboxMultiplexer echo " " FuzzStateTransition + echo + echo " " duration in minutes } if [[ $# -eq 0 ]]; then @@ -26,6 +28,7 @@ fuzzcachepath=../target/var/fuzz-cache nitropath=../ run_build=false test_group="" +duration=60 while [[ $# -gt 0 ]]; do case $1 in --nitro-path) @@ -55,6 +58,15 @@ while [[ $# -gt 0 ]]; do shift shift ;; + --duration) + duration="$2" + if ! [[ "$duration" =~ ^[0-9]+$ ]]; then + echo "Invalid timeout duration. Please specify positive integer (in minutes)" + exit 1 + fi + shift + shift + ;; --build) run_build=true shift @@ -83,6 +95,11 @@ while [[ $# -gt 0 ]]; do esac done +if [[ "$run_build" == "false" && -z "$test_group" ]]; then + echo you must specify either --build flag or fuzzer-name + printusage +fi + if $run_build; then for build_group in system_tests arbstate; do go test -c ${nitropath}/${build_group} -fuzz Fuzz -o "$binpath"/${build_group}.fuzz @@ -90,5 +107,12 @@ if $run_build; then fi if [[ ! 
-z $test_group ]]; then - "$binpath"/${test_group}.fuzz -test.run "^$" -test.fuzzcachedir "$fuzzcachepath" -test.fuzz $test_name + timeout "$((60 * duration))" "$binpath"/${test_group}.fuzz -test.run "^$" -test.fuzzcachedir "$fuzzcachepath" -test.fuzz $test_name || exit_status=$? fi + +if [ -n "$exit_status" ] && [ $exit_status -ne 0 ] && [ $exit_status -ne 124 ]; then + echo "Fuzzing failed." + exit $exit_status +fi + +echo "Fuzzing succeeded." diff --git a/scripts/startup-testnode.bash b/scripts/startup-testnode.bash new file mode 100755 index 0000000000..701e7ff59a --- /dev/null +++ b/scripts/startup-testnode.bash @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# The script starts up the test node (with timeout 1 minute), to make sure the +# nitro-testnode script isn't out of sync with flags with nitro. +# This is used in CI, basically as smoke test. + +timeout 60 ./nitro-testnode/test-node.bash --init --dev || exit_status=$? + +if [ -n "$exit_status" ] && [ $exit_status -ne 0 ] && [ $exit_status -ne 124 ]; then + echo "Startup failed." + exit $exit_status +fi + +echo "Startup succeeded." diff --git a/staker/block_validator.go b/staker/block_validator.go index c9d9188e36..bddc1c5fcf 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -6,6 +6,7 @@ package staker import ( "context" "encoding/binary" + "encoding/json" "errors" "fmt" "runtime" @@ -84,16 +85,18 @@ type BlockValidator struct { } type BlockValidatorConfig struct { - Enable bool `koanf:"enable"` - ValidationServer rpcclient.ClientConfig `koanf:"validation-server" reload:"hot"` - ValidationPoll time.Duration `koanf:"validation-poll" reload:"hot"` - PrerecordedBlocks uint64 `koanf:"prerecorded-blocks" reload:"hot"` - ForwardBlocks uint64 `koanf:"forward-blocks" reload:"hot"` - CurrentModuleRoot string `koanf:"current-module-root"` // TODO(magic) requires reinitialization on hot reload - PendingUpgradeModuleRoot string `koanf:"pending-upgrade-module-root"` // TODO(magic) requires StatelessBlockValidator recreation on hot reload - FailureIsFatal bool `koanf:"failure-is-fatal" reload:"hot"` - Dangerous BlockValidatorDangerousConfig `koanf:"dangerous"` - MemoryFreeLimit string `koanf:"memory-free-limit" reload:"hot"` + Enable bool `koanf:"enable"` + ValidationServer rpcclient.ClientConfig `koanf:"validation-server" reload:"hot"` + ValidationServerConfigs []rpcclient.ClientConfig `koanf:"validation-server-configs" reload:"hot"` + ValidationPoll time.Duration `koanf:"validation-poll" reload:"hot"` + PrerecordedBlocks uint64 `koanf:"prerecorded-blocks" reload:"hot"` + ForwardBlocks uint64 `koanf:"forward-blocks" reload:"hot"` + CurrentModuleRoot string `koanf:"current-module-root"` // TODO(magic) requires reinitialization on hot reload + PendingUpgradeModuleRoot string `koanf:"pending-upgrade-module-root"` // TODO(magic) requires StatelessBlockValidator recreation on hot reload + FailureIsFatal bool `koanf:"failure-is-fatal" reload:"hot"` + Dangerous BlockValidatorDangerousConfig `koanf:"dangerous"` + MemoryFreeLimit string `koanf:"memory-free-limit" reload:"hot"` + ValidationServerConfigsList string `koanf:"validation-server-configs-list" reload:"hot"` memoryFreeLimit int @@ -112,7 +115,26 @@ func (c *BlockValidatorConfig) Validate() error { } c.memoryFreeLimit = limit } - return c.ValidationServer.Validate() + if c.ValidationServerConfigs == nil { + if c.ValidationServerConfigsList == "default" { + c.ValidationServerConfigs = []rpcclient.ClientConfig{c.ValidationServer} + } else { + var validationServersConfigs 
[]rpcclient.ClientConfig + if err := json.Unmarshal([]byte(c.ValidationServerConfigsList), &validationServersConfigs); err != nil { + return fmt.Errorf("failed to parse block-validator validation-server-configs-list string: %w", err) + } + c.ValidationServerConfigs = validationServersConfigs + } + } + if len(c.ValidationServerConfigs) == 0 { + return fmt.Errorf("block-validator validation-server-configs is empty, need at least one validation server config") + } + for _, serverConfig := range c.ValidationServerConfigs { + if err := serverConfig.Validate(); err != nil { + return fmt.Errorf("failed to validate one of the block-validator validation-server-configs. url: %s, err: %w", serverConfig.URL, err) + } + } + return nil } type BlockValidatorDangerousConfig struct { @@ -124,6 +146,7 @@ type BlockValidatorConfigFetcher func() *BlockValidatorConfig func BlockValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultBlockValidatorConfig.Enable, "enable block-by-block validation") rpcclient.RPCClientAddOptions(prefix+".validation-server", f, &DefaultBlockValidatorConfig.ValidationServer) + f.String(prefix+".validation-server-configs-list", DefaultBlockValidatorConfig.ValidationServerConfigsList, "array of validation rpc configs given as a json string. time duration should be supplied in number indicating nanoseconds") f.Duration(prefix+".validation-poll", DefaultBlockValidatorConfig.ValidationPoll, "poll time to check validations") f.Uint64(prefix+".forward-blocks", DefaultBlockValidatorConfig.ForwardBlocks, "prepare entries for up to that many blocks ahead of validation (small footprint)") f.Uint64(prefix+".prerecorded-blocks", DefaultBlockValidatorConfig.PrerecordedBlocks, "record that many blocks ahead of validation (larger footprint)") @@ -141,21 +164,23 @@ func BlockValidatorDangerousConfigAddOptions(prefix string, f *flag.FlagSet) { } var DefaultBlockValidatorConfig = BlockValidatorConfig{ - Enable: false, - ValidationServer: rpcclient.DefaultClientConfig, - ValidationPoll: time.Second, - ForwardBlocks: 1024, - PrerecordedBlocks: uint64(2 * runtime.NumCPU()), - CurrentModuleRoot: "current", - PendingUpgradeModuleRoot: "latest", - FailureIsFatal: true, - Dangerous: DefaultBlockValidatorDangerousConfig, - MemoryFreeLimit: "default", + Enable: false, + ValidationServerConfigsList: "default", + ValidationServer: rpcclient.DefaultClientConfig, + ValidationPoll: time.Second, + ForwardBlocks: 1024, + PrerecordedBlocks: uint64(2 * runtime.NumCPU()), + CurrentModuleRoot: "current", + PendingUpgradeModuleRoot: "latest", + FailureIsFatal: true, + Dangerous: DefaultBlockValidatorDangerousConfig, + MemoryFreeLimit: "default", } var TestBlockValidatorConfig = BlockValidatorConfig{ Enable: false, ValidationServer: rpcclient.TestClientConfig, + ValidationServerConfigs: []rpcclient.ClientConfig{rpcclient.TestClientConfig}, ValidationPoll: 100 * time.Millisecond, ForwardBlocks: 128, PrerecordedBlocks: uint64(2 * runtime.NumCPU()), @@ -573,15 +598,21 @@ func (v *BlockValidator) iterativeValidationEntryCreator(ctx context.Context, ig return v.config().ValidationPoll } +func (v *BlockValidator) isMemoryLimitExceeded() bool { + if v.MemoryFreeLimitChecker == nil { + return false + } + exceeded, err := v.MemoryFreeLimitChecker.IsLimitExceeded() + if err != nil { + log.Error("error checking if free-memory limit exceeded using MemoryFreeLimitChecker", "err", err) + } + return exceeded +} + func (v *BlockValidator) sendNextRecordRequests(ctx context.Context) (bool, error) { - if 
v.MemoryFreeLimitChecker != nil { - exceeded, err := v.MemoryFreeLimitChecker.IsLimitExceeded() - if err != nil { - log.Error("error checking if free-memory limit exceeded using MemoryFreeLimitChecker", "err", err) - } - if exceeded { - return false, nil - } + if v.isMemoryLimitExceeded() { + log.Warn("sendNextRecordRequests: aborting due to running low on memory") + return false, nil } v.reorgMutex.RLock() pos := v.recordSent() @@ -612,14 +643,9 @@ func (v *BlockValidator) sendNextRecordRequests(ctx context.Context) (bool, erro return true, nil } for pos <= recordUntil { - if v.MemoryFreeLimitChecker != nil { - exceeded, err := v.MemoryFreeLimitChecker.IsLimitExceeded() - if err != nil { - log.Error("error checking if free-memory limit exceeded using MemoryFreeLimitChecker", "err", err) - } - if exceeded { - return false, nil - } + if v.isMemoryLimitExceeded() { + log.Warn("sendNextRecordRequests: aborting due to running low on memory") + return false, nil } validationStatus, found := v.validations.Load(pos) if !found { @@ -688,14 +714,12 @@ func (v *BlockValidator) advanceValidations(ctx context.Context) (*arbutil.Messa defer v.reorgMutex.RUnlock() wasmRoots := v.GetModuleRootsToValidate() - room := 100 // even if there is more room then that it's fine - for _, spawner := range v.validationSpawners { + rooms := make([]int, len(v.validationSpawners)) + currentSpawnerIndex := 0 + for i, spawner := range v.validationSpawners { here := spawner.Room() / len(wasmRoots) - if here <= 0 { - room = 0 - } - if here < room { - room = here + if here > 0 { + rooms[i] = here } } pos := v.validated() - 1 // to reverse the first +1 in the loop @@ -766,18 +790,19 @@ validationsLoop: log.Trace("result validated", "count", v.validated(), "blockHash", v.lastValidGS.BlockHash) continue } - if room == 0 { + for currentSpawnerIndex < len(rooms) { + if rooms[currentSpawnerIndex] > 0 { + break + } + currentSpawnerIndex++ + } + if currentSpawnerIndex == len(rooms) { log.Trace("advanceValidations: no more room", "pos", pos) return nil, nil } - if v.MemoryFreeLimitChecker != nil { - exceeded, err := v.MemoryFreeLimitChecker.IsLimitExceeded() - if err != nil { - log.Error("error checking if free-memory limit exceeded using MemoryFreeLimitChecker", "err", err) - } - if exceeded { - return nil, nil - } + if v.isMemoryLimitExceeded() { + log.Warn("advanceValidations: aborting due to running low on memory") + return nil, nil } if currentStatus == Prepared { input, err := validationStatus.Entry.ToInput() @@ -793,11 +818,9 @@ validationsLoop: defer validatorPendingValidationsGauge.Dec(1) var runs []validator.ValidationRun for _, moduleRoot := range wasmRoots { - for i, spawner := range v.validationSpawners { - run := spawner.Launch(input, moduleRoot) - log.Trace("advanceValidations: launched", "pos", validationStatus.Entry.Pos, "moduleRoot", moduleRoot, "spawner", i) - runs = append(runs, run) - } + run := v.validationSpawners[currentSpawnerIndex].Launch(input, moduleRoot) + log.Trace("advanceValidations: launched", "pos", validationStatus.Entry.Pos, "moduleRoot", moduleRoot, "spawner", currentSpawnerIndex) + runs = append(runs, run) } validationCtx, cancel := context.WithCancel(ctx) validationStatus.Runs = runs @@ -819,7 +842,10 @@ validationsLoop: } nonBlockingTrigger(v.progressValidationsChan) }) - room-- + rooms[currentSpawnerIndex]-- + if rooms[currentSpawnerIndex] == 0 { + currentSpawnerIndex++ + } } } } @@ -1236,3 +1262,9 @@ func mockHash(h uint64) common.Hash { binary.BigEndian.PutUint64(result[24:32], h) return 
result } + +func (v *BlockValidator) GetValidated() arbutil.MessageIndex { + v.reorgMutex.RLock() + defer v.reorgMutex.RUnlock() + return v.validated() +} diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index d57d9c30d5..cbb51369fd 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -253,14 +253,18 @@ func NewStatelessBlockValidator( if config().Espresso && config().HotShotAddress == "" { return nil, errors.New("cannot create a new stateless block validator in espresso mode without a hotshot reader") } - valConfFetcher := func() *rpcclient.ClientConfig { return &config().ValidationServer } - valClient := server_api.NewValidationClient(valConfFetcher, stack) + validationSpawners := make([]validator.ValidationSpawner, len(config().ValidationServerConfigs)) + for i, serverConfig := range config().ValidationServerConfigs { + valConfFetcher := func() *rpcclient.ClientConfig { return &serverConfig } + validationSpawners[i] = server_api.NewValidationClient(valConfFetcher, stack) + } + valConfFetcher := func() *rpcclient.ClientConfig { return &config().ValidationServerConfigs[0] } execClient := server_api.NewExecutionClient(valConfFetcher, stack) validator := &StatelessBlockValidator{ config: config(), execSpawner: execClient, recorder: recorder, - validationSpawners: []validator.ValidationSpawner{valClient}, + validationSpawners: validationSpawners, hotShotReader: hotShotReader, inboxReader: inboxReader, inboxTracker: inbox, @@ -335,7 +339,10 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * e.Preimages[arbutil.EthVersionedHashPreimageType] = make(map[common.Hash][]byte) } for i, blob := range blobs { - e.Preimages[arbutil.EthVersionedHashPreimageType][versionedHashes[i]] = blob[:] + // Prevent aliasing `blob` when slicing it, as for range loops overwrite the same variable + // Won't be necessary after Go 1.22 with https://go.dev/blog/loopvar-preview + b := blob + e.Preimages[arbutil.EthVersionedHashPreimageType][versionedHashes[i]] = b[:] } } if arbstate.IsDASMessageHeaderByte(batch.Data[40]) { diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index cacbe3cee4..0fc127d0e3 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -80,7 +80,7 @@ func externalSignerTestCfg(addr common.Address) (*dataposter.ExternalSignerCfg, func testBatchPosterParallel(t *testing.T, useRedis bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - httpSrv, srv := externalsignertest.NewServer(ctx, t) + httpSrv, srv := externalsignertest.NewServer(t) cp, err := externalsignertest.CertPaths() if err != nil { t.Fatalf("Error getting cert paths: %v", err) @@ -166,6 +166,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { L1Reader: builder.L2.ConsensusNode.L1Reader, Inbox: builder.L2.ConsensusNode.InboxTracker, Streamer: builder.L2.ConsensusNode.TxStreamer, + VersionGetter: builder.L2.ExecNode, SyncMonitor: builder.L2.ConsensusNode.SyncMonitor, Config: func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, DeployInfo: builder.L2.ConsensusNode.DeployInfo, diff --git a/system_tests/blocks_reexecutor_test.go b/system_tests/blocks_reexecutor_test.go new file mode 100644 index 0000000000..c2941ddcc4 --- /dev/null +++ b/system_tests/blocks_reexecutor_test.go @@ -0,0 +1,87 @@ +package arbtest + +import ( + "context" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + 
"github.com/ethereum/go-ethereum/params" + "github.com/offchainlabs/nitro/arbnode" + blocksreexecutor "github.com/offchainlabs/nitro/blocks_reexecutor" + "github.com/offchainlabs/nitro/execution/gethexec" +) + +func TestBlocksReExecutorModes(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + execConfig := gethexec.ConfigDefaultTest() + Require(t, execConfig.Validate()) + l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, nil, t.TempDir(), params.ArbitrumDevTestChainConfig(), &execConfig.Caching) + + execConfigFetcher := func() *gethexec.Config { return execConfig } + execNode, err := gethexec.CreateExecutionNode(ctx, stack, chainDb, blockchain, nil, execConfigFetcher) + Require(t, err) + + parentChainID := big.NewInt(1234) + feedErrChan := make(chan error, 10) + node, err := arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, parentChainID, nil) + Require(t, err) + err = node.TxStreamer.AddFakeInitMessage() + Require(t, err) + Require(t, node.Start(ctx)) + client := ClientForStack(t, stack) + + l2info.GenerateAccount("User2") + for i := 0; i < 100; i++ { + tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, common.Big1, nil) + err := client.SendTransaction(ctx, tx) + Require(t, err) + receipt, err := EnsureTxSucceeded(ctx, client, tx) + Require(t, err) + if have, want := receipt.BlockNumber.Uint64(), uint64(i)+1; have != want { + Fatal(t, "internal test error - tx got included in unexpected block number, have:", have, "want:", want) + } + } + + success := make(chan struct{}) + + // Reexecute blocks at mode full + go func() { + executorFull := blocksreexecutor.New(&blocksreexecutor.TestConfig, blockchain, feedErrChan) + executorFull.StopWaiter.Start(ctx, executorFull) + executorFull.Impl(ctx) + executorFull.StopAndWait() + success <- struct{}{} + }() + select { + case err := <-feedErrChan: + t.Errorf("error occurred: %v", err) + if node != nil { + node.StopAndWait() + } + t.FailNow() + case <-success: + } + + // Reexecute blocks at mode random + go func() { + c := &blocksreexecutor.TestConfig + c.Mode = "random" + executorRandom := blocksreexecutor.New(c, blockchain, feedErrChan) + executorRandom.StopWaiter.Start(ctx, executorRandom) + executorRandom.Impl(ctx) + executorRandom.StopAndWait() + success <- struct{}{} + }() + select { + case err := <-feedErrChan: + t.Errorf("error occurred: %v", err) + if node != nil { + node.StopAndWait() + } + t.FailNow() + case <-success: + } +} diff --git a/system_tests/common_test.go b/system_tests/common_test.go index cce63acbef..257848df53 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -35,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" @@ -183,6 +184,13 @@ func (b *NodeBuilder) DefaultConfig(t *testing.T, withL1 bool) *NodeBuilder { } func (b *NodeBuilder) Build(t *testing.T) func() { + if b.execConfig.RPC.MaxRecreateStateDepth == arbitrum.UninitializedMaxRecreateStateDepth { + if b.execConfig.Caching.Archive { + b.execConfig.RPC.MaxRecreateStateDepth = arbitrum.DefaultArchiveNodeMaxRecreateStateDepth + } else { + b.execConfig.RPC.MaxRecreateStateDepth = 
arbitrum.DefaultNonArchiveNodeMaxRecreateStateDepth + } + } if b.withL1 { l1, l2 := NewTestClient(b.ctx), NewTestClient(b.ctx) b.L2Info, l2.ConsensusNode, l2.Client, l2.Stack, b.L1Info, l1.L1Backend, l1.Client, l1.Stack = @@ -229,6 +237,13 @@ func (b *NodeBuilder) Build2ndNode(t *testing.T, params *SecondNodeParams) (*Tes if params.execConfig == nil { params.execConfig = b.execConfig } + if params.execConfig.RPC.MaxRecreateStateDepth == arbitrum.UninitializedMaxRecreateStateDepth { + if params.execConfig.Caching.Archive { + params.execConfig.RPC.MaxRecreateStateDepth = arbitrum.DefaultArchiveNodeMaxRecreateStateDepth + } else { + params.execConfig.RPC.MaxRecreateStateDepth = arbitrum.DefaultNonArchiveNodeMaxRecreateStateDepth + } + } l2 := NewTestClient(b.ctx) l2.Client, l2.ConsensusNode = @@ -541,15 +556,15 @@ func StaticFetcherFrom[T any](t *testing.T, config *T) func() *T { } func configByValidationNode(t *testing.T, clientConfig *arbnode.Config, valStack *node.Node) { - clientConfig.BlockValidator.ValidationServer.URL = valStack.WSEndpoint() - clientConfig.BlockValidator.ValidationServer.JWTSecret = "" + clientConfig.BlockValidator.ValidationServerConfigs[0].URL = valStack.WSEndpoint() + clientConfig.BlockValidator.ValidationServerConfigs[0].JWTSecret = "" } func AddDefaultValNode(t *testing.T, ctx context.Context, nodeConfig *arbnode.Config, useJit bool) { if !nodeConfig.ValidatorRequired() { return } - if nodeConfig.BlockValidator.ValidationServer.URL != "" { + if nodeConfig.BlockValidator.ValidationServerConfigs[0].URL != "" { return } conf := valnode.TestValidationConfig @@ -666,12 +681,14 @@ func DeployOnTestL1( ctx, l1Reader, &l1TransactionOpts, - l1info.GetAddress("Sequencer"), + []common.Address{l1info.GetAddress("Sequencer")}, + l1info.GetAddress("RollupOwner"), 0, arbnode.GenerateRollupConfig(false, locator.LatestWasmModuleRoot(), l1info.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}), nativeToken, maxDataSize, hotshotAddr, + false, ) Require(t, err) l1info.SetContract("Bridge", addresses.Bridge) @@ -794,7 +811,7 @@ func createTestNodeOnL1WithConfigImpl( Require(t, err) currentNode, err = arbnode.CreateNode( ctx, l2stack, execNode, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, - addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, big.NewInt(1337), + addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, big.NewInt(1337), nil, ) Require(t, err) @@ -830,7 +847,7 @@ func createTestNode( execNode, err := gethexec.CreateExecutionNode(ctx, stack, chainDb, blockchain, nil, execConfigFetcher) Require(t, err) - currentNode, err := arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(nodeConfig), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, big.NewInt(1337)) + currentNode, err := arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(nodeConfig), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, big.NewInt(1337), nil) Require(t, err) // Give the node an init message @@ -930,11 +947,12 @@ func Create2ndNodeWithConfig( AddDefaultValNode(t, ctx, nodeConfig, true) Require(t, execConfig.Validate()) + Require(t, nodeConfig.Validate()) configFetcher := func() *gethexec.Config { return execConfig } currentExec, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, configFetcher) Require(t, err) - currentNode, err := arbnode.CreateNode(ctx, l2stack, currentExec, l2arbDb, NewFetcherFromConfig(nodeConfig), 
l2blockchain.Config(), l1client, first.DeployInfo, &txOpts, &txOpts, dataSigner, feedErrChan, big.NewInt(1337)) + currentNode, err := arbnode.CreateNode(ctx, l2stack, currentExec, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, first.DeployInfo, &txOpts, &txOpts, dataSigner, feedErrChan, big.NewInt(1337), nil) Require(t, err) err = currentNode.Start(ctx) diff --git a/system_tests/das_test.go b/system_tests/das_test.go index 747fd7d518..61606500f5 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -141,7 +141,7 @@ func TestDASRekey(t *testing.T) { l1NodeConfigA.DataAvailability.ParentChainNodeURL = "none" execA, err := gethexec.CreateExecutionNode(ctx, l2stackA, l2chainDb, l2blockchain, l1client, gethexec.ConfigDefaultTest) Require(t, err) - nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan, parentChainID) + nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan, parentChainID, nil) Require(t, err) Require(t, nodeA.Start(ctx)) l2clientA := ClientForStack(t, l2stackA) @@ -188,7 +188,7 @@ func TestDASRekey(t *testing.T) { Require(t, err) l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, backendConfigB) - nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan, parentChainID) + nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan, parentChainID, nil) Require(t, err) Require(t, nodeA.Start(ctx)) l2clientA := ClientForStack(t, l2stackA) @@ -256,7 +256,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { serverConfig := das.DataAvailabilityConfig{ Enable: true, - LocalCache: das.TestBigCacheConfig, + LocalCache: das.TestCacheConfig, LocalFileStorage: das.LocalFileStorageConfig{ Enable: true, @@ -321,7 +321,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) sequencerTxOptsPtr := &sequencerTxOpts - nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, feedErrChan, big.NewInt(1337)) + nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, feedErrChan, big.NewInt(1337), nil) Require(t, err) Require(t, nodeA.Start(ctx)) l2clientA := ClientForStack(t, l2stackA) diff --git a/system_tests/debugapi_test.go b/system_tests/debugapi_test.go index 4568e2809a..52a6bb25c4 100644 --- a/system_tests/debugapi_test.go +++ b/system_tests/debugapi_test.go @@ -2,12 +2,17 @@ package arbtest import ( "context" + "github.com/ethereum/go-ethereum/eth/tracers" "testing" + "encoding/json" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth" 
"github.com/ethereum/go-ethereum/rpc" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" ) func TestDebugAPI(t *testing.T) { @@ -35,4 +40,19 @@ func TestDebugAPI(t *testing.T) { err = l2rpc.CallContext(ctx, &dumpIt, "debug_accountRange", rpc.PendingBlockNumber, hexutil.Bytes{}, 10, true, true, false) Require(t, err) + arbSys, err := precompilesgen.NewArbSys(types.ArbSysAddress, builder.L2.Client) + Require(t, err) + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + tx, err := arbSys.WithdrawEth(&auth, common.Address{}) + Require(t, err) + receipt, err := builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + if len(receipt.Logs) != 1 { + Fatal(t, "Unexpected number of logs", len(receipt.Logs)) + } + + var result json.RawMessage + flatCallTracer := "flatCallTracer" + err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", tx.Hash(), &tracers.TraceConfig{Tracer: &flatCallTracer}) + Require(t, err) } diff --git a/system_tests/espresso-e2e/docker-compose.yaml b/system_tests/espresso-e2e/docker-compose.yaml index b933df76fe..bc4948ba26 100644 --- a/system_tests/espresso-e2e/docker-compose.yaml +++ b/system_tests/espresso-e2e/docker-compose.yaml @@ -59,6 +59,7 @@ services: - ESPRESSO_SEQUENCER_ETH_MNEMONIC - ESPRESSO_SEQUENCER_ETH_ACCOUNT_INDEX - ESPRESSO_SEQUENCER_PREFUNDED_BUILDER_ACCOUNTS + - ESPRESSO_SEQUENCER_STATE_PEERS=http://espresso-sequencer1:$ESPRESSO_SEQUENCER_API_PORT - RUST_LOG - RUST_LOG_FORMAT depends_on: @@ -95,6 +96,7 @@ services: - ESPRESSO_STATE_RELAY_SERVER_URL - RUST_LOG - RUST_LOG_FORMAT + - ESPRESSO_SEQUENCER_STATE_PEERS=http://espresso-sequencer0:$ESPRESSO_SEQUENCER_API_PORT depends_on: orchestrator: condition: service_healthy diff --git a/system_tests/espresso_e2e_test.go b/system_tests/espresso_e2e_test.go index 07eb53382c..eae295921c 100644 --- a/system_tests/espresso_e2e_test.go +++ b/system_tests/espresso_e2e_test.go @@ -77,16 +77,20 @@ func runEspresso(t *testing.T, ctx context.Context) func() { func createL2Node(ctx context.Context, t *testing.T, hotshot_url string, builder *NodeBuilder) (*TestClient, info, func()) { nodeConfig := arbnode.ConfigDefaultL1Test() builder.takeOwnership = false + nodeConfig.BatchPoster.Enable = false + nodeConfig.BlockValidator.Enable = false nodeConfig.DelayedSequencer.Enable = true nodeConfig.Sequencer = true nodeConfig.Espresso = true builder.execConfig.Sequencer.Enable = true builder.execConfig.Sequencer.Espresso = true - builder.execConfig.Sequencer.EspressoNamespace = 412346 + builder.execConfig.Sequencer.EspressoNamespace = builder.chainConfig.ChainID.Uint64() builder.execConfig.Sequencer.HotShotUrl = hotshot_url builder.chainConfig.ArbitrumChainParams.EnableEspresso = true + nodeConfig.Feed.Output.Enable = true + nodeConfig.Feed.Output.Addr = "0.0.0.0" builder.nodeConfig.Feed.Output.Enable = true builder.nodeConfig.Feed.Output.Port = fmt.Sprintf("%d", broadcastPort) @@ -156,6 +160,7 @@ func createL1ValidatorPosterNode(ctx context.Context, t *testing.T) (*NodeBuilde builder.nodeConfig.BatchPoster.PollInterval = 10 * time.Second builder.nodeConfig.BatchPoster.MaxDelay = -1000 * time.Hour builder.nodeConfig.BlockValidator.Enable = true + builder.nodeConfig.BlockValidator.ValidationPoll = 2 * time.Second builder.nodeConfig.BlockValidator.ValidationServer.URL = fmt.Sprintf("ws://127.0.0.1:%d", arbValidationPort) builder.nodeConfig.BlockValidator.HotShotAddress = hotShotAddress builder.nodeConfig.BlockValidator.Espresso = true @@ -209,6 +214,7 @@ func createStaker(ctx context.Context, t *testing.T, builder 
*NodeBuilder, incor config.BatchPoster.Enable = false config.Staker.Enable = false config.BlockValidator.Enable = true + config.BlockValidator.ValidationPoll = 2 * time.Second config.BlockValidator.HotShotAddress = hotShotAddress config.BlockValidator.Espresso = true config.BlockValidator.ValidationServer.URL = fmt.Sprintf("ws://127.0.0.1:%d", arbValidationPort) @@ -392,7 +398,14 @@ func TestEspressoE2E(t *testing.T) { Require(t, err) // Remember the number of messages - msgCnt, err := node.ConsensusNode.TxStreamer.GetMessageCount() + var msgCnt arbutil.MessageIndex + err = waitFor(t, ctx, func() bool { + cnt, err := node.ConsensusNode.TxStreamer.GetMessageCount() + Require(t, err) + msgCnt = cnt + log.Info("waiting for message count", "cnt", msgCnt) + return msgCnt > 1 + }) Require(t, err) // Wait for the number of validated messages to catch up @@ -445,7 +458,7 @@ func TestEspressoE2E(t *testing.T) { badStaker, blockValidatorB, cleanB := createStaker(ctx, t, builder, incorrectHeight) defer cleanB() - err = waitFor(t, ctx, func() bool { + err = waitForWith(t, ctx, 60*time.Second, 1*time.Second, func() bool { validatedA := blockValidatorA.Validated(t) validatedB := blockValidatorB.Validated(t) shouldValidated := arbutil.MessageIndex(incorrectHeight - 1) diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 26c52c31e5..3e6d1d368c 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -184,6 +184,7 @@ func makeBatch(t *testing.T, l2Node *arbnode.Node, l2Info *BlockchainTestInfo, b } func confirmLatestBlock(ctx context.Context, t *testing.T, l1Info *BlockchainTestInfo, backend arbutil.L1Interface) { + t.Helper() // With SimulatedBeacon running in on-demand block production mode, the // finalized block is considered to be the nearest multiple of 32 less // than or equal to the block number.
@@ -205,10 +206,10 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha _, err = EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) timeBounds := mocksgen.ISequencerInboxMaxTimeVariation{ - DelayBlocks: 10000, - FutureBlocks: 10000, - DelaySeconds: 10000, - FutureSeconds: 10000, + DelayBlocks: big.NewInt(10000), + FutureBlocks: big.NewInt(10000), + DelaySeconds: big.NewInt(10000), + FutureSeconds: big.NewInt(10000), } seqInboxAddr, tx, seqInbox, err := mocksgen.DeploySequencerInboxStub( &txOpts, @@ -218,6 +219,7 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha timeBounds, big.NewInt(117964), reader4844, + false, ) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) @@ -285,7 +287,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall asserterExec, err := gethexec.CreateExecutionNode(ctx, asserterL2Stack, asserterL2ChainDb, asserterL2Blockchain, l1Backend, gethexec.ConfigDefaultTest) Require(t, err) parentChainID := big.NewInt(1337) - asserterL2, err := arbnode.CreateNode(ctx, asserterL2Stack, asserterExec, asserterL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, asserterRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID) + asserterL2, err := arbnode.CreateNode(ctx, asserterL2Stack, asserterExec, asserterL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, asserterRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID, nil) Require(t, err) err = asserterL2.Start(ctx) Require(t, err) @@ -296,7 +298,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall challengerRollupAddresses.SequencerInbox = challengerSeqInboxAddr challengerExec, err := gethexec.CreateExecutionNode(ctx, challengerL2Stack, challengerL2ChainDb, challengerL2Blockchain, l1Backend, gethexec.ConfigDefaultTest) Require(t, err) - challengerL2, err := arbnode.CreateNode(ctx, challengerL2Stack, challengerExec, challengerL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, &challengerRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID) + challengerL2, err := arbnode.CreateNode(ctx, challengerL2Stack, challengerExec, challengerL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, &challengerRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID, nil) Require(t, err) err = challengerL2.Start(ctx) Require(t, err) diff --git a/system_tests/meaningless_reorg_test.go b/system_tests/meaningless_reorg_test.go index e1715dc635..11b68b558b 100644 --- a/system_tests/meaningless_reorg_test.go +++ b/system_tests/meaningless_reorg_test.go @@ -27,7 +27,7 @@ func TestMeaninglessBatchReorg(t *testing.T) { Require(t, err) seqOpts := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx) - tx, err := seqInbox.AddSequencerL2BatchFromOrigin(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}) + tx, err := seqInbox.AddSequencerL2BatchFromOrigin0(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}, common.Big0, common.Big0) Require(t, err) batchReceipt, err := builder.L1.EnsureTxSucceeded(tx) Require(t, err) @@ -69,7 +69,7 @@ func TestMeaninglessBatchReorg(t *testing.T) { // Produce a new l1Block so that the batch ends up in a different l1Block than before builder.L1.TransferBalance(t, "User", "User", common.Big1, builder.L1Info) - tx, err = seqInbox.AddSequencerL2BatchFromOrigin(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}) + tx, err = seqInbox.AddSequencerL2BatchFromOrigin0(&seqOpts, big.NewInt(1), nil, big.NewInt(1), 
common.Address{}, common.Big0, common.Big0) Require(t, err) newBatchReceipt, err := builder.L1.EnsureTxSucceeded(tx) Require(t, err) diff --git a/system_tests/precompile_test.go b/system_tests/precompile_test.go index e0a9c2ce78..0ad0f8f1e4 100644 --- a/system_tests/precompile_test.go +++ b/system_tests/precompile_test.go @@ -5,6 +5,7 @@ package arbtest import ( "context" + "fmt" "math/big" "testing" @@ -67,7 +68,9 @@ func TestCustomSolidityErrors(t *testing.T) { Fatal(t, "customRevert call should have errored") } observedMessage := customError.Error() - expectedMessage := "execution reverted: error Custom(1024, This spider family wards off bugs: /\\oo/\\ //\\(oo)/\\ /\\oo/\\, true)" + expectedError := "Custom(1024, This spider family wards off bugs: /\\oo/\\ //\\(oo)/\\ /\\oo/\\, true)" + // The first error is server side. The second error is client side ABI decoding. + expectedMessage := fmt.Sprintf("execution reverted: error %v: %v", expectedError, expectedError) if observedMessage != expectedMessage { Fatal(t, observedMessage) } @@ -79,7 +82,8 @@ func TestCustomSolidityErrors(t *testing.T) { Fatal(t, "out of range ArbBlockHash call should have errored") } observedMessage = customError.Error() - expectedMessage = "execution reverted: error InvalidBlockNumber(1000000000, 1)" + expectedError = "InvalidBlockNumber(1000000000, 1)" + expectedMessage = fmt.Sprintf("execution reverted: error %v: %v", expectedError, expectedError) if observedMessage != expectedMessage { Fatal(t, observedMessage) } diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index f5bdca0970..777ed17961 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -2,31 +2,30 @@ package arbtest import ( "context" + "encoding/binary" "errors" + "fmt" "math/big" "strings" + "sync" "testing" + "time" "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/trie" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util" ) -func prepareNodeWithHistory(t *testing.T, ctx context.Context, execConfig *gethexec.Config, txCount uint64) (node *arbnode.Node, executionNode *gethexec.ExecutionNode, l2client *ethclient.Client, cancel func()) { - t.Helper() - builder := NewNodeBuilder(ctx).DefaultConfig(t, true) - builder.execConfig = execConfig - cleanup := builder.Build(t) - builder.L2Info.GenerateAccount("User2") +func makeSomeTransfers(t *testing.T, ctx context.Context, builder *NodeBuilder, txCount uint64) { var txs []*types.Transaction for i := uint64(0); i < txCount; i++ { tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) @@ -38,8 +37,16 @@ func prepareNodeWithHistory(t *testing.T, ctx context.Context, execConfig *gethe _, err := builder.L2.EnsureTxSucceeded(tx) Require(t, err) } +} - return builder.L2.ConsensusNode, builder.L2.ExecNode, builder.L2.Client, cleanup +func prepareNodeWithHistory(t *testing.T, ctx context.Context, execConfig *gethexec.Config, txCount uint64) (*NodeBuilder, func()) { + t.Helper() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig = 
execConfig + cleanup := builder.Build(t) + builder.L2Info.GenerateAccount("User2") + makeSomeTransfers(t, ctx, builder, txCount) + return builder, cleanup } func fillHeaderCache(t *testing.T, bc *core.BlockChain, from, to uint64) { @@ -89,17 +96,19 @@ func removeStatesFromDb(t *testing.T, bc *core.BlockChain, db ethdb.Database, fr func TestRecreateStateForRPCNoDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true + execConfig.Caching.SnapshotCache = 0 // disable snapshots // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + execConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32) defer cancelNode() + execNode, l2client := builder.L2.ExecNode, builder.L2.Client bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -123,17 +132,18 @@ func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() depthGasLimit := int64(256 * util.NormalizeL2GasForL1GasInitial(800_000, params.GWei)) - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = depthGasLimit - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = depthGasLimit + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + execConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32) defer cancelNode() + execNode, l2client := builder.L2.ExecNode, builder.L2.Client bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -157,17 +167,18 @@ func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { func TestRecreateStateForRPCDepthLimitExceeded(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = 
int64(200) - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = int64(200) + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + execConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32) defer cancelNode() + execNode, l2client := builder.L2.ExecNode, builder.L2.Client bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -191,17 +202,18 @@ func TestRecreateStateForRPCMissingBlockParent(t *testing.T) { var headerCacheLimit uint64 = 512 ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, headerCacheLimit+5) + execConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, headerCacheLimit+5) defer cancelNode() + execNode, l2client := builder.L2.ExecNode, builder.L2.Client bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -236,16 +248,17 @@ func TestRecreateStateForRPCBeyondGenesis(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, 
l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, 32) + execConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 32) + execNode, l2client := builder.L2.ExecNode, builder.L2.Client defer cancelNode() bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -271,17 +284,18 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { var blockCacheLimit uint64 = 256 ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := gethexec.ConfigDefaultTest() - nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching.Archive = true + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching.Archive = true // disable trie/Database.cleans cache, so as states removed from ChainDb won't be cached there - nodeConfig.Caching.TrieCleanCache = 0 + execConfig.Caching.TrieCleanCache = 0 - nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 - nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 - _, execNode, l2client, cancelNode := prepareNodeWithHistory(t, ctx, nodeConfig, blockCacheLimit+4) + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 0 + execConfig.Caching.MaxAmountOfGasToSkipStateSaving = 0 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, blockCacheLimit+4) + execNode, l2client := builder.L2.ExecNode, builder.L2.Client defer cancelNode() bc := execNode.Backend.ArbInterface().BlockChain() db := execNode.Backend.ChainDb() @@ -306,7 +320,7 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { hash := rawdb.ReadCanonicalHash(db, lastBlock) Fatal(t, "Didn't fail to get balance at block:", lastBlock, " with hash:", hash, ", lastBlock:", lastBlock) } - if !strings.Contains(err.Error(), "block not found while recreating") { + if !strings.Contains(err.Error(), fmt.Sprintf("block #%d not found", blockBodyToRemove)) { Fatal(t, "Failed with unexpected error: \"", err, "\", at block:", lastBlock, "lastBlock:", lastBlock) } } @@ -335,7 +349,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig Require(t, err) parentChainID := big.NewInt(1337) - node, err := arbnode.CreateNode(ctx1, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, parentChainID) + node, err := arbnode.CreateNode(ctx1, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, parentChainID, nil) Require(t, err) err = node.TxStreamer.AddFakeInitMessage() Require(t, err) @@ -358,9 +372,13 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig Fatal(t, "internal test error - tx got included in unexpected block number, have:", have, "want:", want) } } + bc := execNode.Backend.ArbInterface().BlockChain() genesis := uint64(0) - lastBlock, err := client.BlockNumber(ctx) - Require(t, err) + currentHeader := bc.CurrentBlock() + if currentHeader == nil { + Fatal(t, "missing current block") + } + 
lastBlock := currentHeader.Number.Uint64() if want := genesis + uint64(txCount); lastBlock < want { Fatal(t, "internal test error - not enough blocks produced during preparation, want:", want, "have:", lastBlock) } @@ -376,12 +394,12 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig execNode, err = gethexec.CreateExecutionNode(ctx1, stack, chainDb, blockchain, nil, execConfigFetcher) Require(t, err) - node, err = arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, node.DeployInfo, nil, nil, nil, feedErrChan, parentChainID) + node, err = arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, node.DeployInfo, nil, nil, nil, feedErrChan, parentChainID, nil) Require(t, err) Require(t, node.Start(ctx)) client = ClientForStack(t, stack) defer node.StopAndWait() - bc := execNode.Backend.ArbInterface().BlockChain() + bc = execNode.Backend.ArbInterface().BlockChain() gas := skipGas blocks := skipBlocks for i := genesis + 1; i <= genesis+uint64(txCount); i++ { @@ -391,8 +409,8 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig continue } gas += block.GasUsed() - blocks++ _, err := bc.StateAt(block.Root()) + blocks++ if (skipBlocks == 0 && skipGas == 0) || (skipBlocks != 0 && blocks > skipBlocks) || (skipGas != 0 && gas > skipGas) { if err != nil { t.Log("blocks:", blocks, "skipBlocks:", skipBlocks, "gas:", gas, "skipGas:", skipGas) @@ -401,13 +419,17 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig gas = 0 blocks = 0 } else { + if int(i) >= int(lastBlock)-int(cacheConfig.BlockCount) { + // skipping nonexistence check - the state might have been saved on node shutdown + continue + } if err == nil { t.Log("blocks:", blocks, "skipBlocks:", skipBlocks, "gas:", gas, "skipGas:", skipGas) Fatal(t, "state shouldn't be available, root:", block.Root(), "blockNumber:", i, "blockHash", block.Hash()) } expectedErr := &trie.MissingNodeError{} if !errors.As(err, &expectedErr) { - Fatal(t, "getting state failed with unexpected error, root:", block.Root(), "blockNumber:", i, "blockHash", block.Hash()) + Fatal(t, "getting state failed with unexpected error, root:", block.Root(), "blockNumber:", i, "blockHash:", block.Hash(), "err:", err) } } } @@ -429,7 +451,10 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { cacheConfig := gethexec.DefaultCachingConfig cacheConfig.Archive = true - //// test defaults + cacheConfig.SnapshotCache = 0 // disable snapshots + cacheConfig.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are + + // test defaults testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512) cacheConfig.MaxNumberOfBlocksToSkipStateSaving = 127 @@ -444,8 +469,10 @@ func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { cacheConfig.MaxAmountOfGasToSkipStateSaving = 15 * 1000 * 1000 testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 512) - // one test block ~ 925000 gas - testBlockGas := uint64(925000) + // lower number of blocks in triegc below 100 blocks, to be able to check for nonexistence in testSkippingSavingStateAndRecreatingAfterRestart (it doesn't check last BlockCount blocks as some of them may be persisted on node shutdown) + cacheConfig.BlockCount = 16 + + 
testBlockGas := uint64(925000) // one test block ~ 925000 gas skipBlockValues := []uint64{0, 1, 2, 3, 5, 21, 51, 100, 101} var skipGasValues []uint64 for _, i := range skipBlockValues { @@ -459,3 +486,206 @@ func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { } } } + +func TestGettingStateForRPCFullNode(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + execConfig := gethexec.ConfigDefaultTest() + execConfig.Caching.SnapshotCache = 0 // disable snapshots + execConfig.Caching.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 16) + execNode, _ := builder.L2.ExecNode, builder.L2.Client + defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + api := execNode.Backend.APIBackend() + + header := bc.CurrentBlock() + if header == nil { + Fatal(t, "failed to get current block header") + } + state, _, err := api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) + Require(t, err) + addr := builder.L2Info.GetAddress("User2") + exists := state.Exist(addr) + err = state.Error() + Require(t, err) + if !exists { + Fatal(t, "User2 address does not exist in the state") + } + // Get the state again to avoid caching + state, _, err = api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) + Require(t, err) + + blockCountRequiredToFlushDirties := builder.execConfig.Caching.BlockCount + makeSomeTransfers(t, ctx, builder, blockCountRequiredToFlushDirties) + + exists = state.Exist(addr) + err = state.Error() + Require(t, err) + if !exists { + Fatal(t, "User2 address does not exist in the state") + } +} + +func TestGettingStateForRPCHybridArchiveNode(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + execConfig := gethexec.ConfigDefaultTest() + execConfig.Caching.Archive = true + execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 128 + execConfig.Caching.BlockCount = 128 + execConfig.Caching.SnapshotCache = 0 // disable snapshots + execConfig.Caching.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + builder, cancelNode := prepareNodeWithHistory(t, ctx, execConfig, 16) + execNode, _ := builder.L2.ExecNode, builder.L2.Client + defer cancelNode() + bc := execNode.Backend.ArbInterface().BlockChain() + api := execNode.Backend.APIBackend() + + header := bc.CurrentBlock() + if header == nil { + Fatal(t, "failed to get current block header") + } + state, _, err := api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) + Require(t, err) + addr := builder.L2Info.GetAddress("User2") + exists := state.Exist(addr) + err = state.Error() + Require(t, err) + if !exists { + Fatal(t, "User2 address does not exist in the state") + } + // Get the state again to avoid caching + state, _, err = api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) + Require(t, err) + + blockCountRequiredToFlushDirties := builder.execConfig.Caching.BlockCount + makeSomeTransfers(t, ctx, builder, blockCountRequiredToFlushDirties) + + exists = state.Exist(addr) + err = state.Error() + Require(t, err) + if !exists { + Fatal(t, "User2 address does not exist in 
the state") + } +} + +func TestStateAndHeaderForRecentBlock(t *testing.T) { + threads := 32 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig.Caching.Archive = true + builder.execConfig.RPC.MaxRecreateStateDepth = 0 + cleanup := builder.Build(t) + defer cleanup() + builder.L2Info.GenerateAccount("User2") + + errors := make(chan error, threads+1) + senderDone := make(chan struct{}) + go func() { + defer close(senderDone) + for ctx.Err() == nil { + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, new(big.Int).Lsh(big.NewInt(1), 128), nil) + err := builder.L2.Client.SendTransaction(ctx, tx) + if ctx.Err() != nil { + return + } + if err != nil { + errors <- err + return + } + _, err = builder.L2.EnsureTxSucceeded(tx) + if ctx.Err() != nil { + return + } + if err != nil { + errors <- err + return + } + time.Sleep(10 * time.Millisecond) + } + }() + api := builder.L2.ExecNode.Backend.APIBackend() + db := builder.L2.ExecNode.Backend.ChainDb() + i := 1 + var mtx sync.RWMutex + var wgCallers sync.WaitGroup + for j := 0; j < threads && ctx.Err() == nil; j++ { + wgCallers.Add(1) + go func() { + defer wgCallers.Done() + mtx.RLock() + blockNumber := i + mtx.RUnlock() + for blockNumber < 300 && ctx.Err() == nil { + prefix := make([]byte, 8) + binary.BigEndian.PutUint64(prefix, uint64(blockNumber)) + prefix = append([]byte("b"), prefix...) + it := db.NewIterator(prefix, nil) + defer it.Release() + if it.Next() { + key := it.Key() + if len(key) != len(prefix)+common.HashLength { + Fatal(t, "Wrong key length, have:", len(key), "want:", len(prefix)+common.HashLength) + } + blockHash := common.BytesToHash(key[len(prefix):]) + start := time.Now() + for ctx.Err() == nil { + _, _, err := api.StateAndHeaderByNumberOrHash(ctx, rpc.BlockNumberOrHash{BlockHash: &blockHash}) + if err == nil { + mtx.Lock() + if blockNumber == i { + i++ + } + mtx.Unlock() + break + } + if ctx.Err() != nil { + return + } + if !strings.Contains(err.Error(), "ahead of current block") { + errors <- err + return + } + if time.Since(start) > 5*time.Second { + errors <- fmt.Errorf("timeout - failed to get state for more then 5 seconds, block: %d, err: %w", blockNumber, err) + return + } + } + } + it.Release() + mtx.RLock() + blockNumber = i + mtx.RUnlock() + } + }() + } + callersDone := make(chan struct{}) + go func() { + wgCallers.Wait() + close(callersDone) + }() + + select { + case <-callersDone: + cancel() + case <-senderDone: + cancel() + case err := <-errors: + t.Error(err) + cancel() + } + <-callersDone + <-senderDone + close(errors) + for err := range errors { + if err != nil { + t.Error(err) + } + } +} diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index 4619671700..be0ecc590f 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -121,7 +121,8 @@ func TestRetryableNoExist(t *testing.T) { arbRetryableTx, err := precompilesgen.NewArbRetryableTx(common.HexToAddress("6e"), builder.L2.Client) Require(t, err) _, err = arbRetryableTx.GetTimeout(&bind.CallOpts{}, common.Hash{}) - if err.Error() != "execution reverted: error NoTicketWithID()" { + // The first error is server side. The second error is client side ABI decoding. 
+ if err.Error() != "execution reverted: error NoTicketWithID(): NoTicketWithID()" { Fatal(t, "didn't get expected NoTicketWithID error") } } @@ -156,7 +157,10 @@ func TestSubmitRetryableImmediateSuccess(t *testing.T) { ) Require(t, err, "failed to estimate retryable submission") estimate := tx.Gas() - colors.PrintBlue("estimate: ", estimate) + expectedEstimate := params.TxGas + params.TxDataNonZeroGasEIP2028*4 + if estimate != expectedEstimate { + t.Errorf("estimated retryable ticket at %v gas but expected %v", estimate, expectedEstimate) + } // submit & auto redeem the retryable using the gas estimate usertxoptsL1 := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx) @@ -335,6 +339,12 @@ func TestSubmitRetryableFailThenRetry(t *testing.T) { receipt, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) + redemptionL2Gas := receipt.GasUsed - receipt.GasUsedForL1 + var maxRedemptionL2Gas uint64 = 1_000_000 + if redemptionL2Gas > maxRedemptionL2Gas { + t.Errorf("manual retryable redemption used %v gas, more than expected max %v gas", redemptionL2Gas, maxRedemptionL2Gas) + } + retryTxId := receipt.Logs[0].Topics[2] // check the receipt for the retry diff --git a/system_tests/ipc_test.go b/system_tests/rpc_test.go similarity index 50% rename from system_tests/ipc_test.go rename to system_tests/rpc_test.go index 511a608e67..357cb8e4c1 100644 --- a/system_tests/ipc_test.go +++ b/system_tests/rpc_test.go @@ -7,8 +7,10 @@ import ( "context" "path/filepath" "testing" + "time" "github.com/ethereum/go-ethereum/ethclient" + "github.com/offchainlabs/nitro/solgen/go/mocksgen" ) func TestIpcRpc(t *testing.T) { @@ -25,3 +27,23 @@ func TestIpcRpc(t *testing.T) { _, err := ethclient.Dial(ipcPath) Require(t, err) } + +func TestPendingBlockTimeAndNumberAdvance(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + + auth := builder.L2Info.GetDefaultTransactOpts("Faucet", ctx) + + _, _, testTimeAndNr, err := mocksgen.DeployPendingBlkTimeAndNrAdvanceCheck(&auth, builder.L2.Client) + Require(t, err) + + time.Sleep(1 * time.Second) + + _, err = testTimeAndNr.IsAdvancing(&auth) + Require(t, err) +} diff --git a/system_tests/seqinbox_test.go b/system_tests/seqinbox_test.go index c4dd17ef53..e00bda8e84 100644 --- a/system_tests/seqinbox_test.go +++ b/system_tests/seqinbox_test.go @@ -355,7 +355,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { if i%5 == 0 { tx, err = seqInbox.AddSequencerL2Batch(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr, big.NewInt(0), big.NewInt(0)) } else { - tx, err = seqInbox.AddSequencerL2BatchFromOrigin(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr) + tx, err = seqInbox.AddSequencerL2BatchFromOrigin0(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr, common.Big0, common.Big0) } Require(t, err) txRes, err := builder.L1.EnsureTxSucceeded(tx) diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index b87a8b9736..5798580a2c 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -61,7 +61,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) t.Parallel() ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() - httpSrv, srv := externalsignertest.NewServer(ctx, t) + httpSrv, srv := 
externalsignertest.NewServer(t) cp, err := externalsignertest.CertPaths() if err != nil { t.Fatalf("Error getting cert paths: %v", err) } diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index 28bcbec9b4..2c11435485 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -41,7 +41,7 @@ func BuildBlock( if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } - inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, nil, arbstate.KeysetValidate) + inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, arbstate.KeysetValidate) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) @@ -121,6 +121,9 @@ func (c noopChainContext) GetHeader(common.Hash, uint64) *types.Header { func FuzzStateTransition(f *testing.F) { f.Fuzz(func(t *testing.T, compressSeqMsg bool, seqMsg []byte, delayedMsg []byte) { + if len(seqMsg) > 0 && arbstate.IsL1AuthenticatedMessageHeaderByte(seqMsg[0]) { + return + } chainDb := rawdb.NewMemoryDatabase() chainConfig := params.ArbitrumRollupGoerliTestnetChainConfig() serializedChainConfig, err := json.Marshal(chainConfig) diff --git a/system_tests/staterecovery_test.go b/system_tests/staterecovery_test.go new file mode 100644 index 0000000000..ac30038cc1 --- /dev/null +++ b/system_tests/staterecovery_test.go @@ -0,0 +1,95 @@ +package arbtest + +import ( + "context" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/trie" + "github.com/offchainlabs/nitro/cmd/staterecovery" + "github.com/offchainlabs/nitro/execution/gethexec" +) + +func TestRecreateMissingStates(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig.Caching.Archive = true + builder.execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 16 + builder.execConfig.Caching.SnapshotCache = 0 // disable snapshots + _ = builder.Build(t) + l2cleanupDone := false + defer func() { + if !l2cleanupDone { + builder.L2.cleanup() + } + builder.L1.cleanup() + }() + builder.L2Info.GenerateAccount("User2") + var txs []*types.Transaction + for i := uint64(0); i < 200; i++ { + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) + txs = append(txs, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) + Require(t, err) + } + for _, tx := range txs { + _, err := builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + } + lastBlock, err := builder.L2.Client.BlockNumber(ctx) + Require(t, err) + l2cleanupDone = true + builder.L2.cleanup() + t.Log("stopped l2 node") + func() { + stack, err := node.New(builder.l2StackConfig) + Require(t, err) + defer stack.Close() + chainDb, err := stack.OpenDatabase("chaindb", 0, 0, "", false) + Require(t, err) + defer chainDb.Close() + cacheConfig := gethexec.DefaultCacheConfigFor(stack, &gethexec.DefaultCachingConfig) + bc, err := gethexec.GetBlockChain(chainDb, cacheConfig, builder.chainConfig, builder.execConfig.TxLookupLimit) + Require(t, err) + err = staterecovery.RecreateMissingStates(chainDb, bc, cacheConfig, 1) + Require(t, err) + }() + + testClient, cleanup := builder.Build2ndNode(t, &SecondNodeParams{stackConfig: builder.l2StackConfig}) + defer cleanup() + + currentBlock := uint64(0) + // wait for the chain to catch up + for currentBlock < lastBlock {
currentBlock, err = testClient.Client.BlockNumber(ctx) + Require(t, err) + time.Sleep(20 * time.Millisecond) + } + + currentBlock, err = testClient.Client.BlockNumber(ctx) + Require(t, err) + bc := testClient.ExecNode.Backend.ArbInterface().BlockChain() + triedb := bc.StateCache().TrieDB() + for i := uint64(0); i <= currentBlock; i++ { + header := bc.GetHeaderByNumber(i) + _, err := bc.StateAt(header.Root) + Require(t, err) + tr, err := trie.New(trie.TrieID(header.Root), triedb) + Require(t, err) + it, err := tr.NodeIterator(nil) + Require(t, err) + for it.Next(true) { + } + Require(t, it.Error()) + } + + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) + err = testClient.Client.SendTransaction(ctx, tx) + Require(t, err) + _, err = testClient.EnsureTxSucceeded(tx) + Require(t, err) +} diff --git a/system_tests/unsupported_txtypes_test.go b/system_tests/unsupported_txtypes_test.go new file mode 100644 index 0000000000..4c3c8661c8 --- /dev/null +++ b/system_tests/unsupported_txtypes_test.go @@ -0,0 +1,133 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +// race detection makes things slow and miss timeouts +//go:build !race +// +build !race + +package arbtest + +import ( + "context" + "errors" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + "github.com/holiman/uint256" +) + +func TestBlobAndInternalTxsReject(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() + + builder.L2Info.GenerateAccount("User") + builder.L2Info.GenerateAccount("User2") + l2ChainID := builder.L2Info.Signer.ChainID() + + privKey := GetTestKeyForAccountName(t, "User") + txDataBlob := &types.BlobTx{ + ChainID: &uint256.Int{l2ChainID.Uint64()}, + Nonce: 0, + GasFeeCap: &uint256.Int{params.GWei}, + Gas: 500000, + To: builder.L2Info.GetAddress("User2"), + Value: &uint256.Int{0}, + } + blobTx, err := types.SignNewTx(privKey, types.NewCancunSigner(l2ChainID), txDataBlob) + Require(t, err) + err = builder.L2.Client.SendTransaction(ctx, blobTx) + if err == nil && !errors.Is(err, types.ErrTxTypeNotSupported) { + t.Fatalf("did not receive expected error when submitting blob transaction. Want: %v, Got: %v", types.ErrTxTypeNotSupported, err) + } + + txDataInternal := &types.ArbitrumInternalTx{ChainId: l2ChainID} + internalTx := types.NewTx(txDataInternal) + err = builder.L2.Client.SendTransaction(ctx, internalTx) + if err == nil && !errors.Is(err, types.ErrTxTypeNotSupported) { + t.Fatalf("did not receive expected error when submitting arbitrum internal transaction. 
Want: %v, Got: %v", types.ErrTxTypeNotSupported, err) + } +} +func TestBlobAndInternalTxsAsDelayedMsgReject(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + + builder.L2Info.GenerateAccount("User2") + + l1Txs := make([]*types.Transaction, 0, 4) + txAcceptStatus := make(map[common.Hash]bool, 4) + l2ChainID := builder.L2Info.Signer.ChainID() + + privKey := GetTestKeyForAccountName(t, "Owner") + txDataBlob := &types.BlobTx{ + ChainID: &uint256.Int{l2ChainID.Uint64()}, + Nonce: 0, + GasFeeCap: &uint256.Int{params.GWei}, + Gas: 500000, + To: builder.L2Info.GetAddress("User2"), + Value: &uint256.Int{0}, + } + delayedBlobTx, err := types.SignNewTx(privKey, types.NewCancunSigner(l2ChainID), txDataBlob) + Require(t, err) + txAcceptStatus[delayedBlobTx.Hash()] = false + l1TxBlob := WrapL2ForDelayed(t, delayedBlobTx, builder.L1Info, "User", 100000) + l1Txs = append(l1Txs, l1TxBlob) + + txDataInternal := &types.ArbitrumInternalTx{ChainId: l2ChainID} + delayedInternalTx := types.NewTx(txDataInternal) + txAcceptStatus[delayedInternalTx.Hash()] = false + l1TxInternal := WrapL2ForDelayed(t, delayedInternalTx, builder.L1Info, "User", 100000) + l1Txs = append(l1Txs, l1TxInternal) + + delayedTx1 := builder.L2Info.PrepareTx("Owner", "User2", 50001, big.NewInt(10000), nil) + txAcceptStatus[delayedTx1.Hash()] = false + l1tx := WrapL2ForDelayed(t, delayedTx1, builder.L1Info, "User", 100000) + l1Txs = append(l1Txs, l1tx) + + delayedTx2 := builder.L2Info.PrepareTx("Owner", "User2", 50001, big.NewInt(10000), nil) + txAcceptStatus[delayedTx2.Hash()] = false + l1tx = WrapL2ForDelayed(t, delayedTx2, builder.L1Info, "User", 100000) + l1Txs = append(l1Txs, l1tx) + + errs := builder.L1.L1Backend.TxPool().Add(l1Txs, true, false) + for _, err := range errs { + Require(t, err) + } + + confirmLatestBlock(ctx, t, builder.L1Info, builder.L1.Client) + for _, tx := range l1Txs { + _, err = builder.L1.EnsureTxSucceeded(tx) + Require(t, err) + } + + blocknum, err := builder.L2.Client.BlockNumber(ctx) + Require(t, err) + for i := int64(0); i <= int64(blocknum); i++ { + block, err := builder.L2.Client.BlockByNumber(ctx, big.NewInt(i)) + Require(t, err) + for _, tx := range block.Transactions() { + if _, ok := txAcceptStatus[tx.Hash()]; ok { + txAcceptStatus[tx.Hash()] = true + } + } + } + if !txAcceptStatus[delayedTx1.Hash()] || !txAcceptStatus[delayedTx2.Hash()] { + t.Fatalf("transaction of valid transaction type wasn't accepted as a delayed message") + } + if txAcceptStatus[delayedBlobTx.Hash()] { + t.Fatalf("blob transaction was successfully accepted as a delayed message") + } + if txAcceptStatus[delayedInternalTx.Hash()] { + t.Fatalf("arbitrum internal transaction was successfully accepted as a delayed message") + } +} diff --git a/util/arbmath/bips.go b/util/arbmath/bips.go index 1e788df064..83c7a61ec2 100644 --- a/util/arbmath/bips.go +++ b/util/arbmath/bips.go @@ -36,3 +36,10 @@ func UintMulByBips(value uint64, bips Bips) uint64 { func SaturatingCastToBips(value uint64) Bips { return Bips(SaturatingCast(value)) } + +// BigDivToBips returns dividend/divisor as bips, saturating if out of bounds +func BigDivToBips(dividend, divisor *big.Int) Bips { + value := BigMulByInt(dividend, int64(OneInBips)) + value.Div(value, divisor) + return Bips(BigToUintSaturating(value)) +} diff --git a/util/blobs/blobs.go b/util/blobs/blobs.go index 2852f2b29f..405c776bad 100644 --- 
a/util/blobs/blobs.go +++ b/util/blobs/blobs.go @@ -29,6 +29,9 @@ func fillBlobBytes(blob []byte, data []byte) []byte { // The number of bits in a BLS scalar that aren't part of a whole byte. const spareBlobBits = 6 // = math.floor(math.log2(BLS_MODULUS)) % 8 +// The number of bytes encodable in a blob with the current encoding scheme. +const BlobEncodableData = 254 * params.BlobTxFieldElementsPerBlob / 8 + func fillBlobBits(blob []byte, data []byte) ([]byte, error) { var acc uint16 accBits := 0 diff --git a/arbnode/blob_reader.go b/util/headerreader/blob_client.go similarity index 58% rename from arbnode/blob_reader.go rename to util/headerreader/blob_client.go index 1424285832..8989a321c7 100644 --- a/arbnode/blob_reader.go +++ b/util/headerreader/blob_client.go @@ -1,15 +1,17 @@ -// Copyright 2023, Offchain Labs, Inc. +// Copyright 2023-2024, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -package arbnode +package headerreader import ( "context" "encoding/json" + "errors" "fmt" "io" "net/http" "net/url" + "os" "path" "github.com/ethereum/go-ethereum/common" @@ -24,36 +26,59 @@ import ( ) type BlobClient struct { - ec arbutil.L1Interface - beaconUrl *url.URL - httpClient *http.Client + ec arbutil.L1Interface + beaconUrl *url.URL + httpClient *http.Client + authorization string - // The genesis time time and seconds per slot won't change so only request them once. - cachedGenesisTime uint64 - cachedSecondsPerSlot uint64 + // Filled in in Initialize() + genesisTime uint64 + secondsPerSlot uint64 + + // Directory to save the fetched blobs + blobDirectory string } type BlobClientConfig struct { - BeaconChainUrl string `koanf:"beacon-chain-url"` + BeaconUrl string `koanf:"beacon-url"` + BlobDirectory string `koanf:"blob-directory"` + Authorization string `koanf:"authorization"` } var DefaultBlobClientConfig = BlobClientConfig{ - BeaconChainUrl: "", + BeaconUrl: "", + BlobDirectory: "", + Authorization: "", } func BlobClientAddOptions(prefix string, f *pflag.FlagSet) { - f.String(prefix+".beacon-chain-url", DefaultBlobClientConfig.BeaconChainUrl, "Beacon Chain url to use for fetching blobs") + f.String(prefix+".beacon-url", DefaultBlobClientConfig.BeaconUrl, "Beacon Chain RPC URL to use for fetching blobs (normally on port 3500)") + f.String(prefix+".blob-directory", DefaultBlobClientConfig.BlobDirectory, "Full path of the directory to save fetched blobs") + f.String(prefix+".authorization", DefaultBlobClientConfig.Authorization, "Value to send with the HTTP Authorization: header for Beacon REST requests, must include both scheme and scheme parameters") } func NewBlobClient(config BlobClientConfig, ec arbutil.L1Interface) (*BlobClient, error) { - beaconUrl, err := url.Parse(config.BeaconChainUrl) + beaconUrl, err := url.Parse(config.BeaconUrl) if err != nil { return nil, fmt.Errorf("failed to parse beacon chain URL: %w", err) } + if config.BlobDirectory != "" { + if _, err = os.Stat(config.BlobDirectory); err != nil { + if os.IsNotExist(err) { + if err = os.MkdirAll(config.BlobDirectory, os.ModePerm); err != nil { + return nil, fmt.Errorf("error creating blob directory: %w", err) + } + } else { + return nil, fmt.Errorf("invalid blob directory path: %w", err) + } + } + } return &BlobClient{ - ec: ec, - beaconUrl: beaconUrl, - httpClient: &http.Client{}, + ec: ec, + beaconUrl: beaconUrl, + authorization: config.Authorization, + httpClient: &http.Client{}, + blobDirectory: config.BlobDirectory, }, nil } @@ -75,6 +100,10 @@ func beaconRequest[T 
interface{}](b *BlobClient, ctx context.Context, beaconPath return empty, err } + if b.authorization != "" { + req.Header.Set("Authorization", b.authorization) + } + resp, err := b.httpClient.Do(req) if err != nil { return empty, err @@ -100,15 +129,10 @@ func (b *BlobClient) GetBlobs(ctx context.Context, blockHash common.Hash, versio if err != nil { return nil, err } - genesisTime, err := b.genesisTime(ctx) - if err != nil { - return nil, err - } - secondsPerSlot, err := b.secondsPerSlot(ctx) - if err != nil { - return nil, err + if b.secondsPerSlot == 0 { + return nil, errors.New("BlobClient hasn't been initialized") } - slot := (header.Time - genesisTime) / secondsPerSlot + slot := (header.Time - b.genesisTime) / b.secondsPerSlot return b.blobSidecars(ctx, slot, versionedHashes) } @@ -124,10 +148,14 @@ type blobResponseItem struct { } func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHashes []common.Hash) ([]kzg4844.Blob, error) { - response, err := beaconRequest[[]blobResponseItem](b, ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot)) + rawData, err := beaconRequest[json.RawMessage](b, ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot)) if err != nil { return nil, fmt.Errorf("error calling beacon client in blobSidecars: %w", err) } + var response []blobResponseItem + if err := json.Unmarshal(rawData, &response); err != nil { + return nil, fmt.Errorf("error unmarshalling raw data into array of blobResponseItem in blobSidecars: %w", err) + } if len(response) < len(versionedHashes) { return nil, fmt.Errorf("expected at least %d blobs for slot %d but only got %d", len(versionedHashes), slot, len(response)) @@ -178,39 +206,57 @@ func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHas } } - return output, nil -} + if b.blobDirectory != "" { + if err := saveBlobDataToDisk(rawData, slot, b.blobDirectory); err != nil { + return nil, err + } + } -type genesisResponse struct { - GenesisTime jsonapi.Uint64String `json:"genesis_time"` - // don't currently care about other fields, add if needed + return output, nil } -func (b *BlobClient) genesisTime(ctx context.Context) (uint64, error) { - if b.cachedGenesisTime > 0 { - return b.cachedGenesisTime, nil +func saveBlobDataToDisk(rawData json.RawMessage, slot uint64, blobDirectory string) error { + filePath := path.Join(blobDirectory, fmt.Sprint(slot)) + file, err := os.Create(filePath) + if err != nil { + return fmt.Errorf("could not create file to store fetched blobs") } - gr, err := beaconRequest[genesisResponse](b, ctx, "/eth/v1/beacon/genesis") + full := fullResult[json.RawMessage]{Data: rawData} + fullbytes, err := json.Marshal(full) if err != nil { - return 0, fmt.Errorf("error calling beacon client in genesisTime: %w", err) + return fmt.Errorf("unable to marshal data into bytes while attempting to store fetched blobs") } - b.cachedGenesisTime = uint64(gr.GenesisTime) - return b.cachedGenesisTime, nil + if _, err := file.Write(fullbytes); err != nil { + return fmt.Errorf("failed to write blob data to disk") + } + file.Close() + return nil +} + +type genesisResponse struct { + GenesisTime jsonapi.Uint64String `json:"genesis_time"` + // don't currently care about other fields, add if needed } type getSpecResponse struct { SecondsPerSlot jsonapi.Uint64String `json:"SECONDS_PER_SLOT"` } -func (b *BlobClient) secondsPerSlot(ctx context.Context) (uint64, error) { - if b.cachedSecondsPerSlot > 0 { - return b.cachedSecondsPerSlot, nil +func (b *BlobClient) Initialize(ctx context.Context) 
error { + genesis, err := beaconRequest[genesisResponse](b, ctx, "/eth/v1/beacon/genesis") + if err != nil { + return fmt.Errorf("error calling beacon client to get genesisTime: %w", err) } - gr, err := beaconRequest[getSpecResponse](b, ctx, "/eth/v1/config/spec") + b.genesisTime = uint64(genesis.GenesisTime) + + spec, err := beaconRequest[getSpecResponse](b, ctx, "/eth/v1/config/spec") if err != nil { - return 0, fmt.Errorf("error calling beacon client in secondsPerSlot: %w", err) + return fmt.Errorf("error calling beacon client to get secondsPerSlot: %w", err) + } + if spec.SecondsPerSlot == 0 { + return errors.New("got SECONDS_PER_SLOT of zero from beacon client") } - b.cachedSecondsPerSlot = uint64(gr.SecondsPerSlot) - return b.cachedSecondsPerSlot, nil + b.secondsPerSlot = uint64(spec.SecondsPerSlot) + return nil } diff --git a/util/headerreader/blob_client_test.go b/util/headerreader/blob_client_test.go new file mode 100644 index 0000000000..9735899daa --- /dev/null +++ b/util/headerreader/blob_client_test.go @@ -0,0 +1,69 @@ +// Copyright 2024, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package headerreader + +import ( + "encoding/json" + "io" + "os" + "path" + "reflect" + "testing" + + "github.com/offchainlabs/nitro/util/testhelpers" + "github.com/r3labs/diff/v3" +) + +func TestSaveBlobsToDisk(t *testing.T) { + response := []blobResponseItem{{ + BlockRoot: "a", + Index: 0, + Slot: 5, + BlockParentRoot: "a0", + ProposerIndex: 9, + Blob: []byte{1}, + KzgCommitment: []byte{1}, + KzgProof: []byte{1}, + }, { + BlockRoot: "a", + Index: 1, + Slot: 5, + BlockParentRoot: "a0", + ProposerIndex: 10, + Blob: []byte{2}, + KzgCommitment: []byte{2}, + KzgProof: []byte{2}, + }} + testDir := t.TempDir() + rawData, err := json.Marshal(response) + Require(t, err) + err = saveBlobDataToDisk(rawData, 5, testDir) + Require(t, err) + + filePath := path.Join(testDir, "5") + file, err := os.Open(filePath) + Require(t, err) + defer file.Close() + + data, err := io.ReadAll(file) + Require(t, err) + var full fullResult[[]blobResponseItem] + err = json.Unmarshal(data, &full) + Require(t, err) + if !reflect.DeepEqual(full.Data, response) { + changelog, err := diff.Diff(full.Data, response) + Require(t, err) + Fail(t, "blob data saved to disk does not match actual blob data", changelog) + } +} + +func Require(t *testing.T, err error, printables ...interface{}) { + t.Helper() + testhelpers.RequireImpl(t, err, printables...) +} + +func Fail(t *testing.T, printables ...interface{}) { + t.Helper() + testhelpers.FailImpl(t, printables...) 
+} diff --git a/util/redisutil/redis_coordinator.go b/util/redisutil/redis_coordinator.go index 6af141c668..59e3b0e0f9 100644 --- a/util/redisutil/redis_coordinator.go +++ b/util/redisutil/redis_coordinator.go @@ -79,10 +79,10 @@ func (c *RedisCoordinator) CurrentChosenSequencer(ctx context.Context) (string, // GetPriorities returns the priority list of sequencers func (rc *RedisCoordinator) GetPriorities(ctx context.Context) ([]string, error) { prioritiesString, err := rc.Client.Get(ctx, PRIORITIES_KEY).Result() + if errors.Is(err, redis.Nil) { + return []string{}, nil + } if err != nil { - if errors.Is(err, redis.Nil) { - err = errors.New("sequencer priorities unset") - } return []string{}, err } prioritiesList := strings.Split(prioritiesString, ",") diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go index dee6e9252a..02b41cf15d 100644 --- a/util/rpcclient/rpcclient.go +++ b/util/rpcclient/rpcclient.go @@ -21,14 +21,14 @@ import ( ) type ClientConfig struct { - URL string `koanf:"url"` - JWTSecret string `koanf:"jwtsecret"` - Timeout time.Duration `koanf:"timeout" reload:"hot"` - Retries uint `koanf:"retries" reload:"hot"` - ConnectionWait time.Duration `koanf:"connection-wait"` - ArgLogLimit uint `koanf:"arg-log-limit" reload:"hot"` - RetryErrors string `koanf:"retry-errors" reload:"hot"` - RetryDelay time.Duration `koanf:"retry-delay"` + URL string `json:"url,omitempty" koanf:"url"` + JWTSecret string `json:"jwtsecret,omitempty" koanf:"jwtsecret"` + Timeout time.Duration `json:"timeout,omitempty" koanf:"timeout" reload:"hot"` + Retries uint `json:"retries,omitempty" koanf:"retries" reload:"hot"` + ConnectionWait time.Duration `json:"connection-wait,omitempty" koanf:"connection-wait"` + ArgLogLimit uint `json:"arg-log-limit,omitempty" koanf:"arg-log-limit" reload:"hot"` + RetryErrors string `json:"retry-errors,omitempty" koanf:"retry-errors" reload:"hot"` + RetryDelay time.Duration `json:"retry-delay,omitempty" koanf:"retry-delay"` retryErrors *regexp.Regexp } @@ -127,6 +127,25 @@ func (m limitedArgumentsMarshal) String() string { return res } +var blobTxUnderpricedRegexp = regexp.MustCompile(`replacement transaction underpriced: new tx gas fee cap (\d*) <= (\d*) queued`) + +// IsAlreadyKnownError returns true if the error appears to be an "already known" error. +// This check is based on the error's string form and is not precise. +func IsAlreadyKnownError(err error) bool { + s := err.Error() + if strings.Contains(s, "already known") { + return true + } + // go-ethereum returns "replacement transaction underpriced" instead of "already known" for blob txs. + // This is fixed in https://github.com/ethereum/go-ethereum/pull/29210 + // TODO: Once a new geth release is out with this fix, we can remove this check. + matches := blobTxUnderpricedRegexp.FindSubmatch([]byte(s)) + if len(matches) == 3 { + return string(matches[1]) == string(matches[2]) + } + return false +} + func (c *RpcClient) CallContext(ctx_in context.Context, result interface{}, method string, args ...interface{}) error { if c.client == nil { return errors.New("not connected") @@ -155,13 +174,26 @@ func (c *RpcClient) CallContext(ctx_in context.Context, result interface{}, meth ctx, cancelCtx = context.WithCancel(ctx_in) } err = c.client.CallContext(ctx, result, method, args...) 
+ cancelCtx() logger := log.Trace limit := int(c.config().ArgLogLimit) - if err != nil && err.Error() != "already known" { + if err != nil && !IsAlreadyKnownError(err) { logger = log.Info } - logger("rpc response", "method", method, "logId", logId, "err", err, "result", limitedMarshal{limit, result}, "attempt", i, "args", limitedArgumentsMarshal{limit, args}) + logEntry := []interface{}{ + "method", method, + "logId", logId, + "err", err, + "result", limitedMarshal{limit, result}, + "attempt", i, + "args", limitedArgumentsMarshal{limit, args}, + } + var dataErr rpc.DataError + if errors.As(err, &dataErr) { + logEntry = append(logEntry, "errorData", limitedMarshal{limit, dataErr.ErrorData()}) + } + logger("rpc response", logEntry...) if err == nil { return nil } diff --git a/util/rpcclient/rpcclient_test.go b/util/rpcclient/rpcclient_test.go index b885770f60..8613671d37 100644 --- a/util/rpcclient/rpcclient_test.go +++ b/util/rpcclient/rpcclient_test.go @@ -182,6 +182,25 @@ func TestRpcClientRetry(t *testing.T) { } } +func TestIsAlreadyKnownError(t *testing.T) { + for _, testCase := range []struct { + input string + expected bool + }{ + {"already known", true}, + {"insufficient balance", false}, + {"foo already known\nbar", true}, + {"replacement transaction underpriced: new tx gas fee cap 3824396284 \u003c= 3824396284 queued", true}, + {"replacement transaction underpriced: new tx gas fee cap 1234 \u003c= 5678 queued", false}, + {"foo replacement transaction underpriced: new tx gas fee cap 3824396284 \u003c= 3824396284 queued bar", true}, + } { + got := IsAlreadyKnownError(errors.New(testCase.input)) + if got != testCase.expected { + t.Errorf("IsAlreadyKnownError(%q) = %v expected %v", testCase.input, got, testCase.expected) + } + } +} + func Require(t *testing.T, err error, printables ...interface{}) { t.Helper() testhelpers.RequireImpl(t, err, printables...)