From b9b6c59ee6dc4b30d8e96f43f11adfe918bfc4b9 Mon Sep 17 00:00:00 2001
From: Tsahi Zidenberg
Date: Tue, 15 Feb 2022 13:16:37 +0200
Subject: [PATCH 01/32] initial stopwaiter

---
 arbnode/arb_interface.go                |  1 +
 arbnode/batch_poster.go                 | 11 ++++--
 arbnode/delayed_sequencer.go            | 10 ++++--
 arbnode/forwarder.go                    |  2 ++
 arbnode/inbox_reader.go                 | 11 ++++--
 arbnode/node.go                         | 23 ++++++++++++
 arbnode/sequencer.go                    | 21 +++++++----
 arbnode/transaction_streamer.go         | 10 ++++--
 broadcastclient/broadcastclient.go      | 14 +++++---
 broadcastclient/broadcastclient_test.go |  8 ++---
 broadcaster/broadcaster.go              |  4 +--
 broadcaster/broadcaster_test.go         |  2 +-
 util/stopwaiter.go                      | 48 +++++++++++++++++++++++++
 validator/block_validator.go            | 16 +++++----
 validator/preimage_cache.go             |  1 +
 wsbroadcastserver/clientconnection.go   | 15 ++++----
 wsbroadcastserver/clientmanager.go      | 18 ++++------
 wsbroadcastserver/wsbroadcastserver.go  |  4 +--
 18 files changed, 162 insertions(+), 57 deletions(-)
 create mode 100644 util/stopwaiter.go

diff --git a/arbnode/arb_interface.go b/arbnode/arb_interface.go
index f0fc59683f..96f2ea10a5 100644
--- a/arbnode/arb_interface.go
+++ b/arbnode/arb_interface.go
@@ -15,6 +15,7 @@ type TransactionPublisher interface {
 	PublishTransaction(ctx context.Context, tx *types.Transaction) error
 	Initialize(context.Context) error
 	Start(context.Context) error
+	StopAndWait()
 }
 
 type ArbInterface struct {
diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go
index e1c9b6daf0..185b42a379 100644
--- a/arbnode/batch_poster.go
+++ b/arbnode/batch_poster.go
@@ -16,9 +16,12 @@ import (
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/offchainlabs/arbstate/arbstate"
 	"github.com/offchainlabs/arbstate/solgen/go/bridgegen"
+	"github.com/offchainlabs/arbstate/util"
 )
 
 type BatchPoster struct {
+	util.StopWaiter
+
 	client L1Interface
 	inbox *InboxTracker
 	streamer *TransactionStreamer
@@ -337,8 +340,9 @@ func (b *BatchPoster) postSequencerBatch() error {
 	return err
 }
 
-func (b *BatchPoster) Start(ctx context.Context) {
-	go (func() {
+func (b *BatchPoster) Start(ctxIn context.Context) {
+	ctx := b.StopWaiter.Start(ctxIn)
+	b.ThreadTracker.LaunchThread(func() {
 		for {
 			err := b.postSequencerBatch()
 			if err != nil {
@@ -347,8 +351,9 @@
 			select {
 			case <-ctx.Done():
 				return
+			// TODO: don't use time.After
 			case <-time.After(b.config.BatchPollDelay):
 			}
 		}
-	})()
+	})
 }
diff --git a/arbnode/delayed_sequencer.go b/arbnode/delayed_sequencer.go
index 8a520580e4..ecc046917d 100644
--- a/arbnode/delayed_sequencer.go
+++ b/arbnode/delayed_sequencer.go
@@ -10,9 +10,12 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/offchainlabs/arbstate/arbos"
+	"github.com/offchainlabs/arbstate/util"
 )
 
 type DelayedSequencer struct {
+	util.StopWaiter
+
 	client L1Interface
 	bridge *DelayedBridge
 	inbox *InboxTracker
@@ -167,8 +170,9 @@ func (d *DelayedSequencer) run(ctx context.Context) error {
 	}
 }
 
-func (d *DelayedSequencer) Start(ctx context.Context) {
-	go (func() {
+func (d *DelayedSequencer) Start(ctxIn context.Context) {
+	ctx := d.StopWaiter.Start(ctxIn)
+	d.ThreadTracker.LaunchThread(func() {
 		for {
 			err := d.run(ctx)
 			if err != nil && !errors.Is(err, context.Canceled) {
@@ -180,5 +184,5 @@
 			case <-time.After(time.Second):
 			}
 		}
-	})()
+	})
 }
diff --git a/arbnode/forwarder.go b/arbnode/forwarder.go
index 49a7e1743f..af622c748f 100644
--- a/arbnode/forwarder.go
+++ b/arbnode/forwarder.go
@@ -38,3 +38,5 @@ func (f *TxForwarder) Initialize(ctx
context.Context) error { func (f *TxForwarder) Start(ctx context.Context) error { return nil } + +func (f *TxForwarder) StopAndWait() {} diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index 218c7ace61..4b5044ee8c 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -11,6 +11,7 @@ import ( "time" "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/arbstate/util" ) type InboxReaderConfig struct { @@ -32,6 +33,8 @@ var TestInboxReaderConfig = InboxReaderConfig{ } type InboxReader struct { + util.StopWaiter + // Only in run thread caughtUp bool firstMessageBlock *big.Int @@ -57,8 +60,9 @@ func NewInboxReader(tracker *InboxTracker, client L1Interface, firstMessageBlock }, nil } -func (r *InboxReader) Start(ctx context.Context) { - go (func() { +func (r *InboxReader) Start(ctxIn context.Context) { + ctx := r.StopWaiter.Start(ctxIn) + r.ThreadTracker.LaunchThread(func() { for { err := r.run(ctx) if err != nil && !errors.Is(err, context.Canceled) { @@ -67,10 +71,11 @@ func (r *InboxReader) Start(ctx context.Context) { select { case <-ctx.Done(): return + // TODO: don't use time.After case <-time.After(time.Second): } } - })() + }) } func (r *InboxReader) Tracker() *InboxTracker { diff --git a/arbnode/node.go b/arbnode/node.go index f0e9e0749c..4af58d0176 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -265,6 +265,29 @@ func (n *Node) Start(ctx context.Context) error { return nil } +func (n *Node) StopAndWait(ctx context.Context) { + if n.BroadcastClient != nil { + n.BroadcastClient.StopAndWait() + } + if n.BroadcastServer != nil { + n.BroadcastServer.StopAndWait() + } + if n.BlockValidator != nil { + n.BlockValidator.StopAndWait() + } + if n.BatchPoster != nil { + n.BatchPoster.StopAndWait() + } + if n.DelayedSequencer != nil { + n.DelayedSequencer.StopAndWait() + } + if n.InboxReader != nil { + n.InboxReader.StopAndWait() + } + n.TxPublisher.StopAndWait() + n.TxStreamer.StopAndWait() +} + func CreateDefaultStack() (*node.Node, error) { stackConf := node.DefaultConfig var err error diff --git a/arbnode/sequencer.go b/arbnode/sequencer.go index e83e851189..b419f933a4 100644 --- a/arbnode/sequencer.go +++ b/arbnode/sequencer.go @@ -20,6 +20,7 @@ import ( "github.com/offchainlabs/arbstate/arbos" "github.com/offchainlabs/arbstate/arbos/arbosState" "github.com/offchainlabs/arbstate/arbos/l1pricing" + "github.com/offchainlabs/arbstate/util" "github.com/pkg/errors" ) @@ -37,6 +38,8 @@ type txQueueItem struct { } type Sequencer struct { + util.StopWaiter + txStreamer *TransactionStreamer txQueue chan txQueueItem l1Client L1Interface @@ -194,7 +197,8 @@ func (s *Sequencer) sequenceTransactions() { } } -func (s *Sequencer) Start(ctx context.Context) error { +func (s *Sequencer) Start(ctxIn context.Context) error { + ctx := s.StopWaiter.Start(ctxIn) if s.l1Client != nil { initialBlockNr := atomic.LoadUint64(&s.l1BlockNumber) if initialBlockNr == 0 { @@ -204,7 +208,7 @@ func (s *Sequencer) Start(ctx context.Context) error { headerChan, cancel := HeaderSubscribeWithRetry(ctx, s.l1Client) defer cancel() - go (func() { + s.ThreadTracker.LaunchThread(func() { for { select { case header, ok := <-headerChan: @@ -216,15 +220,20 @@ func (s *Sequencer) Start(ctx context.Context) error { return } } - })() + }) } - go (func() { + s.ThreadTracker.LaunchThread(func() { for { s.sequenceTransactions() - time.Sleep(minBlockInterval) + select { + // TODO: don't use time.After + case <-time.After(minBlockInterval): + case <-ctx.Done(): + return + } } - })() + }) return nil } 
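A note on the pattern the hunks above apply: each component stops spawning bare `go (func() { ... })()` goroutines and instead embeds the new `util.StopWaiter`, so `StopAndWait()` can cancel the wrapped context and join the tracked thread. Below is a minimal sketch of the adoption pattern using only the API added in util/stopwaiter.go later in this patch; the `Poller` component itself is hypothetical, not part of the series:

```go
package example

import (
	"context"
	"time"

	"github.com/offchainlabs/arbstate/util"
)

// Poller stands in for components like BatchPoster or DelayedSequencer.
type Poller struct {
	util.StopWaiter
}

// Start wraps the caller's context; StopAndWait later cancels it and
// waits for every goroutine launched through the ThreadTracker.
func (p *Poller) Start(ctxIn context.Context) {
	ctx := p.StopWaiter.Start(ctxIn)
	p.ThreadTracker.LaunchThread(func() {
		for {
			select {
			case <-ctx.Done():
				return // StopAndWait was called
			case <-time.After(time.Second):
				// periodic work would run here
			}
		}
	})
}
```

Under this sketch, `p.StopAndWait()` cancels the wrapped context and blocks until the loop returns — the shutdown guarantee the arb_interface.go change asks of every TransactionPublisher.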
diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 9bfc407284..97cc373819 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -23,12 +23,15 @@ import ( "github.com/offchainlabs/arbstate/arbos" "github.com/offchainlabs/arbstate/arbstate" "github.com/offchainlabs/arbstate/broadcaster" + "github.com/offchainlabs/arbstate/util" "github.com/offchainlabs/arbstate/validator" ) // Produces blocks from a node's L1 messages, storing the results in the blockchain and recording their positions // The streamer is notified when there's new batches to process type TransactionStreamer struct { + util.StopWaiter + db ethdb.Database bc *core.BlockChain @@ -531,8 +534,9 @@ func (s *TransactionStreamer) Initialize() error { return s.cleanupInconsistentState() } -func (s *TransactionStreamer) Start(ctx context.Context) { - go (func() { +func (s *TransactionStreamer) Start(ctxIn context.Context) { + ctx := s.StopWaiter.Start(ctxIn) + s.ThreadTracker.LaunchThread(func() { for { err := s.createBlocks(ctx) if err != nil && !errors.Is(err, context.Canceled) { @@ -545,5 +549,5 @@ func (s *TransactionStreamer) Start(ctx context.Context) { case <-time.After(10 * time.Second): } } - })() + }) } diff --git a/broadcastclient/broadcastclient.go b/broadcastclient/broadcastclient.go index e2e3970a66..41eaf54cdb 100644 --- a/broadcastclient/broadcastclient.go +++ b/broadcastclient/broadcastclient.go @@ -20,6 +20,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/arbstate/arbstate" "github.com/offchainlabs/arbstate/broadcaster" + "github.com/offchainlabs/arbstate/util" "github.com/offchainlabs/arbstate/wsbroadcastserver" ) @@ -35,6 +36,8 @@ type TransactionStreamerInterface interface { } type BroadcastClient struct { + util.StopWaiter + websocketUrl string lastInboxSeqNum *big.Int @@ -67,8 +70,9 @@ func NewBroadcastClient(websocketUrl string, lastInboxSeqNum *big.Int, idleTimeo } } -func (bc *BroadcastClient) Start(ctx context.Context) { - go (func() { +func (bc *BroadcastClient) Start(ctxIn context.Context) { + ctx := bc.StopWaiter.Start(ctxIn) + bc.ThreadTracker.LaunchThread(func() { for { err := bc.connect(ctx) if err == nil { @@ -82,7 +86,7 @@ func (bc *BroadcastClient) Start(ctx context.Context) { case <-time.After(5 * time.Second): } } - })() + }) } func (bc *BroadcastClient) connect(ctx context.Context) error { @@ -115,7 +119,7 @@ func (bc *BroadcastClient) connect(ctx context.Context) error { } func (bc *BroadcastClient) startBackgroundReader(ctx context.Context) { - go func() { + bc.ThreadTracker.LaunchThread(func() { for { select { case <-ctx.Done(): @@ -170,7 +174,7 @@ func (bc *BroadcastClient) startBackgroundReader(ctx context.Context) { } } } - }() + }) } func (bc *BroadcastClient) GetRetryCount() int64 { diff --git a/broadcastclient/broadcastclient_test.go b/broadcastclient/broadcastclient_test.go index bcfb0852fb..7064262ab7 100644 --- a/broadcastclient/broadcastclient_test.go +++ b/broadcastclient/broadcastclient_test.go @@ -37,7 +37,7 @@ func TestReceiveMessages(t *testing.T) { if err != nil { t.Fatal(err) } - defer b.Stop() + defer b.StopAndWait() var wg sync.WaitGroup for i := 0; i < clientCount; i++ { @@ -120,7 +120,7 @@ func TestServerClientDisconnect(t *testing.T) { if err != nil { t.Fatal(err) } - defer b.Stop() + defer b.StopAndWait() ts := NewDummyTransactionStreamer() broadcastClient := NewBroadcastClient("ws://127.0.0.1:9743/", nil, 20*time.Second, ts) @@ -171,7 +171,7 @@ func 
TestBroadcastClientReconnectsOnServerDisconnect(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer b1.Stop()
+	defer b1.StopAndWait()
 
 	broadcastClient := NewBroadcastClient("ws://127.0.0.1:9743/", nil, 2*time.Second, nil)
@@ -205,7 +205,7 @@ func TestBroadcasterSendsCachedMessagesOnClientConnect(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer b.Stop()
+	defer b.StopAndWait()
 
 	b.BroadcastSingle(arbstate.MessageWithMetadata{}, 0)
 	b.BroadcastSingle(arbstate.MessageWithMetadata{}, 1)
diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go
index fc697baaa6..6003a89e89 100644
--- a/broadcaster/broadcaster.go
+++ b/broadcaster/broadcaster.go
@@ -190,6 +190,6 @@ func (b *Broadcaster) Start(ctx context.Context) error {
 	return b.server.Start(ctx)
 }
 
-func (b *Broadcaster) Stop() {
-	b.server.Stop()
+func (b *Broadcaster) StopAndWait() {
+	b.server.StopAndWait()
 }
diff --git a/broadcaster/broadcaster_test.go b/broadcaster/broadcaster_test.go
index bf5b497a52..31cf3998bd 100644
--- a/broadcaster/broadcaster_test.go
+++ b/broadcaster/broadcaster_test.go
@@ -66,7 +66,7 @@ func TestBroadcasterMessagesRemovedOnConfirmation(t *testing.T) {
 
 	b := NewBroadcaster(broadcasterSettings)
 	Require(t, b.Start(ctx))
-	defer b.Stop()
+	defer b.StopAndWait()
 
 	dummyMessage := arbstate.MessageWithMetadata{}
 	expectMessageCount := func(count int, contextMessage string) predicate {
diff --git a/util/stopwaiter.go b/util/stopwaiter.go
new file mode 100644
index 0000000000..fe1b44cfac
--- /dev/null
+++ b/util/stopwaiter.go
@@ -0,0 +1,48 @@
+package util
+
+import (
+	"context"
+	"sync"
+)
+
+type ThreadTracker struct {
+	stopFunc func()
+	wg       sync.WaitGroup
+}
+
+func NewThreadTracker(stopFunc func()) *ThreadTracker {
+	return &ThreadTracker{
+		stopFunc: stopFunc,
+	}
+}
+
+// StopAndWait will only wait for threads launched with this function
+func (s *ThreadTracker) LaunchThread(foo func()) {
+	s.wg.Add(1)
+	go func() {
+		foo()
+		s.wg.Done()
+	}()
+}
+
+func (s *ThreadTracker) StopAndWait() {
+	s.stopFunc()
+	s.wg.Wait()
+}
+
+type StopWaiter struct {
+	ThreadTracker *ThreadTracker
+}
+
+func (s *StopWaiter) Start(ctx context.Context) context.Context {
+	wrapped, cancelfunc := context.WithCancel(ctx)
+	s.ThreadTracker = NewThreadTracker(cancelfunc)
+	return wrapped
+}
+
+// Not thread safe vs Start or itself
+func (s *StopWaiter) StopAndWait() {
+	if s.ThreadTracker != nil {
+		s.ThreadTracker.StopAndWait()
+	}
+}
diff --git a/validator/block_validator.go b/validator/block_validator.go
index d61557995f..1860346d06 100644
--- a/validator/block_validator.go
+++ b/validator/block_validator.go
@@ -22,10 +22,12 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/offchainlabs/arbstate/arbos"
 	"github.com/offchainlabs/arbstate/arbstate"
+	"github.com/offchainlabs/arbstate/util"
 	"github.com/pkg/errors"
 )
 
 type BlockValidator struct {
+	util.StopWaiter
 	inboxTracker InboxTrackerInterface
 	blockchain   *core.BlockChain
@@ -228,7 +230,7 @@ func (v *BlockValidator) prepareBlock(header *types.Header, prevHeader *types.He
 }
 
 func (v *BlockValidator) NewBlock(block *types.Block, prevHeader *types.Header, msg arbstate.MessageWithMetadata) {
-	go v.prepareBlock(block.Header(), prevHeader, msg)
+	v.ThreadTracker.LaunchThread(func() { v.prepareBlock(block.Header(), prevHeader, msg) })
 }
 
 var launchTime = time.Now().Format("2006_01_02__15_04")
@@ -499,6 +501,7 @@ func (v *BlockValidator) sendValidations(ctx context.Context) {
 	}
 	atomic.AddInt32(&v.atomicValidationsRunning, 1)
 	validationEntry.SeqMsgNr = startPos.BatchNumber
+	// 
validation can take a long time. Don't wait for it when shutting down
 	go v.validate(ctx, validationEntry, startPos, endPos)
 	v.posNextSend += 1
 	v.globalPosNextSend = endPos
 }
@@ -506,7 +509,7 @@
 func (v *BlockValidator) startValidationLoop(ctx context.Context) {
-	go (func() {
+	v.ThreadTracker.LaunchThread(func() {
 		for {
 			select {
 			case _, ok := <-v.sendValidationsChan:
@@ -518,7 +521,7 @@
 			}
 			v.sendValidations(ctx)
 		}
-	})()
+	})
 }
 
 func (v *BlockValidator) progressValidated() {
@@ -556,7 +559,7 @@
 }
 
 func (v *BlockValidator) startProgressLoop(ctx context.Context) {
-	go (func() {
+	v.ThreadTracker.LaunchThread(func() {
 		for {
 			select {
 			case _, ok := <-v.checkProgressChan:
@@ -568,7 +571,7 @@
 			}
 			v.progressValidated()
 		}
-	})()
+	})
 }
 
 func (v *BlockValidator) BlocksValidated() uint64 {
@@ -585,7 +588,8 @@ func (v *BlockValidator) ProcessBatches(batches map[uint64][]byte) {
 	}
 }
 
-func (v *BlockValidator) Start(ctx context.Context) error {
+func (v *BlockValidator) Start(ctxIn context.Context) error {
+	ctx := v.StopWaiter.Start(ctxIn)
 	v.startProgressLoop(ctx)
 	v.startValidationLoop(ctx)
 	return nil
diff --git a/validator/preimage_cache.go b/validator/preimage_cache.go
index 5109aafcd7..fc4e9c6895 100644
--- a/validator/preimage_cache.go
+++ b/validator/preimage_cache.go
@@ -78,6 +78,7 @@ func (p *preimageCache) RemoveFromCache(hashlist []common.Hash) error {
 			curEntry.Data = nil
 			deletionsNum := atomic.AddInt32(&p.deletionsSinceMaintenance, 1)
 			if deletionsNum%maintenanceEvery == 0 {
+				// maintains in-memory structure. No need for StopAndWait
 				go p.CacheMaintenance()
 			}
 		}
diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go
index 551c112d8b..7482c60760 100644
--- a/wsbroadcastserver/clientconnection.go
+++ b/wsbroadcastserver/clientconnection.go
@@ -18,6 +18,7 @@ import (
 	"github.com/gobwas/ws"
 	"github.com/gobwas/ws/wsutil"
 	"github.com/mailru/easygo/netpoll"
+	"github.com/offchainlabs/arbstate/util"
 )
 
 // MaxSendQueue is the maximum number of items in a clients out channel before client gets disconnected.
 const MaxSendQueue = 1000
 
 // ClientConnection represents client connection.
type ClientConnection struct { + util.StopWaiter + ioMutex sync.Mutex conn net.Conn @@ -34,7 +37,6 @@ type ClientConnection struct { clientManager *ClientManager lastHeardUnix int64 - cancelFunc context.CancelFunc out chan []byte } @@ -50,11 +52,9 @@ func NewClientConnection(conn net.Conn, desc *netpoll.Desc, clientManager *Clien } func (cc *ClientConnection) Start(parentCtx context.Context) { - ctx, cancelFunc := context.WithCancel(parentCtx) - cc.cancelFunc = cancelFunc + ctx := cc.StopWaiter.Start(parentCtx) go func() { - defer cc.cancelFunc() defer close(cc.out) for { select { @@ -79,13 +79,12 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { }() } -func (cc *ClientConnection) Stop() { - if cc.cancelFunc != nil { - cc.cancelFunc() - } else { +func (cc *ClientConnection) StopAndWait() { + if cc.ThreadTracker == nil { // If client connection never started, need to close channel close(cc.out) } + cc.StopWaiter.StopAndWait() } func (cc *ClientConnection) GetLastHeard() time.Time { diff --git a/wsbroadcastserver/clientmanager.go b/wsbroadcastserver/clientmanager.go index 7be8cbb97d..89738a8b8a 100644 --- a/wsbroadcastserver/clientmanager.go +++ b/wsbroadcastserver/clientmanager.go @@ -13,6 +13,7 @@ import ( "time" "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/arbstate/util" "github.com/pkg/errors" "github.com/gobwas/ws" @@ -30,7 +31,8 @@ type CatchupBuffer interface { // ClientManager manages client connections type ClientManager struct { - cancelFunc context.CancelFunc + util.StopWaiter + clientPtrMap map[*ClientConnection]bool clientCount int32 pool *gopool.Pool @@ -91,7 +93,7 @@ func (cm *ClientManager) removeAll() { } func (cm *ClientManager) removeClientImpl(clientConnection *ClientConnection) { - clientConnection.Stop() + clientConnection.StopAndWait() err := cm.poller.Stop(clientConnection.desc) if err != nil { @@ -193,16 +195,10 @@ func (cm *ClientManager) verifyClients() { } } -func (cm *ClientManager) Stop() { - cm.cancelFunc() -} - func (cm *ClientManager) Start(parentCtx context.Context) { - ctx, cancelFunc := context.WithCancel(parentCtx) - cm.cancelFunc = cancelFunc + ctx := cm.StopWaiter.Start(parentCtx) - go func() { - defer cancelFunc() + cm.ThreadTracker.LaunchThread(func() { defer cm.removeAll() pingInterval := time.NewTicker(cm.settings.Ping) @@ -230,5 +226,5 @@ func (cm *ClientManager) Start(parentCtx context.Context) { cm.verifyClients() } } - }() + }) } diff --git a/wsbroadcastserver/wsbroadcastserver.go b/wsbroadcastserver/wsbroadcastserver.go index 03dcc55fa4..561bd980e7 100644 --- a/wsbroadcastserver/wsbroadcastserver.go +++ b/wsbroadcastserver/wsbroadcastserver.go @@ -221,7 +221,7 @@ func (s *WSBroadcastServer) Start(ctx context.Context) error { return nil } -func (s *WSBroadcastServer) Stop() { +func (s *WSBroadcastServer) StopAndWait() { err := s.listener.Close() if err != nil { log.Warn("error in listener.Close", "err", err) @@ -237,7 +237,7 @@ func (s *WSBroadcastServer) Stop() { log.Warn("error in acceptDesc.Close", "err", err) } - s.clientManager.Stop() + s.clientManager.StopAndWait() s.started = false } From ad06e08e754d894d3acf681d68291535b71a7715 Mon Sep 17 00:00:00 2001 From: Harry Kalodner Date: Mon, 21 Feb 2022 01:42:56 -0500 Subject: [PATCH 02/32] Track L1 gas used in receipt and l1 block in header --- arbos/block_processor.go | 33 +++------------------------------ arbos/tx_processor.go | 4 ++++ go-ethereum | 2 +- 3 files changed, 8 insertions(+), 31 deletions(-) diff --git a/arbos/block_processor.go 
b/arbos/block_processor.go index 2e7c0651db..aaef992159 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -7,6 +7,7 @@ package arbos import ( "encoding/binary" "fmt" + "github.com/ethereum/go-ethereum/arbitrum" "math" "math/big" "strconv" @@ -360,35 +361,6 @@ func ProduceBlockAdvanced( return block, receipts } -type ArbitrumHeaderInfo struct { - SendRoot common.Hash - SendCount uint64 -} - -func (info ArbitrumHeaderInfo) Extra() []byte { - return info.SendRoot[:] -} - -func (info ArbitrumHeaderInfo) MixDigest() [32]byte { - mixDigest := common.Hash{} - binary.BigEndian.PutUint64(mixDigest[:8], info.SendCount) - return mixDigest -} - -func DeserializeHeaderExtraInformation(header *types.Header) (ArbitrumHeaderInfo, error) { - if header.Number.Sign() == 0 || len(header.Extra) == 0 { - // The genesis block doesn't have an ArbOS encoded extra field - return ArbitrumHeaderInfo{}, nil - } - if len(header.Extra) != 32 { - return ArbitrumHeaderInfo{}, fmt.Errorf("unexpected header extra field length %v", len(header.Extra)) - } - extra := ArbitrumHeaderInfo{} - copy(extra.SendRoot[:], header.Extra) - extra.SendCount = binary.BigEndian.Uint64(header.MixDigest[:8]) - return extra, nil -} - func FinalizeBlock(header *types.Header, txs types.Transactions, statedb *state.StateDB) { if header != nil { state, err := arbosState.OpenSystemArbosState(statedb, false) @@ -405,7 +377,8 @@ func FinalizeBlock(header *types.Header, txs types.Transactions, statedb *state. acc := state.SendMerkleAccumulator() root, _ := acc.Root() size, _ := acc.Size() - arbitrumHeader := ArbitrumHeaderInfo{root, size} + nextL1BlockNumber, _ := state.Blockhashes().NextBlockNumber() + arbitrumHeader := arbitrum.HeaderInfo{root, size, nextL1BlockNumber} header.Extra = arbitrumHeader.Extra() header.MixDigest = arbitrumHeader.MixDigest() diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index e2fd57d210..2e732bb1ce 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -411,3 +411,7 @@ func (p *TxProcessor) L1BlockHash(blockCtx vm.BlockContext, l1BlocKNumber uint64 } return state.Blockhashes().BlockHash(l1BlocKNumber) } + +func (p *TxProcessor) FillReceiptInfo(receipt *types.Receipt) { + receipt.L1GasUsed = p.posterGas +} diff --git a/go-ethereum b/go-ethereum index 6deb4eb71a..8db04dfb56 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 6deb4eb71a779bf5c768864e2b3c957bdd415a51 +Subproject commit 8db04dfb569d823aa472e14636a98a6eb0bf502e From e6680566d4460a894e1e2f0b46cedc82bd980fcb Mon Sep 17 00:00:00 2001 From: Harry Kalodner Date: Mon, 21 Feb 2022 13:38:08 -0500 Subject: [PATCH 03/32] Move HeaderInfo into types --- arbos/block_processor.go | 3 +-- go-ethereum | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/arbos/block_processor.go b/arbos/block_processor.go index aaef992159..012dc2c26e 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -7,7 +7,6 @@ package arbos import ( "encoding/binary" "fmt" - "github.com/ethereum/go-ethereum/arbitrum" "math" "math/big" "strconv" @@ -378,7 +377,7 @@ func FinalizeBlock(header *types.Header, txs types.Transactions, statedb *state. 
root, _ := acc.Root() size, _ := acc.Size() nextL1BlockNumber, _ := state.Blockhashes().NextBlockNumber() - arbitrumHeader := arbitrum.HeaderInfo{root, size, nextL1BlockNumber} + arbitrumHeader := types.HeaderInfo{root, size, nextL1BlockNumber} header.Extra = arbitrumHeader.Extra() header.MixDigest = arbitrumHeader.MixDigest() diff --git a/go-ethereum b/go-ethereum index 8db04dfb56..45de7beb48 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 8db04dfb569d823aa472e14636a98a6eb0bf502e +Subproject commit 45de7beb4873c6ec1ab00bd745eb3bfdf78d6a2c From 4f6a1218140a9e5fb2fbbdd6f52da11aace44790 Mon Sep 17 00:00:00 2001 From: Harry Kalodner Date: Mon, 21 Feb 2022 13:52:07 -0500 Subject: [PATCH 04/32] Fix compilation --- cmd/replay/main.go | 2 +- validator/block_challenge_backend.go | 3 +-- validator/block_validator.go | 4 ++-- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 4452fd3621..7e0021e132 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -149,7 +149,7 @@ func main() { log.Info("Final State", "newBlockHash", newBlockHash, "StateRoot", newBlock.Root()) - extraInfo, err := arbos.DeserializeHeaderExtraInformation(newBlock.Header()) + extraInfo, err := types.DeserializeHeaderExtraInformation(newBlock.Header()) if err != nil { panic(fmt.Sprintf("Error deserializing header extra info: %v", err)) } diff --git a/validator/block_challenge_backend.go b/validator/block_challenge_backend.go index be616c6d68..5be861dd86 100644 --- a/validator/block_challenge_backend.go +++ b/validator/block_challenge_backend.go @@ -16,7 +16,6 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/offchainlabs/arbstate/arbos" "github.com/offchainlabs/arbstate/solgen/go/challengegen" "github.com/pkg/errors" @@ -192,7 +191,7 @@ func (b *BlockChallengeBackend) FindGlobalStateFromHeader(ctx context.Context, h return GoGlobalState{}, errors.New("findBatchFromMessageCount returned bad batch") } } - extraInfo, err := arbos.DeserializeHeaderExtraInformation(header) + extraInfo, err := types.DeserializeHeaderExtraInformation(header) if err != nil { return GoGlobalState{}, err } diff --git a/validator/block_validator.go b/validator/block_validator.go index d61557995f..1906f86a9b 100644 --- a/validator/block_validator.go +++ b/validator/block_validator.go @@ -125,11 +125,11 @@ type validationEntry struct { } func newValidationEntry(prevHeader *types.Header, header *types.Header, hasDelayed bool, delayedMsgNr uint64, preimages []common.Hash) (*validationEntry, error) { - extraInfo, err := arbos.DeserializeHeaderExtraInformation(header) + extraInfo, err := types.DeserializeHeaderExtraInformation(header) if err != nil { return nil, err } - prevExtraInfo, err := arbos.DeserializeHeaderExtraInformation(prevHeader) + prevExtraInfo, err := types.DeserializeHeaderExtraInformation(prevHeader) if err != nil { return nil, err } From e4be932eb09a6adb074fa055b09ea801a4931b54 Mon Sep 17 00:00:00 2001 From: Harry Kalodner Date: Mon, 21 Feb 2022 13:52:50 -0500 Subject: [PATCH 05/32] Fix lint issue --- arbos/block_processor.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arbos/block_processor.go b/arbos/block_processor.go index 012dc2c26e..14ba23b5f3 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -377,7 +377,11 @@ func FinalizeBlock(header *types.Header, txs types.Transactions, statedb *state. 
root, _ := acc.Root()
 	size, _ := acc.Size()
 	nextL1BlockNumber, _ := state.Blockhashes().NextBlockNumber()
-	arbitrumHeader := types.HeaderInfo{root, size, nextL1BlockNumber}
+	arbitrumHeader := types.HeaderInfo{
+		SendRoot:      root,
+		SendCount:     size,
+		L1BlockNumber: nextL1BlockNumber,
+	}
 	header.Extra = arbitrumHeader.Extra()
 	header.MixDigest = arbitrumHeader.MixDigest()

From 68152b9c36b48b16536f9e09af48a96a9a6adec5 Mon Sep 17 00:00:00 2001
From: Rachel Franks
Date: Tue, 22 Feb 2022 16:52:28 -0600
Subject: [PATCH 06/32] add Gas.md

---
 docs/arbos/Gas.md | 11 +++++++++++
 1 file changed, 11 insertions(+)
 create mode 100644 docs/arbos/Gas.md

diff --git a/docs/arbos/Gas.md b/docs/arbos/Gas.md
new file mode 100644
index 0000000000..9eae014604
--- /dev/null
+++ b/docs/arbos/Gas.md
@@ -0,0 +1,11 @@
+# Gas
+TODO
+
+## Gas Estimating Retryables
+
+
+
+### NodeInterface.sol
+To avoid creating new RPC methods for client-side tooling, nitro geth's [`InterceptRPCMessage`][InterceptRPCMessage_link] hook provides an opportunity to swap out the message it's handling before deriving a transaction from it. ArbOS uses this hook to detect messages sent to the address `0xc8`, the location of a fictional contract ... TODO
+
+[InterceptRPCMessage_link]: todo
From ad48ff57efbd2c23829c338a34af4e9d0e4e2300 Mon Sep 17 00:00:00 2001
From: Rachel Franks
Date: Tue, 22 Feb 2022 23:18:07 -0600
Subject: [PATCH 07/32] document L2 tips

---
 docs/arbos/Gas.md | 11 +++++++++--
 go-ethereum       |  2 +-
 2 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/docs/arbos/Gas.md b/docs/arbos/Gas.md
index 9eae014604..3cf7510bdf 100644
--- a/docs/arbos/Gas.md
+++ b/docs/arbos/Gas.md
@@ -1,11 +1,18 @@
 # Gas
 TODO
 
-## Gas Estimating Retryables
+## Tips in L2
+While tips are not advised for those using the sequencer, which prioritizes transactions on a first-come first-served basis, 3rd-party aggregators may choose to order txes based on tips. A user specifies a tip by setting a gas price in excess of the basefee and will [pay that difference][pay_difference_link] on the amount of gas the tx uses.
+
+A poster receives the tip only when the user has set them as their [preferred aggregator](Precompiles.md#ArbAggregator). Otherwise the tip [goes to the network fee collector][goes_to_network_link]. This disincentivizes unpreferred aggregators from racing to post txes with large tips.
+
+[pay_difference_link]: https://github.com/OffchainLabs/go-ethereum/blob/edf6a19157606070b6a6660c8decc513e2408cb7/core/state_transition.go#L358
+[goes_to_network_link]: https://github.com/OffchainLabs/nitro/blob/c93c806a5cfe99f92a534d3c952a83c3c8b3088c/arbos/tx_processor.go#L262
+
+## Gas Estimating Retryables
+Gas estimation via the node's RPC will
 
-### NodeInterface.sol
+## NodeInterface.sol
 To avoid creating new RPC methods for client-side tooling, nitro geth's [`InterceptRPCMessage`][InterceptRPCMessage_link] hook provides an opportunity to swap out the message it's handling before deriving a transaction from it. ArbOS uses this hook to detect messages sent to the address `0xc8`, the location of a fictional contract ... 
TODO [InterceptRPCMessage_link]: todo diff --git a/go-ethereum b/go-ethereum index e55e6310bd..edf6a19157 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit e55e6310bd367ec34ee61b8591db835c87592c55 +Subproject commit edf6a19157606070b6a6660c8decc513e2408cb7 From f1e3d55d189039327f847864fedc08793d1cb7af Mon Sep 17 00:00:00 2001 From: Harry Kalodner Date: Wed, 23 Feb 2022 01:31:44 -0500 Subject: [PATCH 08/32] Use UpdateHeaderWithInfo method --- arbos/block_processor.go | 3 +-- go-ethereum | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/arbos/block_processor.go b/arbos/block_processor.go index 14ba23b5f3..d919597de8 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -382,8 +382,7 @@ func FinalizeBlock(header *types.Header, txs types.Transactions, statedb *state. SendCount: size, L1BlockNumber: nextL1BlockNumber, } - header.Extra = arbitrumHeader.Extra() - header.MixDigest = arbitrumHeader.MixDigest() + arbitrumHeader.UpdateHeaderWithInfo(header) state.UpgradeArbosVersionIfNecessary(header.Time) } diff --git a/go-ethereum b/go-ethereum index 45de7beb48..288e32e9ae 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 45de7beb4873c6ec1ab00bd745eb3bfdf78d6a2c +Subproject commit 288e32e9aefd6719bd91bb4b6997dc568b881908 From 067c6ad9dbe2730763b5eb86b72787ed270c8d82 Mon Sep 17 00:00:00 2001 From: Harry Kalodner Date: Wed, 23 Feb 2022 02:28:32 -0500 Subject: [PATCH 09/32] Update go-ethereum --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 288e32e9ae..2ad6c592e2 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 288e32e9aefd6719bd91bb4b6997dc568b881908 +Subproject commit 2ad6c592e2cd8a4d3248c2faa3b7cb0c836577e4 From 3262e6ef3fb89d1a1adb2660b3f45d6f002c2789 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Wed, 23 Feb 2022 13:30:34 +0200 Subject: [PATCH 10/32] update stopwaiter API --- arbnode/batch_poster.go | 4 +- arbnode/delayed_sequencer.go | 4 +- arbnode/inbox_reader.go | 4 +- arbnode/sequencer.go | 8 +-- arbnode/transaction_streamer.go | 4 +- broadcastclient/broadcastclient.go | 10 +-- util/stopwaiter.go | 99 ++++++++++++++++++++++----- validator/block_validator.go | 16 ++--- wsbroadcastserver/clientconnection.go | 9 ++- wsbroadcastserver/clientmanager.go | 4 +- 10 files changed, 111 insertions(+), 51 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index d3f16472b4..6154e80eb6 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -341,8 +341,8 @@ func (b *BatchPoster) postSequencerBatch() error { } func (b *BatchPoster) Start(ctxIn context.Context) { - ctx := b.StopWaiter.Start(ctxIn) - b.ThreadTracker.LaunchThread(func() { + b.StopWaiter.Start(ctxIn) + b.LaunchThread(func(ctx context.Context) { for { err := b.postSequencerBatch() if err != nil { diff --git a/arbnode/delayed_sequencer.go b/arbnode/delayed_sequencer.go index ecc046917d..a2ebebf58c 100644 --- a/arbnode/delayed_sequencer.go +++ b/arbnode/delayed_sequencer.go @@ -171,8 +171,8 @@ func (d *DelayedSequencer) run(ctx context.Context) error { } func (d *DelayedSequencer) Start(ctxIn context.Context) { - ctx := d.StopWaiter.Start(ctxIn) - d.ThreadTracker.LaunchThread(func() { + d.StopWaiter.Start(ctxIn) + d.LaunchThread(func(ctx context.Context) { for { err := d.run(ctx) if err != nil && !errors.Is(err, context.Canceled) { diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index 286a3ce347..750a3c0cd4 100644 
--- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -61,8 +61,8 @@ func NewInboxReader(tracker *InboxTracker, client L1Interface, firstMessageBlock } func (r *InboxReader) Start(ctxIn context.Context) error { - ctx := r.StopWaiter.Start(ctxIn) - r.ThreadTracker.LaunchThread(func() { + r.StopWaiter.Start(ctxIn) + r.LaunchThread(func(ctx context.Context) { for { err := r.run(ctx) if err != nil && !errors.Is(err, context.Canceled) { diff --git a/arbnode/sequencer.go b/arbnode/sequencer.go index bed8ec359b..a51ba091ca 100644 --- a/arbnode/sequencer.go +++ b/arbnode/sequencer.go @@ -198,17 +198,17 @@ func (s *Sequencer) sequenceTransactions() { } func (s *Sequencer) Start(ctxIn context.Context) error { - ctx := s.StopWaiter.Start(ctxIn) + s.StopWaiter.Start(ctxIn) if s.l1Client != nil { initialBlockNr := atomic.LoadUint64(&s.l1BlockNumber) if initialBlockNr == 0 { return errors.New("sequencer not initialized") } - headerChan, cancel := HeaderSubscribeWithRetry(ctx, s.l1Client) + headerChan, cancel := HeaderSubscribeWithRetry(s.GetContext(), s.l1Client) defer cancel() - s.ThreadTracker.LaunchThread(func() { + s.LaunchThread(func(ctx context.Context) { for { select { case header, ok := <-headerChan: @@ -223,7 +223,7 @@ func (s *Sequencer) Start(ctxIn context.Context) error { }) } - s.ThreadTracker.LaunchThread(func() { + s.LaunchThread(func(ctx context.Context) { for { s.sequenceTransactions() select { diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 97cc373819..f20b2fb80a 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -535,8 +535,8 @@ func (s *TransactionStreamer) Initialize() error { } func (s *TransactionStreamer) Start(ctxIn context.Context) { - ctx := s.StopWaiter.Start(ctxIn) - s.ThreadTracker.LaunchThread(func() { + s.StopWaiter.Start(ctxIn) + s.LaunchThread(func(ctx context.Context) { for { err := s.createBlocks(ctx) if err != nil && !errors.Is(err, context.Canceled) { diff --git a/broadcastclient/broadcastclient.go b/broadcastclient/broadcastclient.go index 41eaf54cdb..d1922af23d 100644 --- a/broadcastclient/broadcastclient.go +++ b/broadcastclient/broadcastclient.go @@ -71,12 +71,12 @@ func NewBroadcastClient(websocketUrl string, lastInboxSeqNum *big.Int, idleTimeo } func (bc *BroadcastClient) Start(ctxIn context.Context) { - ctx := bc.StopWaiter.Start(ctxIn) - bc.ThreadTracker.LaunchThread(func() { + bc.StopWaiter.Start(ctxIn) + bc.LaunchThread(func(ctx context.Context) { for { err := bc.connect(ctx) if err == nil { - bc.startBackgroundReader(ctx) + bc.startBackgroundReader() break } log.Warn("failed connect to sequencer broadcast, waiting and retrying", "url", bc.websocketUrl, "err", err) @@ -118,8 +118,8 @@ func (bc *BroadcastClient) connect(ctx context.Context) error { return nil } -func (bc *BroadcastClient) startBackgroundReader(ctx context.Context) { - bc.ThreadTracker.LaunchThread(func() { +func (bc *BroadcastClient) startBackgroundReader() { + bc.LaunchThread(func(ctx context.Context) { for { select { case <-ctx.Done(): diff --git a/util/stopwaiter.go b/util/stopwaiter.go index fe1b44cfac..2c7bd5ab92 100644 --- a/util/stopwaiter.go +++ b/util/stopwaiter.go @@ -2,47 +2,108 @@ package util import ( "context" + "errors" "sync" + "sync/atomic" + "time" ) -type ThreadTracker struct { +type StopWaiterSafe struct { + started uint32 + stopped uint32 stopFunc func() + ctx context.Context wg sync.WaitGroup } -func NewThreadTracker(stopFunc func()) *ThreadTracker { - return &ThreadTracker{ - stopFunc: 
stopFunc, +func (s *StopWaiterSafe) Started() bool { + return atomic.LoadUint32(&s.started) != 0 +} + +func (s *StopWaiterSafe) Stopped() bool { + return atomic.LoadUint32(&s.stopped) != 0 +} + +func (s *StopWaiterSafe) GetContext() context.Context { + return s.ctx +} + +// start-after-start will error, start-after-stop will immediately cancel +func (s *StopWaiterSafe) Start(ctx context.Context) error { + alreadyStarted := atomic.SwapUint32(&s.started, 1) + if alreadyStarted != 0 { + return errors.New("start after start") } + s.ctx, s.stopFunc = context.WithCancel(ctx) + if s.Stopped() { + s.stopFunc() + } + return nil +} + +// Stopping multiple times, even before start, will work +func (s *StopWaiterSafe) StopAndWait() { + atomic.StoreUint32(&s.stopped, 1) + s.stopFunc() + s.wg.Wait() } -// StopAndWait will only wait for thread launced with this function -func (s *ThreadTracker) LaunchThread(foo func()) { +// If stop was already called, thread might silently not be launched +func (s *StopWaiterSafe) LaunchThread(foo func(context.Context)) error { + if !s.Started() { + return errors.New("launch thread before start") + } + if s.Stopped() { + return nil + } s.wg.Add(1) go func() { - foo() + foo(s.ctx) s.wg.Done() }() + return nil } -func (s *ThreadTracker) StopAndWait() { - s.stopFunc() - s.wg.Wait() +// This calls go foo() directly, with the benefit of being easily searchable +func (s *StopWaiterSafe) LaunchUntrackedThread(foo func()) { + go foo() +} + +func (s *StopWaiterSafe) LaunchWithInterval(foo func(context.Context), interval time.Duration) error { + return s.LaunchThread(func(ctx context.Context) { + ThreadMainLoop: + for { + foo(ctx) + timer := time.NewTimer(interval) + select { + case <-ctx.Done(): + timer.Stop() + break ThreadMainLoop + case <-timer.C: + } + } + }) } +// May panic on race conditions instead of returning errors type StopWaiter struct { - ThreadTracker *ThreadTracker + StopWaiterSafe } -func (s *StopWaiter) Start(ctx context.Context) context.Context { - wrapped, cancelfunc := context.WithCancel(ctx) - s.ThreadTracker = NewThreadTracker(cancelfunc) - return wrapped +func (s *StopWaiter) Start(ctx context.Context) { + if err := s.StopWaiterSafe.Start(ctx); err != nil { + panic(err) + } +} + +func (s *StopWaiter) LaunchThread(foo func(context.Context)) { + if err := s.StopWaiterSafe.LaunchThread(foo); err != nil { + panic(err) + } } -// Not thread safe vs Start or itself -func (s *StopWaiter) StopAndWait() { - if s.ThreadTracker != nil { - s.ThreadTracker.StopAndWait() +func (s *StopWaiter) LaunchWithInterval(foo func(context.Context), interval time.Duration) { + if err := s.StopWaiterSafe.LaunchWithInterval(foo, interval); err != nil { + panic(err) } } diff --git a/validator/block_validator.go b/validator/block_validator.go index 1860346d06..16d589f196 100644 --- a/validator/block_validator.go +++ b/validator/block_validator.go @@ -230,7 +230,7 @@ func (v *BlockValidator) prepareBlock(header *types.Header, prevHeader *types.He } func (v *BlockValidator) NewBlock(block *types.Block, prevHeader *types.Header, msg arbstate.MessageWithMetadata) { - v.ThreadTracker.LaunchThread(func() { v.prepareBlock(block.Header(), prevHeader, msg) }) + v.LaunchUntrackedThread(func() { v.prepareBlock(block.Header(), prevHeader, msg) }) } var launchTime = time.Now().Format("2006_01_02__15_04") @@ -508,8 +508,8 @@ func (v *BlockValidator) sendValidations(ctx context.Context) { } } -func (v *BlockValidator) startValidationLoop(ctx context.Context) { - v.ThreadTracker.LaunchThread(func() { 
+func (v *BlockValidator) startValidationLoop() { + v.LaunchThread(func(ctx context.Context) { for { select { case _, ok := <-v.sendValidationsChan: @@ -558,8 +558,8 @@ func (v *BlockValidator) progressValidated() { } } -func (v *BlockValidator) startProgressLoop(ctx context.Context) { - v.ThreadTracker.LaunchThread(func() { +func (v *BlockValidator) startProgressLoop() { + v.LaunchThread(func(ctx context.Context) { for { select { case _, ok := <-v.checkProgressChan: @@ -589,9 +589,9 @@ func (v *BlockValidator) ProcessBatches(batches map[uint64][]byte) { } func (v *BlockValidator) Start(ctxIn context.Context) error { - ctx := v.StopWaiter.Start(ctxIn) - v.startProgressLoop(ctx) - v.startValidationLoop(ctx) + v.StopWaiter.Start(ctxIn) + v.startProgressLoop() + v.startValidationLoop() return nil } diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index 7482c60760..026fcd0f03 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -52,9 +52,8 @@ func NewClientConnection(conn net.Conn, desc *netpoll.Desc, clientManager *Clien } func (cc *ClientConnection) Start(parentCtx context.Context) { - ctx := cc.StopWaiter.Start(parentCtx) - - go func() { + cc.StopWaiter.Start(parentCtx) + cc.LaunchThread(func(ctx context.Context) { defer close(cc.out) for { select { @@ -76,11 +75,11 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { } } } - }() + }) } func (cc *ClientConnection) StopAndWait() { - if cc.ThreadTracker == nil { + if !cc.Started() { // If client connection never started, need to close channel close(cc.out) } diff --git a/wsbroadcastserver/clientmanager.go b/wsbroadcastserver/clientmanager.go index 89738a8b8a..471054a5a4 100644 --- a/wsbroadcastserver/clientmanager.go +++ b/wsbroadcastserver/clientmanager.go @@ -196,9 +196,9 @@ func (cm *ClientManager) verifyClients() { } func (cm *ClientManager) Start(parentCtx context.Context) { - ctx := cm.StopWaiter.Start(parentCtx) + cm.StopWaiter.Start(parentCtx) - cm.ThreadTracker.LaunchThread(func() { + cm.LaunchThread(func(ctx context.Context) { defer cm.removeAll() pingInterval := time.NewTicker(cm.settings.Ping) From 1b6a9f00c4ae748d923b4f64acd494f41c96159f Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Wed, 23 Feb 2022 13:13:41 -0600 Subject: [PATCH 11/32] nodeinterface.sol and gas estimating retryables --- docs/arbos/Gas.md | 35 ++++++++++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/docs/arbos/Gas.md b/docs/arbos/Gas.md index 3cf7510bdf..694a98ca99 100644 --- a/docs/arbos/Gas.md +++ b/docs/arbos/Gas.md @@ -1,5 +1,9 @@ -# Gas -TODO +# Gas and Fees +Fees exist to + +Gas is a concept that serves two purposes: and buying network resources. + + ## Tips in L2 While tips are not advised for those using the sequencer, which prioritizes transactions on a first-come first-served basis, 3rd-party aggregators may choose to order txes based on tips. A user specifies a tip by setting a gas price in excess of the basefee and will [pay that difference][pay_difference_link] on the amount of gas the tx uses. 
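A note on the tip mechanics the context lines above quote from Gas.md: a tip is the excess of the user's gas price over the basefee, paid on each unit of gas the tx uses, and credited either to the preferred aggregator or to the network fee collector. Below is a sketch of that arithmetic under those stated assumptions — `tipWei` is an illustrative helper, not code from this series:

```go
package example

import "math/big"

// tipWei returns the total tip in wei: (gasPrice - basefee) * gasUsed,
// or zero when the gas price does not exceed the basefee.
func tipWei(gasPrice, basefee *big.Int, gasUsed uint64) *big.Int {
	perGas := new(big.Int).Sub(gasPrice, basefee)
	if perGas.Sign() <= 0 {
		return big.NewInt(0) // no tip specified
	}
	return perGas.Mul(perGas, new(big.Int).SetUint64(gasUsed))
}
```

For instance, a gas price of 110 gwei against a 100 gwei basefee pays a 10 gwei-per-gas tip, i.e. 2.1e14 wei on a 21,000-gas transfer.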
@@ -9,10 +13,31 @@ A poster receives the tip only when the user has set them as their [preferred ag
 [pay_difference_link]: https://github.com/OffchainLabs/go-ethereum/blob/edf6a19157606070b6a6660c8decc513e2408cb7/core/state_transition.go#L358
 [goes_to_network_link]: https://github.com/OffchainLabs/nitro/blob/c93c806a5cfe99f92a534d3c952a83c3c8b3088c/arbos/tx_processor.go#L262
 
+## Geth Gas Pool vs ArbOS's
+
+
 ## Gas Estimating Retryables
-Gas estimation via the node's RPC will
+When a transaction schedules another, the subsequent tx's execution [will be included][estimation_inclusion_link] when estimating gas via the node's RPC. A tx's gas estimate, then, can only be found if all the txes succeed at a given gas limit. This is especially important when working with retryables and scheduling redeem attempts.
+
+Because a call to [`redeem`](#ArbRetryableTx) donates all of the caller's gas, one must use a subcall to limit the amount sent should multiple calls be made. Otherwise the first will take all of the gas and force the second to necessarily fail irrespective of the estimation's gas limit.
+
+Gas estimation for Retryable submissions is possible via `NodeInterface.sol` and similarly requires that the auto-redeem attempt succeed.
+
+## NodeInterface.sol
+To avoid creating new RPC methods for client-side tooling, nitro geth's [`InterceptRPCMessage`][InterceptRPCMessage_link] hook provides an opportunity to swap out the message it's handling before deriving a transaction from it. The node [uses this hook][use_hook_link] to detect messages sent to the address `0xc8`, the location of the fictional `NodeInterface` contract specified in [`NodeInterface.sol`][node_interface_link].
+
+`NodeInterface` isn't deployed on L2 and only exists in the RPC, but it contains methods callable via `0xc8`. Doing so requires setting the `To` field to `0xc8` and supplying calldata for the method. Below is the list of methods.
+
+| Method                                                            | Info                                                 |
+|:------------------------------------------------------------------|:----------------------------------------------------|
+| [`estimateRetryableTicket`][estimateRetryableTicket_link]   | Estimates the gas needed for a retryable submission  |
+
+
+[estimation_inclusion_link]: https://github.com/OffchainLabs/go-ethereum/blob/edf6a19157606070b6a6660c8decc513e2408cb7/internal/ethapi/api.go#L955
+[use_hook_link]: https://github.com/OffchainLabs/nitro/blob/57e03322926f796f75a21f8735cc64ea0a2d11c3/arbstate/node-interface.go#L17
+[node_interface_link]: https://github.com/OffchainLabs/nitro/blob/master/solgen/src/node_interface/NodeInterface.sol
+[estimateRetryableTicket_link]: https://github.com/OffchainLabs/nitro/blob/8ab1d6730164e18d0ca1bd5635ca12aadf36a640/solgen/src/node_interface/NodeInterface.sol#L21
+
-## NodeInterface.sol
-To avoid creating new RPC methods for client-side tooling, nitro geth's [`InterceptRPCMessage`][InterceptRPCMessage_link] hook provides an opportunity to swap out the message it's handling before deriving a transaction from it. ArbOS uses this hook to detect messages sent to the address `0xc8`, the location of a fictional contract ... 
TODO [InterceptRPCMessage_link]: todo From 27f9cbaa249a4c73a147e99b7f69d74a89130a1b Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Wed, 23 Feb 2022 19:37:51 +0200 Subject: [PATCH 12/32] stopwaiter: update API, testing --- arbnode/batch_poster.go | 25 ++++------ arbnode/delayed_sequencer.go | 16 ++----- arbnode/inbox_reader.go | 17 ++----- arbnode/node.go | 2 +- arbnode/sequencer.go | 28 ++++++----- cmd/node/node.go | 1 + system_tests/block_validator_test.go | 7 +-- system_tests/seqfeed_test.go | 14 ++++-- system_tests/twonodes_test.go | 7 ++- system_tests/twonodeslong_test.go | 9 +++- util/stopwaiter.go | 72 ++++++++++++++++++++-------- validator/block_validator.go | 2 +- validator/staker.go | 58 +++++++++------------- 13 files changed, 137 insertions(+), 121 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 4fc02e6de3..2f59cd627f 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -344,24 +344,17 @@ func (b *BatchPoster) postSequencerBatch() (*types.Transaction, error) { func (b *BatchPoster) Start(ctxIn context.Context) { b.StopWaiter.Start(ctxIn) - b.LaunchThread(func(ctx context.Context) { - for { - tx, err := b.postSequencerBatch() + b.CallIteratively(func(ctx context.Context) time.Duration { + tx, err := b.postSequencerBatch() + if err != nil { + log.Error("error posting batch", "err", err) + } + if tx != nil { + _, err = arbutil.EnsureTxSucceededWithTimeout(ctx, b.client, tx, time.Minute) if err != nil { - log.Error("error posting batch", "err", err) - } - if tx != nil { - _, err = arbutil.EnsureTxSucceededWithTimeout(ctx, b.client, tx, time.Minute) - if err != nil { - log.Error("failed ensuring batch tx succeeded", "err", err) - } - } - select { - case <-ctx.Done(): - return - // TODO: dont use time.After - case <-time.After(b.config.BatchPollDelay): + log.Error("failed ensuring batch tx succeeded", "err", err) } } + return b.config.BatchPollDelay }) } diff --git a/arbnode/delayed_sequencer.go b/arbnode/delayed_sequencer.go index 93ff317df1..917858abbf 100644 --- a/arbnode/delayed_sequencer.go +++ b/arbnode/delayed_sequencer.go @@ -172,17 +172,11 @@ func (d *DelayedSequencer) run(ctx context.Context) error { func (d *DelayedSequencer) Start(ctxIn context.Context) { d.StopWaiter.Start(ctxIn) - d.LaunchThread(func(ctx context.Context) { - for { - err := d.run(ctx) - if err != nil && !errors.Is(err, context.Canceled) { - log.Error("error reading inbox", "err", err) - } - select { - case <-ctx.Done(): - return - case <-time.After(time.Second): - } + d.CallIteratively(func(ctx context.Context) time.Duration { + err := d.run(ctx) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error("error reading inbox", "err", err) } + return time.Second }) } diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index dbf51c3963..bfbb493609 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -63,19 +63,12 @@ func NewInboxReader(tracker *InboxTracker, client arbutil.L1Interface, firstMess func (r *InboxReader) Start(ctxIn context.Context) error { r.StopWaiter.Start(ctxIn) - r.LaunchThread(func(ctx context.Context) { - for { - err := r.run(ctx) - if err != nil && !errors.Is(err, context.Canceled) { - log.Error("error reading inbox", "err", err) - } - select { - case <-ctx.Done(): - return - // TODO: don't use time.After - case <-time.After(time.Second): - } + r.CallIteratively(func(ctx context.Context) time.Duration { + err := r.run(ctx) + if err != nil && !errors.Is(err, context.Canceled) { + log.Error("error 
reading inbox", "err", err) } + return time.Second }) // Ensure we read the init message before other things start up diff --git a/arbnode/node.go b/arbnode/node.go index 70756782ba..44bd39091e 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -433,7 +433,7 @@ func (n *Node) Start(ctx context.Context) error { return nil } -func (n *Node) StopAndWait(ctx context.Context) { +func (n *Node) StopAndWait() { if n.BroadcastClient != nil { n.BroadcastClient.StopAndWait() } diff --git a/arbnode/sequencer.go b/arbnode/sequencer.go index 9b42cb66a4..ba34932825 100644 --- a/arbnode/sequencer.go +++ b/arbnode/sequencer.go @@ -63,7 +63,12 @@ func (s *Sequencer) PublishTransaction(ctx context.Context, tx *types.Transactio resultChan, ctx, } - return <-resultChan + select { + case res := <-resultChan: + return res + case <-ctx.Done(): + return ctx.Err() + } } func (s *Sequencer) Initialize(ctx context.Context) error { @@ -97,7 +102,7 @@ func postTxFilter(state *arbosState.ArbosState, tx *types.Transaction, sender co return nil } -func (s *Sequencer) sequenceTransactions() { +func (s *Sequencer) sequenceTransactions(ctx context.Context) { timestamp := common.BigToHash(new(big.Int).SetInt64(time.Now().Unix())) l1Block := atomic.LoadUint64(&s.l1BlockNumber) for s.l1Client != nil && l1Block == 0 { @@ -112,7 +117,11 @@ func (s *Sequencer) sequenceTransactions() { for { var queueItem txQueueItem if len(txes) == 0 { - queueItem = <-s.txQueue + select { + case queueItem = <-s.txQueue: + case <-ctx.Done(): + return + } } else { done := false select { @@ -224,16 +233,9 @@ func (s *Sequencer) Start(ctxIn context.Context) error { }) } - s.LaunchThread(func(ctx context.Context) { - for { - s.sequenceTransactions() - select { - // TODO: don't use time.After - case <-time.After(minBlockInterval): - case <-ctx.Done(): - return - } - } + s.CallIteratively(func(ctx context.Context) time.Duration { + s.sequenceTransactions(ctx) + return minBlockInterval }) return nil diff --git a/cmd/node/node.go b/cmd/node/node.go index 3c713f0393..4d2afea8f6 100644 --- a/cmd/node/node.go +++ b/cmd/node/node.go @@ -288,4 +288,5 @@ func main() { } stack.Wait() + node.StopAndWait() } diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index 6f624754b9..12165dbb79 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -21,10 +21,10 @@ import ( func TestBlockValidatorSimple(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node1, l2client, l1info, _, l1client, l1stack := CreateTestNodeOnL1(t, ctx, true) + l2info, nodeA, l2client, l1info, _, l1client, l1stack := CreateTestNodeOnL1(t, ctx, true) defer l1stack.Close() - l2clientB, nodeB := Create2ndNode(t, ctx, node1, l1stack, &l2info.ArbInitData, true) + l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, &l2info.ArbInitData, true) l2info.GenerateAccount("User2") @@ -67,8 +67,9 @@ func TestBlockValidatorSimple(t *testing.T) { lastBlockHeader, err := l2clientB.HeaderByNumber(ctx, nil) Require(t, err) testDeadLine, _ := t.Deadline() + nodeA.StopAndWait() if !nodeB.BlockValidator.WaitForBlock(lastBlockHeader.Number.Uint64(), time.Until(testDeadLine)-time.Second*10) { Fail(t, "did not validate all blocks") } - + nodeB.StopAndWait() } diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index d60d8e46ea..e3777c6f3d 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -49,8 +49,8 @@ func TestSequencerFeed(t *testing.T) { 
ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info1, _, client1 := CreateTestL2WithConfig(t, ctx, nil, &seqNodeConfig, true) - _, _, client2 := CreateTestL2WithConfig(t, ctx, nil, &clientNodeConfig, false) + l2info1, nodeA, client1 := CreateTestL2WithConfig(t, ctx, nil, &seqNodeConfig, true) + _, nodeB, client2 := CreateTestL2WithConfig(t, ctx, nil, &clientNodeConfig, false) l2info1.GenerateAccount("User2") @@ -69,6 +69,8 @@ func TestSequencerFeed(t *testing.T) { if l2balance.Cmp(big.NewInt(1e12)) != 0 { t.Fatal("Unexpected balance:", l2balance) } + nodeA.StopAndWait() + nodeB.StopAndWait() } func TestLyingSequencer(t *testing.T) { @@ -89,7 +91,7 @@ func TestLyingSequencer(t *testing.T) { nodeConfigC.BatchPoster = false nodeConfigC.Broadcaster = true nodeConfigC.BroadcasterConfig = *newBroadcasterConfigTest(port) - l2clientC, _ := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, &l2infoA.ArbInitData, &nodeConfigC) + l2clientC, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, &l2infoA.ArbInitData, &nodeConfigC) // The client node, connects to lying sequencer's feed nodeConfigB := arbnode.NodeConfigL1Test @@ -97,7 +99,7 @@ func TestLyingSequencer(t *testing.T) { nodeConfigB.BatchPoster = false nodeConfigB.BroadcastClient = true nodeConfigB.BroadcastClientConfig = *newBroadcastClientConfigTest(port) - l2clientB, _ := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, &l2infoA.ArbInitData, &nodeConfigB) + l2clientB, nodeC := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, &l2infoA.ArbInitData, &nodeConfigB) l2infoA.GenerateAccount("FraudUser") l2infoA.GenerateAccount("RealUser") @@ -160,4 +162,8 @@ func TestLyingSequencer(t *testing.T) { if l2balanceRealAcct.Cmp(big.NewInt(1e12)) != 0 { t.Fatal("Unexpected balance:", l2balanceRealAcct) } + + nodeA.StopAndWait() + nodeB.StopAndWait() + nodeC.StopAndWait() } diff --git a/system_tests/twonodes_test.go b/system_tests/twonodes_test.go index a463e51bea..7f40429be9 100644 --- a/system_tests/twonodes_test.go +++ b/system_tests/twonodes_test.go @@ -17,10 +17,10 @@ import ( func TestTwoNodesSimple(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node1, l2clientA, l1info, _, l1client, l1stack := CreateTestNodeOnL1(t, ctx, true) + l2info, nodeA, l2clientA, l1info, _, l1client, l1stack := CreateTestNodeOnL1(t, ctx, true) defer l1stack.Close() - l2clientB, _ := Create2ndNode(t, ctx, node1, l1stack, &l2info.ArbInitData, false) + l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, &l2info.ArbInitData, false) l2info.GenerateAccount("User2") @@ -51,4 +51,7 @@ func TestTwoNodesSimple(t *testing.T) { if l2balance.Cmp(big.NewInt(1e12)) != 0 { Fail(t, "Unexpected balance:", l2balance) } + + nodeA.StopAndWait() + nodeB.StopAndWait() } diff --git a/system_tests/twonodeslong_test.go b/system_tests/twonodeslong_test.go index e1e0ed084a..1463fb3100 100644 --- a/system_tests/twonodeslong_test.go +++ b/system_tests/twonodeslong_test.go @@ -37,10 +37,10 @@ func TestTwoNodesLong(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l2info, node1, l2client, l1info, l1backend, l1client, l1stack := CreateTestNodeOnL1(t, ctx, true) + l2info, nodeA, l2client, l1info, l1backend, l1client, l1stack := CreateTestNodeOnL1(t, ctx, true) defer l1stack.Close() - l2clientB, nodeB := Create2ndNode(t, ctx, node1, l1stack, &l2info.ArbInitData, false) + l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, &l2info.ArbInitData, false) 
l2info.GenerateAccount("DelayedFaucet") l2info.GenerateAccount("DelayedReceiver") @@ -157,6 +157,9 @@ func TestTwoNodesLong(t *testing.T) { t.Error("owner balance", ownerBalance, "delayed faucet", delayedFaucetBalance) Fail(t, "Unexpected balance") } + + nodeA.StopAndWait() + if nodeB.BlockValidator != nil { lastBlockHeader, err := l2clientB.HeaderByNumber(ctx, nil) Require(t, err) @@ -171,4 +174,6 @@ func TestTwoNodesLong(t *testing.T) { Fail(t, "did not validate all blocks") } } + + nodeB.StopAndWait() } diff --git a/util/stopwaiter.go b/util/stopwaiter.go index 2c7bd5ab92..3e79d98930 100644 --- a/util/stopwaiter.go +++ b/util/stopwaiter.go @@ -4,47 +4,67 @@ import ( "context" "errors" "sync" - "sync/atomic" "time" ) type StopWaiterSafe struct { - started uint32 - stopped uint32 - stopFunc func() + mutex sync.Mutex // protects started, stopped, ctx, stopFunc + started bool + stopped bool ctx context.Context - wg sync.WaitGroup + stopFunc func() + + wg sync.WaitGroup } func (s *StopWaiterSafe) Started() bool { - return atomic.LoadUint32(&s.started) != 0 + s.mutex.Lock() + defer s.mutex.Unlock() + return s.started } func (s *StopWaiterSafe) Stopped() bool { - return atomic.LoadUint32(&s.stopped) != 0 + s.mutex.Lock() + defer s.mutex.Unlock() + return s.stopped } -func (s *StopWaiterSafe) GetContext() context.Context { - return s.ctx +func (s *StopWaiterSafe) GetContext() (context.Context, error) { + s.mutex.Lock() + defer s.mutex.Unlock() + if s.started { + return s.ctx, nil + } + return nil, errors.New("not started") } // start-after-start will error, start-after-stop will immediately cancel func (s *StopWaiterSafe) Start(ctx context.Context) error { - alreadyStarted := atomic.SwapUint32(&s.started, 1) - if alreadyStarted != 0 { + s.mutex.Lock() + defer s.mutex.Unlock() + if s.started { return errors.New("start after start") } + s.started = true s.ctx, s.stopFunc = context.WithCancel(ctx) - if s.Stopped() { + if s.stopped { s.stopFunc() } return nil } +func (s *StopWaiterSafe) StopOnly() { + s.mutex.Lock() + defer s.mutex.Unlock() + if s.started && !s.stopped { + s.stopFunc() + } + s.stopped = true +} + // Stopping multiple times, even before start, will work func (s *StopWaiterSafe) StopAndWait() { - atomic.StoreUint32(&s.stopped, 1) - s.stopFunc() + s.StopOnly() s.wg.Wait() } @@ -69,16 +89,20 @@ func (s *StopWaiterSafe) LaunchUntrackedThread(foo func()) { go foo() } -func (s *StopWaiterSafe) LaunchWithInterval(foo func(context.Context), interval time.Duration) error { +// call function iteratively in a thread. 
+// input param return value is how long to wait before next invocation +func (s *StopWaiterSafe) CallIteratively(foo func(context.Context) time.Duration) error { return s.LaunchThread(func(ctx context.Context) { - ThreadMainLoop: for { - foo(ctx) + interval := foo(ctx) + if ctx.Err() != nil { + return + } timer := time.NewTimer(interval) select { case <-ctx.Done(): timer.Stop() - break ThreadMainLoop + return case <-timer.C: } } @@ -102,8 +126,16 @@ func (s *StopWaiter) LaunchThread(foo func(context.Context)) { } } -func (s *StopWaiter) LaunchWithInterval(foo func(context.Context), interval time.Duration) { - if err := s.StopWaiterSafe.LaunchWithInterval(foo, interval); err != nil { +func (s *StopWaiter) CallIteratively(foo func(context.Context) time.Duration) { + if err := s.StopWaiterSafe.CallIteratively(foo); err != nil { + panic(err) + } +} + +func (s *StopWaiter) GetContext() context.Context { + ctx, err := s.StopWaiterSafe.GetContext() + if err != nil { panic(err) } + return ctx } diff --git a/validator/block_validator.go b/validator/block_validator.go index 25d17fbafe..c4c654e605 100644 --- a/validator/block_validator.go +++ b/validator/block_validator.go @@ -523,7 +523,7 @@ func (v *BlockValidator) sendValidations(ctx context.Context) { atomic.AddInt32(&v.atomicValidationsRunning, 1) validationEntry.SeqMsgNr = startPos.BatchNumber // validation can take long time. Don't wait for it when shutting down - go v.validate(ctx, validationEntry, startPos, endPos) + v.LaunchThread(func(ctx context.Context) { v.validate(ctx, validationEntry, startPos, endPos) }) v.posNextSend += 1 v.globalPosNextSend = endPos } diff --git a/validator/staker.go b/validator/staker.go index f5ade4e480..28a54519a6 100644 --- a/validator/staker.go +++ b/validator/staker.go @@ -18,6 +18,7 @@ import ( "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/arbstate/arbutil" + "github.com/offchainlabs/arbstate/util" "github.com/pkg/errors" ) @@ -60,6 +61,7 @@ type nodeAndHash struct { type Staker struct { *Validator + util.StopWaiter activeChallenge *ChallengeManager strategy StakerStrategy baseCallOpts bind.CallOpts @@ -129,44 +131,28 @@ func NewStaker( }, nil } -func (s *Staker) Start(ctx context.Context) chan bool { - done := make(chan bool) - go func() { - defer func() { - done <- true - }() - backoff := time.Second - for { - arbTx, err := s.Act(ctx) - if err == nil && arbTx != nil { - _, err = arbutil.EnsureTxSucceededWithTimeout(ctx, s.client, arbTx, txTimeout) - err = errors.Wrap(err, "error waiting for tx receipt") - if err == nil { - log.Info("successfully executed staker transaction", "hash", arbTx.Hash()) - } - } - if err != nil { - log.Warn("error acting as staker", "err", err) - select { - case <-ctx.Done(): - return - case <-time.After(backoff): - } - if backoff < 60*time.Second { - backoff *= 2 - } - continue - } else { - backoff = time.Second - } - select { - case <-time.After(s.config.StakerDelay): - default: - return +func (s *Staker) Start(ctxIn context.Context) { + s.StopWaiter.Start(ctxIn) + backoff := time.Second + s.CallIteratively(func(ctx context.Context) time.Duration { + arbTx, err := s.Act(ctx) + if err == nil && arbTx != nil { + _, err = arbutil.EnsureTxSucceededWithTimeout(ctx, s.client, arbTx, txTimeout) + err = errors.Wrap(err, "error waiting for tx receipt") + if err == nil { + log.Info("successfully executed staker transaction", "hash", arbTx.Hash()) } } - }() - return done + if err == nil { + backoff = time.Second + return 
s.config.StakerDelay + } + log.Warn("error acting as staker", "err", err) + if backoff < 60*time.Second { + backoff *= 2 + } + return backoff + }) } func (s *Staker) shouldAct(ctx context.Context) bool { From 6b26429a948e1413d3d8682652694c71808f8dee Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Wed, 23 Feb 2022 21:56:19 +0200 Subject: [PATCH 13/32] launchthread: improve atomicity --- util/stopwaiter.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/util/stopwaiter.go b/util/stopwaiter.go index 3e79d98930..18beea787f 100644 --- a/util/stopwaiter.go +++ b/util/stopwaiter.go @@ -70,15 +70,16 @@ func (s *StopWaiterSafe) StopAndWait() { // If stop was already called, thread might silently not be launched func (s *StopWaiterSafe) LaunchThread(foo func(context.Context)) error { - if !s.Started() { - return errors.New("launch thread before start") + ctx, err := s.GetContext() + if err != nil { + return err } if s.Stopped() { return nil } s.wg.Add(1) go func() { - foo(s.ctx) + foo(ctx) s.wg.Done() }() return nil From eb2faac397088826a9a9a710fed7d1d3d39aabc4 Mon Sep 17 00:00:00 2001 From: Harry Kalodner Date: Wed, 23 Feb 2022 17:22:43 -0500 Subject: [PATCH 14/32] Fix tx resultChan channel leak --- arbnode/sequencer.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/arbnode/sequencer.go b/arbnode/sequencer.go index 5bbd314988..75687d1982 100644 --- a/arbnode/sequencer.go +++ b/arbnode/sequencer.go @@ -37,6 +37,11 @@ type txQueueItem struct { ctx context.Context } +func (i *txQueueItem) returnResult(err error) { + i.resultChan <- err + close(i.resultChan) +} + type Sequencer struct { txStreamer *TransactionStreamer txQueue chan txQueueItem @@ -123,17 +128,17 @@ func (s *Sequencer) sequenceTransactions() { } err := queueItem.ctx.Err() if err != nil { - queueItem.resultChan <- err + queueItem.returnResult(err) continue } txBytes, err := queueItem.tx.MarshalBinary() if err != nil { - queueItem.resultChan <- err + queueItem.returnResult(err) continue } if len(txBytes) > int(maxTxDataSize) { // This tx is too large - queueItem.resultChan <- core.ErrOversizedData + queueItem.returnResult(core.ErrOversizedData) continue } if totalBatchSize+len(txBytes) > int(maxTxDataSize) { @@ -143,7 +148,7 @@ func (s *Sequencer) sequenceTransactions() { select { case s.txQueue <- queueItem: default: - queueItem.resultChan <- core.ErrOversizedData + queueItem.returnResult(core.ErrOversizedData) } break } @@ -174,7 +179,7 @@ func (s *Sequencer) sequenceTransactions() { if err != nil { log.Error("error sequencing transactions", "err", err) for _, queueItem := range queueItems { - queueItem.resultChan <- err + queueItem.returnResult(err) } return } @@ -191,7 +196,7 @@ func (s *Sequencer) sequenceTransactions() { default: } } - queueItem.resultChan <- err + queueItem.returnResult(err) } } From d97cc00bb04c18f6e6370f0f4acef1e04c1683ed Mon Sep 17 00:00:00 2001 From: Harry Kalodner Date: Wed, 23 Feb 2022 17:35:06 -0500 Subject: [PATCH 15/32] Update go-ethereum --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 2ad6c592e2..51d99b22c9 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 2ad6c592e2cd8a4d3248c2faa3b7cb0c836577e4 +Subproject commit 51d99b22c92c7995009ac2294d2d4d57156db2ee From 98f9d79eb2e53dbab6f57353f845b850433c5ff8 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Wed, 23 Feb 2022 21:29:56 -0600 Subject: [PATCH 16/32] gas-related docs & changes revealed by docs --- 
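Taken together, the preceding patches converge on one lifecycle idiom. The following self-contained Go sketch illustrates that idiom — a mutex-guarded start/stop state, a WaitGroup-tracked worker, and a `CallIteratively`-style loop whose callback returns the delay before its next run. It is an illustration of the pattern, not the `util` package verbatim; the `main` driver and its millisecond timings are invented for demonstration.

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// stopWaiter is a minimal sketch of the StopWaiter pattern: a mutex-guarded
// lifecycle plus a WaitGroup so StopAndWait blocks until every launched
// goroutine has returned.
type stopWaiter struct {
	mutex    sync.Mutex
	started  bool
	stopped  bool
	ctx      context.Context
	stopFunc context.CancelFunc
	wg       sync.WaitGroup
}

func (s *stopWaiter) Start(ctx context.Context) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.started = true
	s.ctx, s.stopFunc = context.WithCancel(ctx)
}

func (s *stopWaiter) StopAndWait() {
	s.mutex.Lock()
	if s.started && !s.stopped {
		s.stopFunc()
	}
	s.stopped = true
	s.mutex.Unlock()
	s.wg.Wait() // block until the tracked goroutine exits
}

// CallIteratively runs foo in a tracked goroutine; foo's return value is how
// long to wait before the next invocation. Assumes Start was called first.
func (s *stopWaiter) CallIteratively(foo func(context.Context) time.Duration) {
	s.mutex.Lock()
	ctx := s.ctx
	s.mutex.Unlock()
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		for {
			interval := foo(ctx)
			if ctx.Err() != nil {
				return
			}
			timer := time.NewTimer(interval)
			select {
			case <-ctx.Done():
				timer.Stop()
				return
			case <-timer.C:
			}
		}
	}()
}

func main() {
	var s stopWaiter
	s.Start(context.Background())
	backoff := 10 * time.Millisecond
	s.CallIteratively(func(ctx context.Context) time.Duration {
		d := backoff
		fmt.Println("tick; next run in", d)
		backoff *= 2 // back off on each iteration, as the staker does on errors
		return d
	})
	time.Sleep(100 * time.Millisecond)
	s.StopAndWait() // cancels the context and waits for the loop to exit
}
```

Returning the next interval from the callback is what lets the staker above stretch its delay into an exponential backoff on errors while snapping back to a fixed delay on success.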
arbnode/node.go | 2 +- arbos/block_processor.go | 19 ++++++++++++++----- arbos/l2pricing/l2pricing.go | 2 +- docs/arbos/ArbOS.md | 17 ++++++++++++++--- docs/arbos/Gas.md | 22 +++++++++------------- docs/arbos/Geth.md | 21 +++++++++++++++++---- docs/arbos/Precompiles.md | 20 ++++++++++---------- 7 files changed, 66 insertions(+), 37 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index baf250ada7..91cf914677 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -553,7 +553,7 @@ func WriteOrTestGenblock(chainDb ethdb.Database, initData statetransfer.InitData Time: timestamp, ParentHash: prevHash, Extra: []byte("ArbitrumMainnet"), - GasLimit: l2pricing.L2GasLimit, + GasLimit: l2pricing.GethBlockGasLimit, GasUsed: 0, BaseFee: big.NewInt(l2pricing.InitialBaseFeeWei), Difficulty: genDifficulty, diff --git a/arbos/block_processor.go b/arbos/block_processor.go index 6f964c4db5..7f296dfa21 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -64,7 +64,7 @@ func createNewHeader(prevHeader *types.Header, l1info *L1Info, state *arbosState Bloom: [256]byte{}, // Filled in later Difficulty: big.NewInt(1), Number: blockNumber, - GasLimit: l2pricing.L2GasLimit, + GasLimit: l2pricing.GethBlockGasLimit, GasUsed: 0, Time: timestamp, Extra: []byte{}, // Unused @@ -163,7 +163,7 @@ func ProduceBlockAdvanced( userTxsCompleted := 0 // We'll check that the block can fit each message, so this pool is set to not run out - gethGas := core.GasPool(1 << 63) + gethGas := core.GasPool(l2pricing.GethBlockGasLimit) for len(txes) > 0 || len(redeems) > 0 { // repeatedly process the next tx, doing redeems created along the way in FIFO order @@ -233,6 +233,10 @@ func ProduceBlockAdvanced( } computeGas := tx.Gas() - dataGas + if computeGas < params.TxGas { + // ensure at least TxGas is left in the pool before trying a state transition + computeGas = params.TxGas + } if computeGas > gasLeft && isUserTx && userTxsCompleted > 0 { return nil, nil, core.ErrGasLimitReached @@ -241,8 +245,6 @@ func ProduceBlockAdvanced( snap := statedb.Snapshot() statedb.Prepare(tx.Hash(), len(receipts)) // the number of successful state transitions - gasLeft -= computeGas - receipt, result, err := core.ApplyTransaction( chainConfig, chainContext, @@ -315,9 +317,16 @@ func ProduceBlockAdvanced( } } + computeUsed := gasUsed - dataGas + if computeUsed < params.TxGas { + // a tx, even if invalid, must at least reduce the pool by TxGas + computeUsed = params.TxGas + } + gasLeft -= computeUsed + complete = append(complete, tx) receipts = append(receipts, receipt) - gasLeft -= gasUsed - dataGas + if isUserTx { userTxsCompleted++ } diff --git a/arbos/l2pricing/l2pricing.go b/arbos/l2pricing/l2pricing.go index 6db45d49c7..6583095813 100644 --- a/arbos/l2pricing/l2pricing.go +++ b/arbos/l2pricing/l2pricing.go @@ -36,7 +36,7 @@ const ( maxPerBlockGasLimitOffset ) -const L2GasLimit = 1 << 63 +const GethBlockGasLimit = 1 << 63 func InitializeL2PricingState(sto *storage.Storage) error { _ = sto.SetUint64ByUint64(gasPoolOffset, InitialGasPoolSeconds*InitialSpeedLimitPerSecond) diff --git a/docs/arbos/ArbOS.md b/docs/arbos/ArbOS.md index dacbc222f2..ae24229b23 100644 --- a/docs/arbos/ArbOS.md +++ b/docs/arbos/ArbOS.md @@ -36,7 +36,7 @@ An [`L1IncomingMessage`][L1IncomingMessage_link] represents an incoming sequence A Retryable is a transaction whose *submission* is separable from its *execution*. A retryable can be submitted for a fixed cost (dependent only on its calldata size) paid at L1. 
If the L1 transaction requesting submission succeeds (i.e. does not revert) then the submission of the Retryable to the L2 state is guaranteed to succeed.
 
-In the common case a Retryable's submission is followed by an attempt to execute the transaction. If the attempt fails or isn't scheduled after the Retryable is submitted, anyone can try to *redeem* it, by calling the [`redeem`](Precompiles.md#ArbRetryableTx) method of the [`ArbRetryableTx`](Precompiles.md#ArbRetryableTx) precompile. The party requesting the redeem provides the gas that will be used to execute the Retryable. If execution of the Retryable succeeds, the Retryable is deleted. If execution fails, the Retryable continues to exist and further attempts can be made to redeem it. If a fixed period (currently one week) elapses without a successful redeem, the Retryable expires and will be [automatically discarded][discard_link], unless some party has paid a fee to [*renew*][renew_link] the Retryable for another full period. A Retryable can live indefinitely as long as it is renewed each time before it expires.
+In the common case a Retryable's submission is followed by an attempt to execute the transaction. If the attempt fails or isn't scheduled after the Retryable is submitted, anyone can try to *redeem* it, by calling the [`redeem`](Precompiles.md#ArbRetryableTx) method of the [`ArbRetryableTx`](Precompiles.md#ArbRetryableTx) precompile. The party requesting the redeem provides the gas that will be used to execute the Retryable. If execution of the Retryable succeeds, the Retryable is deleted. If execution fails, the Retryable continues to exist and further attempts can be made to redeem it. If a fixed period (currently one week) elapses without a successful redeem, the Retryable expires and will be [automatically discarded][discard_link], unless some party has paid a fee to [*renew*][renew_link] the Retryable for another full period. A Retryable can live indefinitely as long as it is renewed each time before it expires.
 
 [discard_link]: https://github.com/OffchainLabs/nitro/blob/fa36a0f138b8a7e684194f9840315d80c390f324/arbos/retryables/retryable.go#L262
 [renew_link]: https://github.com/OffchainLabs/nitro/blob/fa36a0f138b8a7e684194f9840315d80c390f324/arbos/retryables/retryable.go#L207
@@ -104,7 +104,7 @@ This component maintains the last 256 L1 block hashes in a circular buffer. This
 [ArbitrumInternalTx_link]: https://github.com/OffchainLabs/nitro/blob/fa36a0f138b8a7e684194f9840315d80c390f324/arbos/block_processor.go#L116
 [TxProcessor_link]: https://github.com/OffchainLabs/nitro/blob/fa36a0f138b8a7e684194f9840315d80c390f324/arbos/tx_processor.go#L33
 
-### [`l1PricingState`][l1PricingState_link]
+### [`l1PricingState`][l1PricingState_link] 
 
 In addition to supporting the [`ArbAggregator precompile`](Precompiles.md#ArbAggregator), the L1 pricing state provides tools for determining the L1 component of a transaction's gas costs. Aggregators, whose compressed batches are the messages ArbOS uses to build L2 blocks, inform ArbOS of their compression ratios so that L2 fees can be fairly allocated between the network fee account and the aggregator posting a given transaction.
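To make that allocation concrete, here is a minimal Go sketch of the idea, assuming a made-up compression-ratio representation and invented numbers; the real calculation lives in `arbos/l1pricing` and differs in detail.

```go
package main

import "fmt"

func main() {
	// Hypothetical inputs, for illustration only.
	calldataBytes := uint64(1_024)              // size of the tx in the aggregator's batch
	ratioBips := uint64(7_500)                  // aggregator-reported compression ratio, in basis points
	l1GasPerByte := uint64(16)                  // rough L1 cost per calldata byte
	l1GasPriceWei := uint64(30_000_000_000)     // ArbOS's running estimate of the L1 gas price
	gasPurchased := uint64(400_000)             // total gas the user bought
	baseFeeWei := uint64(1_000_000_000)         // L2 basefee

	// The poster's share reflects what publishing the (compressed) tx costs on L1...
	posterWei := calldataBytes * ratioBips / 10_000 * l1GasPerByte * l1GasPriceWei
	// ...and the rest of what the user paid at the basefee funds the network.
	totalWei := gasPurchased * baseFeeWei
	networkWei := totalWei - posterWei

	fmt.Println("poster share (wei): ", posterWei)
	fmt.Println("network share (wei):", networkWei)
}
```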
@@ -114,12 +114,23 @@ The L1 pricing state also keeps a running estimate of the L1 gas price, which up [l1PricingState_link]: https://github.com/OffchainLabs/nitro/blob/fa36a0f138b8a7e684194f9840315d80c390f324/arbos/l1pricing/l1pricing.go#L16 -### [`l2PricingState`][l2PricingState_link] +### [`l2PricingState`][l2PricingState_link] The L2 pricing state tracks L2 resource usage to determine a reasonable L2 gas price. This process considers a variety of factors, including user demand, the state of geth, and the computational speed limit. The primary mechanism for doing so consists of a pair of pools, one larger than the other, that drain as L2-specific resources are consumed and filled as time passes. L1-specific resources like L1 calldata are not tracked by the pools, as they have little bearing on the actual work done by the network actors that the speed limit is meant to keep stable and synced. While much of this state is accessible through the [`ArbGasInfo`](Precompiles.md#ArbGasInfo) and [`ArbOwner`](Precompiles.md#ArbOwner) precompiles, most changes are automatic and happen during [block production][block_production_link] and [the transaction hooks](Geth.md#Hooks). Each of an incoming message's txes removes from the pool the L2 component of the gas it uses, and afterward the message's timestamp [informs the pricing mechanism][notify_pricer_link] of the time that's passed as ArbOS [finalizes the block][finalizeblock_link]. +ArbOS's larger gas pool [determines][maintain_limit_link] the per-block gas limit, setting a dynamic [upper limit][per_block_limit_link] on the amount of compute gas an L2 block may have. This limit is always enforced, though for the [first transaction][first_transaction_link] it's done in the [GasChargingHook](Geth.md#GasChargingHook) to avoid sharp decreases in the L1 gas price from over-inflating the compute component purchased to above the gas limit. This improves UX by allowing the first tx to succeed rather than requiring a resubmission. Because the first tx lowers the amount of space left in the block, subsequent transactions do not employ this strategy and may fail from such compute-component inflation. This is acceptable because such txes are only present in cases where the system is under heavy load and the result is that the user's tx is dropped without charges since the state transition fails early. Those trusting the sequencer can rely on the tx being automatically resubmitted in such a scenario. + +ArbOS's per-block gas limit is distinct from geth's block limit, which ArbOS [sets sufficiently high][geth_pool_set_link] so as to never run out. This is safe since geth's block limit exists to constrain the amount of work done per block, which ArbOS already does via its own per-block gas limit. Though it'll never run out, a block's txes use the [same geth gas pool][same_geth_pool_link] to maintain the invariant that the pool decreases monotonically after each tx. Block headers [use the geth block limit][use_geth_pool_link] for internal consistency and to ensure gas estimation works. 
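To ground the two-pool description above, here is a toy Go sketch of the per-block compute accounting, loosely following the `block_processor.go` changes in this patch; the three-transaction scenario and all numbers are invented, and the real logic (including the first-transaction refund path) lives in `arbos/block_processor.go`.

```go
package main

import "fmt"

const txGas = 21_000 // params.TxGas: the minimum charged even for a failing tx

func main() {
	gasLeft := uint64(1_000_000) // ArbOS's dynamic per-block compute limit
	userTxsCompleted := 0

	// (gas limit, L1 data gas) pairs for a few incoming user txes.
	txes := [][2]uint64{{600_000, 50_000}, {500_000, 40_000}, {100_000, 10_000}}

	for _, tx := range txes {
		computeGas := tx[0] - tx[1] // strip the L1 data component
		if computeGas < txGas {
			computeGas = txGas
		}
		// Once a tx has landed, anything that doesn't fit ends the block;
		// the first tx instead has its excess refunded (ForceRefundGas).
		if computeGas > gasLeft && userTxsCompleted > 0 {
			fmt.Println("compute pool exhausted; deferring remaining txes")
			break
		}
		if computeGas > gasLeft {
			computeGas = gasLeft // first-tx case: clamp rather than fail
		}
		gasLeft -= computeGas
		userTxsCompleted++
		fmt.Printf("included tx %d, compute gas left: %d\n", userTxsCompleted, gasLeft)
	}
}
```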
+
 [l2PricingState_link]: https://github.com/OffchainLabs/nitro/blob/fa36a0f138b8a7e684194f9840315d80c390f324/arbos/l2pricing/l2pricing.go#L14
 [block_production_link]: https://github.com/OffchainLabs/nitro/blob/fa36a0f138b8a7e684194f9840315d80c390f324/arbos/block_processor.go#L77
 [notify_pricer_link]: https://github.com/OffchainLabs/nitro/blob/fa36a0f138b8a7e684194f9840315d80c390f324/arbos/block_processor.go#L336
+
+[maintain_limit_link]: https://github.com/OffchainLabs/nitro/blob/2ba6d1aa45abcc46c28f3d4f560691ce5a396af8/arbos/l2pricing/pools.go#L98
+[per_block_limit_link]: https://github.com/OffchainLabs/nitro/blob/2ba6d1aa45abcc46c28f3d4f560691ce5a396af8/arbos/block_processor.go#L146
+[first_transaction_link]: https://github.com/OffchainLabs/nitro/blob/2ba6d1aa45abcc46c28f3d4f560691ce5a396af8/arbos/block_processor.go#L237
+[geth_pool_set_link]: https://github.com/OffchainLabs/nitro/blob/2ba6d1aa45abcc46c28f3d4f560691ce5a396af8/arbos/block_processor.go#L166
+[same_geth_pool_link]: https://github.com/OffchainLabs/nitro/blob/2ba6d1aa45abcc46c28f3d4f560691ce5a396af8/arbos/block_processor.go#L199
+[use_geth_pool_link]: https://github.com/OffchainLabs/nitro/blob/2ba6d1aa45abcc46c28f3d4f560691ce5a396af8/arbos/block_processor.go#L67
diff --git a/docs/arbos/Gas.md b/docs/arbos/Gas.md
index 694a98ca99..bcb6e6d71f 100644
--- a/docs/arbos/Gas.md
+++ b/docs/arbos/Gas.md
@@ -1,10 +1,12 @@
 # Gas and Fees
 
-Fees exist to
+A transaction's fees include the sum
 
 Gas is a concept that serves two purposes: and buying network resources.
+
+
 ## Tips in L2
 While tips are not advised for those using the sequencer, which prioritizes transactions on a first-come first-served basis, 3rd-party aggregators may choose to order txes based on tips. A user specifies a tip by setting a gas price in excess of the basefee and will [pay that difference][pay_difference_link] on the amount of gas the tx uses.
@@ -13,15 +15,15 @@ A poster receives the tip only when the user has set them as their [preferred ag
 
 [pay_difference_link]: https://github.com/OffchainLabs/go-ethereum/blob/edf6a19157606070b6a6660c8decc513e2408cb7/core/state_transition.go#L358
 [goes_to_network_link]: https://github.com/OffchainLabs/nitro/blob/c93c806a5cfe99f92a534d3c952a83c3c8b3088c/arbos/tx_processor.go#L262
 
-## Geth Gas Pool vs ArbOS's
-
-
 ## Gas Estimating Retryables
 When a transaction schedules another, the subsequent tx's execution [will be included][estimation_inclusion_link] when estimating gas via the node's RPC. A tx's gas estimate, then, can only be found if all the txes succeed at a given gas limit. This is especially important when working with retryables and scheduling redeem attempts.
 
-Because a call to [`redeem`](#ArbRetryableTx) donates all of the caller's gas, one must use a subcall to limit the amount sent should multiple calls be made. Otherwise the first will take all of the gas and force the second to necessarily fail irrespective of the estimation's gas limit.
+Because a call to [`redeem`](#ArbRetryableTx) donates all of the call's gas, making multiple calls requires limiting the amount of gas provided to each subcall. Otherwise the first will take all of the gas and force the second to necessarily fail irrespective of the estimation's gas limit.
 
-Gas estimation for Retryable submissions is possible via `NodeInterface.sol` and similarly requires the auto-redeem attempt succeed.
+Gas estimation for Retryable submissions is possible via [`NodeInterface.sol`][node_interface_link] and similarly requires that the auto-redeem attempt succeed.
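For tooling authors, here is a hedged sketch of what that implies when estimating from Go with go-ethereum's `ethclient`; the endpoint and addresses below are placeholders, and the submission calldata is elided.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	ctx := context.Background()
	client, err := ethclient.Dial("ws://localhost:7546") // placeholder node endpoint
	if err != nil {
		log.Fatal(err)
	}
	var from, to common.Address // placeholders for the submitter and target
	// Because estimation executes scheduled txes too, the node only returns a
	// gas limit at which the submission *and* its auto-redeem both succeed.
	gas, err := client.EstimateGas(ctx, ethereum.CallMsg{From: from, To: &to})
	if err != nil {
		log.Fatal("no limit found at which every scheduled tx succeeds: ", err)
	}
	fmt.Println("gas estimate covering the scheduled redeem:", gas)
}
```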
+
+[estimation_inclusion_link]: https://github.com/OffchainLabs/go-ethereum/blob/edf6a19157606070b6a6660c8decc513e2408cb7/internal/ethapi/api.go#L955
+[node_interface_link]: https://github.com/OffchainLabs/nitro/blob/master/solgen/src/node_interface/NodeInterface.sol
 
 ## NodeInterface.sol
 To avoid creating new RPC methods for client-side tooling, nitro geth's [`InterceptRPCMessage`][InterceptRPCMessage_link] hook provides an opportunity to swap out the message it's handling before deriving a transaction from it. The node [uses this hook][use_hook_link] to detect messages sent to the address `0xc8`, the location of the fictional `NodeInterface` contract specified in [`NodeInterface.sol`][node_interface_link].
@@ -32,12 +34,6 @@ To avoid creating new RPC methods for client-side tooling, nitro geth's [`Interc
 |:-----------------------------------------------------------------|:----------------------------------------------------|
 | [`estimateRetryableTicket`][estimateRetryableTicket_link]   | Estimates the gas needed for a retryable submission |
 
-
-[estimation_inclusion_link]: https://github.com/OffchainLabs/go-ethereum/blob/edf6a19157606070b6a6660c8decc513e2408cb7/internal/ethapi/api.go#L955
+[InterceptRPCMessage_link]: https://github.com/OffchainLabs/go-ethereum/blob/f31341b3dfa987719b012bc976a6f4fe3b8a1221/internal/ethapi/api.go#L929
 [use_hook_link]: https://github.com/OffchainLabs/nitro/blob/57e03322926f796f75a21f8735cc64ea0a2d11c3/arbstate/node-interface.go#L17
-[node_interface_link]: https://github.com/OffchainLabs/nitro/blob/master/solgen/src/node_interface/NodeInterface.sol
 [estimateRetryableTicket_link]: https://github.com/OffchainLabs/nitro/blob/8ab1d6730164e18d0ca1bd5635ca12aadf36a640/solgen/src/node_interface/NodeInterface.sol#L21
-
-
-
-[InterceptRPCMessage_link]: todo
diff --git a/docs/arbos/Geth.md b/docs/arbos/Geth.md
index 5431ae9aa8..30bc7e71f8 100644
--- a/docs/arbos/Geth.md
+++ b/docs/arbos/Geth.md
@@ -26,6 +26,7 @@ Below is [`ApplyTransaction`][ApplyTransaction_link]'s callgraph, with additiona
 * [`PushCaller`](#PushCaller)
 * [`PopCaller`](#PopCaller)
 * `core.StateTransition.refundGas`
+ * [`ForceRefundGas`](#ForceRefundGas)
 * [`NonrefundableGas`](#NonrefundableGas)
 * [`EndTxHook`](#EndTxHook)
 * added return parameter: `transactionResult`
@@ -48,12 +49,23 @@ The hook returns `true` for both of these transaction types, signifying that the
 
 This fallible hook ensures the user has enough funds to pay their poster's L1 calldata costs. If not, the tx is reverted and the [`EVM`][EVM_link] does not start. In the common case that the user can pay, the amount paid for calldata is set aside for later reimbursement of the poster. All other fees go to the network account, as they represent the tx's burden on validators and nodes more generally.
 
+If the user attempts to purchase compute gas in excess of ArbOS's per-block gas limit, the difference is [set aside][difference_set_aside_link] and [refunded later][refunded_later_link] via [`ForceRefundGas`](#ForceRefundGas) so that only the gas limit is used. Note that the limit observed may not be the same as that seen [at the start of the block][that_seen_link] if ArbOS's larger gas pool falls below the [`MaxPerBlockGasLimit`][max_perblock_limit_link] while processing the block's previous txes.
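A small sketch of that refund rule, under the simplifying assumption that a tx's gas splits cleanly into data and compute components; the real logic spans the `GasChargingHook` and `ForceRefundGas`, and the function and numbers here are invented.

```go
package main

import "fmt"

// chargeCompute returns how much compute gas to actually charge and how much
// to hand back later via a ForceRefundGas-style refund.
func chargeCompute(gasPurchased, dataGas, perBlockLimit uint64) (uint64, uint64) {
	compute := gasPurchased - dataGas
	refund := uint64(0)
	if compute > perBlockLimit {
		refund = compute - perBlockLimit // excess is set aside for the user
		compute = perBlockLimit          // only the limit's worth is spent
	}
	return compute, refund
}

func main() {
	charged, refund := chargeCompute(2_000_000, 300_000, 1_200_000)
	fmt.Println("compute charged:", charged) // 1200000
	fmt.Println("refunded later: ", refund)  // 500000
}
```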
+
+[difference_set_aside_link]: https://github.com/OffchainLabs/nitro/blob/2ba6d1aa45abcc46c28f3d4f560691ce5a396af8/arbos/tx_processor.go#L272
+[refunded_later_link]: https://github.com/OffchainLabs/go-ethereum/blob/f31341b3dfa987719b012bc976a6f4fe3b8a1221/core/state_transition.go#L389
+[that_seen_link]: https://github.com/OffchainLabs/nitro/blob/2ba6d1aa45abcc46c28f3d4f560691ce5a396af8/arbos/block_processor.go#L146
+[max_perblock_limit_link]: https://github.com/OffchainLabs/nitro/blob/2ba6d1aa45abcc46c28f3d4f560691ce5a396af8/arbos/l2pricing/pools.go#L100
+
 ### [`PushCaller`][PushCaller_link] and [`PopCaller`][PopCaller_link]
 These hooks track the callers within the EVM callstack, pushing and popping as calls are made and complete. This provides [`ArbSys`](Precompiles.md#ArbSys) with info about the callstack, which it uses to implement the methods [`WasMyCallersAddressAliased`](Precompiles.md#ArbSys) and [`MyCallersAddressWithoutAliasing`](Precompiles.md#ArbSys).
 
 ### [`L1BlockHash`][L1BlockHash_link] and [`L1BlockNumber`][L1BlockNumber_link]
 In arbitrum, the BlockHash and Number operations return data that relies on underlying L1 blocks instead of L2 blocks, to accommodate the normal use-case of these opcodes, which often assume ethereum-like time passes between different blocks. The L1BlockHash and L1BlockNumber hooks have the required data for these operations.
 
+### [`ForceRefundGas`][ForceRefundGas_link]
+
+This hook allows ArbOS to add additional refunds to the user's tx. This is currently only used to refund any compute gas purchased in excess of ArbOS's per-block gas limit during the [`GasChargingHook`](#GasChargingHook).
+
 ### [`NonrefundableGas`][NonrefundableGas_link]
 
 Because poster costs come at the expense of L1 aggregators and not the network more broadly, the amounts paid for L1 calldata should not be refunded. This hook provides geth access to the equivalent amount of L2 gas the poster's cost equals, ensuring this amount is not reimbursed for network-incentivized behaviors like freeing storage slots.
@@ -70,6 +82,7 @@ The [`EndTxHook`][EndTxHook_link] is called after the [`EVM`][EVM_link] has retu
 [GasChargingHook_link]: https://github.com/OffchainLabs/nitro/blob/fa36a0f138b8a7e684194f9840315d80c390f324/arbos/tx_processor.go#L205
 [PushCaller_link]: https://github.com/OffchainLabs/nitro/blob/fa36a0f138b8a7e684194f9840315d80c390f324/arbos/tx_processor.go#L60
 [PopCaller_link]: https://github.com/OffchainLabs/nitro/blob/fa36a0f138b8a7e684194f9840315d80c390f324/arbos/tx_processor.go#L64
+[ForceRefundGas_link]: https://github.com/OffchainLabs/nitro/blob/2ba6d1aa45abcc46c28f3d4f560691ce5a396af8/arbos/tx_processor.go#L291
 [NonrefundableGas_link]: https://github.com/OffchainLabs/nitro/blob/fa36a0f138b8a7e684194f9840315d80c390f324/arbos/tx_processor.go#L248
 [EndTxHook_link]: https://github.com/OffchainLabs/nitro/blob/fa36a0f138b8a7e684194f9840315d80c390f324/arbos/tx_processor.go#L255
 [L1BlockHash_link]: https://github.com/OffchainLabs/nitro/blob/df5344a48f4a24173b9a3794318079a869aae58b/arbos/tx_processor.go#L407
@@ -168,11 +181,11 @@ A [geth message][geth_message_link] may be processed for various purposes. For e
 
 A message [derived from a transaction][AsMessage_link] will carry that transaction in a field accessible via its [`UnderlyingTransaction`][underlying_link] method. While this is related to the way a given message is used, they are not one-to-one. The table below shows the various run modes and whether each could have an underlying transaction.
-| Run Mode | Scope | Carries an Underlying Tx? | -|:-----------------------------------------|:------------------------|:-----------------------------------------------------------------------------| -| [`MessageCommitMode`][MC0] | state transition   | always | +| Run Mode | Scope | Carries an Underlying Tx? | +|:-----------------------------------------|:------------------------|:-----------------------------------------------------------------------------------| +| [`MessageCommitMode`][MC0] | state transition   | always | | [`MessageGasEstimationMode`][MC1]   | gas estimation | when created via [`NodeInterface.sol`](Gas.md#NodeInterface.sol) or when scheduled | -| [`MessageEthcallMode`][MC2] | eth_calls | never | +| [`MessageEthcallMode`][MC2] | eth_calls | never | [MC0]: https://github.com/OffchainLabs/go-ethereum/blob/1e9c9b86135dafebf7ab84641a5674e4249ee849/core/types/transaction.go#L648 [MC1]: https://github.com/OffchainLabs/go-ethereum/blob/1e9c9b86135dafebf7ab84641a5674e4249ee849/core/types/transaction.go#L649 diff --git a/docs/arbos/Precompiles.md b/docs/arbos/Precompiles.md index 3e896181e5..3595d7259f 100644 --- a/docs/arbos/Precompiles.md +++ b/docs/arbos/Precompiles.md @@ -335,13 +335,13 @@ Provides non-owners with info about the current chain owners. Provides methods for managing retryables. The model has been adjusted for Nitro, most notably in terms of how retry transactions are scheduled. For more information on retryables, please see [the retryable documentation](ArbOS.md#Retryables). -| Methods | | Nitro changes | -|:---------------------------------------------------------------------------|:----------------------------------------------------------------------------|:-----------------------| -| [][RTs0] [`Cancel`][RT0]`(ticket)` | Cancel the ticket and refund its callvalue to its beneficiary | | -| [][RTs1] [`GetBeneficiary`][RT1]`(ticket)`   | Gets the beneficiary of the ticket | | -| [][RTs2] [`GetLifetime`][RT2]`()` | Gets the default lifetime period a retryable has at creation | Reverts when not found | -| [][RTs3] [`GetTimeout`][RT3]`(ticket)` | Gets the timestamp for when ticket will expire | | -| [][RTs4] [`Keepalive`][RT4]`(ticket)` | Adds one lifetime period to the ticket's expiry | Doesn't add callvalue | +| Methods | | Nitro changes | +|:---------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|:-----------------------| +| [][RTs0] [`Cancel`][RT0]`(ticket)` | Cancel the ticket and refund its callvalue to its beneficiary | | +| [][RTs1] [`GetBeneficiary`][RT1]`(ticket)`   | Gets the beneficiary of the ticket | | +| [][RTs2] [`GetLifetime`][RT2]`()` | Gets the default lifetime period a retryable has at creation | Reverts when not found | +| [][RTs3] [`GetTimeout`][RT3]`(ticket)` | Gets the timestamp for when ticket will expire | | +| [][RTs4] [`Keepalive`][RT4]`(ticket)` | Adds one lifetime period to the ticket's expiry | Doesn't add callvalue | | [][RTs5] [`Redeem`][RT5]`(ticket)` | Schedule an attempt to redeem the retryable, donating all of the call's gas   | Happens next tx | [RT0]: https://github.com/OffchainLabs/nitro/blob/8f2988fcb2683c7b39de2635af22f1f8d6fa9c79/precompiles/ArbRetryableTx.go#L184 @@ -445,10 +445,10 @@ Provides system-level functionality for interacting with L1 and understanding th [Ses0]: https://github.com/OffchainLabs/nitro/blob/f11ba39cf91ee1fe1b5f6b67e8386e5efd147667/solgen/src/precompiles/ArbSys.sol#L107 [Ses1]: 
https://github.com/OffchainLabs/nitro/blob/f11ba39cf91ee1fe1b5f6b67e8386e5efd147667/solgen/src/precompiles/ArbSys.sol#L126 -| Removed Methods | | -| :----------------------------------------------------------- | :----------------------------------------------------------- | +| Removed Methods | | +|:---------------------------------------------------------------------------------|:------------------------------------------------------------------| | [][Srs0] [`GetStorageAt`][Sr0]`(account, index)`   | Nitro doesn't need this introspection, and users couldn't call it | -| [][Srs1] [`GetTransactionCount`][Sr1]`(account)` | Nitro doesn't need this introspection, and users couldn't call it | +| [][Srs1] [`GetTransactionCount`][Sr1]`(account)` | Nitro doesn't need this introspection, and users couldn't call it | [Sr0]: https://github.com/OffchainLabs/arb-os/blob/89e36db597c4857a4dac3efd7cc01b13c7845cc0/arb_os/arbsys.mini#L335 [Sr1]: https://github.com/OffchainLabs/arb-os/blob/89e36db597c4857a4dac3efd7cc01b13c7845cc0/arb_os/arbsys.mini#L315 From da7b3ddd9e44c4720af693679df58a0ed263782a Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 23 Feb 2022 23:07:37 -0600 Subject: [PATCH 17/32] Integrate L1 validator / staker into arbnode and test-node.bash --- arbnode/node.go | 87 +++++++++++++++++++++++------ cmd/deploy/deploy.go | 3 +- cmd/node/node.go | 27 ++++++++- docker-compose.yaml | 5 +- system_tests/common_test.go | 8 +-- system_tests/full_challenge_test.go | 4 +- system_tests/staker_test.go | 36 +++++------- test-node.bash | 5 +- validator/l1_validator.go | 71 +++++++++++------------ validator/rollup_watcher.go | 19 ++++--- validator/staker.go | 61 ++++++++++---------- 11 files changed, 196 insertions(+), 130 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 1cc03fcd7b..c47bfd4115 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -43,11 +43,13 @@ import ( ) type RollupAddresses struct { - Bridge common.Address - Inbox common.Address - SequencerInbox common.Address - Rollup common.Address - DeployedAt uint64 + Bridge common.Address + Inbox common.Address + SequencerInbox common.Address + Rollup common.Address + ValidatorUtils common.Address + ValidatorWalletCreator common.Address + DeployedAt uint64 } func andTxSucceeded(ctx context.Context, l1client arbutil.L1Interface, txTimeout time.Duration, tx *types.Transaction, err error) error { @@ -197,7 +199,7 @@ func deployRollupCreator(ctx context.Context, client arbutil.L1Interface, auth * return rollupCreator, rollupCreatorAddress, nil } -func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *bind.TransactOpts, sequencer common.Address, wasmModuleRoot common.Hash, txTimeout time.Duration) (*RollupAddresses, error) { +func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *bind.TransactOpts, sequencer common.Address, authorizeValidators uint64, wasmModuleRoot common.Hash, txTimeout time.Duration) (*RollupAddresses, error) { rollupCreator, rollupCreatorAddress, err := deployRollupCreator(ctx, l1client, deployAuth, txTimeout) if err != nil { return nil, err @@ -253,12 +255,40 @@ func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *b return nil, fmt.Errorf("error setting is batch poster: %w", err) } + validatorUtils, tx, _, err := rollupgen.DeployValidatorUtils(deployAuth, l1client) + err = andTxSucceeded(ctx, l1client, txTimeout, tx, err) + if err != nil { + return nil, fmt.Errorf("validator utils deploy error: %w", err) + } + + validatorWalletCreator, 
tx, _, err := rollupgen.DeployValidatorWalletCreator(deployAuth, l1client)
+	err = andTxSucceeded(ctx, l1client, txTimeout, tx, err)
+	if err != nil {
+		return nil, fmt.Errorf("validator wallet creator deploy error: %w", err)
+	}
+
+	var allowValidators []bool
+	var validatorAddrs []common.Address
+	for i := uint64(1); i <= authorizeValidators; i++ {
+		validatorAddrs = append(validatorAddrs, crypto.CreateAddress(validatorWalletCreator, i))
+		allowValidators = append(allowValidators, true)
+	}
+	if len(validatorAddrs) > 0 {
+		tx, err = rollup.SetValidator(deployAuth, validatorAddrs, allowValidators)
+		err = andTxSucceeded(ctx, l1client, txTimeout, tx, err)
+		if err != nil {
+			return nil, fmt.Errorf("error setting validator: %w", err)
+		}
+	}
+
 	return &RollupAddresses{
-		Bridge:         info.DelayedBridge,
-		Inbox:          info.InboxAddress,
-		SequencerInbox: info.SequencerInbox,
-		DeployedAt:     receipt.BlockNumber.Uint64(),
-		Rollup:         info.RollupAddress,
+		Bridge:                 info.DelayedBridge,
+		Inbox:                  info.InboxAddress,
+		SequencerInbox:         info.SequencerInbox,
+		DeployedAt:             receipt.BlockNumber.Uint64(),
+		Rollup:                 info.RollupAddress,
+		ValidatorUtils:         validatorUtils,
+		ValidatorWalletCreator: validatorWalletCreator,
 	}, nil
 }
 
@@ -276,10 +306,12 @@ type NodeConfig struct {
 	BroadcasterConfig     wsbroadcastserver.BroadcasterConfig
 	BroadcastClient       bool
 	BroadcastClientConfig broadcastclient.BroadcastClientConfig
+	L1Validator           bool
+	L1ValidatorConfig     validator.L1ValidatorConfig
 }
 
-var NodeConfigDefault = NodeConfig{arbitrum.DefaultConfig, true, DefaultInboxReaderConfig, DefaultDelayedSequencerConfig, true, DefaultBatchPosterConfig, "", false, validator.DefaultBlockValidatorConfig, false, wsbroadcastserver.DefaultBroadcasterConfig, false, broadcastclient.DefaultBroadcastClientConfig}
-var NodeConfigL1Test = NodeConfig{arbitrum.DefaultConfig, true, TestInboxReaderConfig, TestDelayedSequencerConfig, true, TestBatchPosterConfig, "", false, validator.DefaultBlockValidatorConfig, false, wsbroadcastserver.DefaultBroadcasterConfig, false, broadcastclient.DefaultBroadcastClientConfig}
+var NodeConfigDefault = NodeConfig{arbitrum.DefaultConfig, true, DefaultInboxReaderConfig, DefaultDelayedSequencerConfig, true, DefaultBatchPosterConfig, "", false, validator.DefaultBlockValidatorConfig, false, wsbroadcastserver.DefaultBroadcasterConfig, false, broadcastclient.DefaultBroadcastClientConfig, false, validator.DefaultL1ValidatorConfig}
+var NodeConfigL1Test = NodeConfig{arbitrum.DefaultConfig, true, TestInboxReaderConfig, TestDelayedSequencerConfig, true, TestBatchPosterConfig, "", false, validator.DefaultBlockValidatorConfig, false, wsbroadcastserver.DefaultBroadcasterConfig, false, broadcastclient.DefaultBroadcastClientConfig, false, validator.DefaultL1ValidatorConfig}
 var NodeConfigL2Test = NodeConfig{ArbConfig: arbitrum.DefaultConfig, L1Reader: false}
 
 type Node struct {
@@ -293,11 +325,12 @@ type Node struct {
 	DelayedSequencer *DelayedSequencer
 	BatchPoster      *BatchPoster
 	BlockValidator   *validator.BlockValidator
+	Staker           *validator.Staker
 	BroadcastServer  *broadcaster.Broadcaster
 	BroadcastClient  *broadcastclient.BroadcastClient
 }
 
-func CreateNode(stack *node.Node, chainDb ethdb.Database, config *NodeConfig, l2BlockChain *core.BlockChain, l1client arbutil.L1Interface, deployInfo *RollupAddresses, sequencerTxOpt *bind.TransactOpts) (*Node, error) {
+func CreateNode(stack *node.Node, chainDb ethdb.Database, config *NodeConfig, l2BlockChain *core.BlockChain, l1client arbutil.L1Interface, deployInfo *RollupAddresses, sequencerTxOpt *bind.TransactOpts, 
validatorTxOpts *bind.TransactOpts) (*Node, error) { var broadcastServer *broadcaster.Broadcaster if config.Broadcaster { broadcastServer = broadcaster.NewBroadcaster(config.BroadcasterConfig) @@ -334,7 +367,7 @@ func CreateNode(stack *node.Node, chainDb ethdb.Database, config *NodeConfig, l2 broadcastClient = broadcastclient.NewBroadcastClient(config.BroadcastClientConfig.URL, nil, config.BroadcastClientConfig.Timeout, txStreamer) } if !config.L1Reader { - return &Node{backend, arbInterface, txStreamer, txPublisher, nil, nil, nil, nil, nil, nil, broadcastServer, broadcastClient}, nil + return &Node{backend, arbInterface, txStreamer, txPublisher, nil, nil, nil, nil, nil, nil, nil, broadcastServer, broadcastClient}, nil } if deployInfo == nil { @@ -365,8 +398,21 @@ func CreateNode(stack *node.Node, chainDb ethdb.Database, config *NodeConfig, l2 } } + var staker *validator.Staker + if config.L1Validator { + // TODO: remember validator wallet in JSON instead of querying it from L1 every time + wallet, err := validator.NewValidatorWallet(nil, deployInfo.ValidatorWalletCreator, deployInfo.Rollup, l1client, validatorTxOpts, int64(deployInfo.DeployedAt), func(common.Address) {}) + if err != nil { + return nil, err + } + staker, err = validator.NewStaker(l1client, wallet, bind.CallOpts{}, config.L1ValidatorConfig, l2BlockChain, inboxReader, inboxTracker, txStreamer, blockValidator, deployInfo.ValidatorUtils) + if err != nil { + return nil, err + } + } + if !config.BatchPoster { - return &Node{backend, arbInterface, txStreamer, txPublisher, deployInfo, inboxReader, inboxTracker, nil, nil, blockValidator, broadcastServer, broadcastClient}, nil + return &Node{backend, arbInterface, txStreamer, txPublisher, deployInfo, inboxReader, inboxTracker, nil, nil, blockValidator, staker, broadcastServer, broadcastClient}, nil } if sequencerTxOpt == nil { @@ -380,7 +426,7 @@ func CreateNode(stack *node.Node, chainDb ethdb.Database, config *NodeConfig, l2 if err != nil { return nil, err } - return &Node{backend, arbInterface, txStreamer, txPublisher, deployInfo, inboxReader, inboxTracker, delayedSequencer, batchPoster, blockValidator, broadcastServer, broadcastClient}, nil + return &Node{backend, arbInterface, txStreamer, txPublisher, deployInfo, inboxReader, inboxTracker, delayedSequencer, batchPoster, blockValidator, staker, broadcastServer, broadcastClient}, nil } func (n *Node) Start(ctx context.Context) error { @@ -421,6 +467,13 @@ func (n *Node) Start(ctx context.Context) error { return err } } + if n.Staker != nil { + err = n.Staker.Initialize(ctx) + if err != nil { + return err + } + n.Staker.Start(ctx) + } if n.BroadcastServer != nil { err = n.BroadcastServer.Start(ctx) if err != nil { diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index 7a0ecaa574..cf2c218e1b 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -31,6 +31,7 @@ func main() { l1passphrase := flag.String("l1passphrase", "passphrase", "l1 private key file passphrase") outfile := flag.String("l1deployment", "deploy.json", "deployment output json file") l1ChainIdUint := flag.Uint64("l1chainid", 1337, "L1 chain ID") + authorizevalidators := flag.Uint64("authorizevalidators", 0, "Number of validators to preemptively authorize") flag.Parse() l1ChainId := new(big.Int).SetUint64(*l1ChainIdUint) @@ -46,7 +47,7 @@ func main() { panic(err) } - deployPtr, err := arbnode.DeployOnL1(ctx, l1client, l1TransactionOpts, l1TransactionOpts.From, common.HexToHash(*wasmmoduleroot), time.Minute*5) + deployPtr, err := arbnode.DeployOnL1(ctx, 
l1client, l1TransactionOpts, l1TransactionOpts.From, *authorizevalidators, common.HexToHash(*wasmmoduleroot), time.Minute*5) if err != nil { flag.Usage() panic(err) diff --git a/cmd/node/node.go b/cmd/node/node.go index 3c713f0393..e7c121ea05 100644 --- a/cmd/node/node.go +++ b/cmd/node/node.go @@ -71,6 +71,10 @@ func main() { feedInputUrl := flag.String("feed.input.url", "", "URL of sequence feed source") feedInputTimeout := flag.Duration("feed.input.timeout", 20*time.Second, "duration to wait before timing out conection to server") + l1validator := flag.Bool("l1validator", false, "enable L1 validator and staker functionality") + validatorstrategy := flag.String("validatorstrategy", "watchtower", "L1 validator strategy, either watchtower, defensive, stakeLatest, or makeNodes (requires l1role=validator)") + l1validatorwithoutblockvalidator := flag.Bool("UNSAFEl1validatorwithoutblockvalidator", false, "DANGEROUS! allows running an L1 validator without a block validator") + flag.Parse() glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) @@ -96,6 +100,19 @@ func main() { panic("l1role not recognized") } + if *l1validator { + if !nodeConf.L1Reader { + flag.Usage() + panic("l1validator requires l1role other than \"none\"") + } + nodeConf.L1Validator = true + nodeConf.L1ValidatorConfig.Strategy = *validatorstrategy + if !nodeConf.BlockValidator && !*l1validatorwithoutblockvalidator { + flag.Usage() + panic("L1 validator requires block validator to safely function") + } + } + ctx := context.Background() var l1client *ethclient.Client @@ -110,7 +127,7 @@ func main() { flag.Usage() panic(err) } - if nodeConf.BatchPoster { + if nodeConf.BatchPoster || nodeConf.L1Validator { l1TransactionOpts, err = util.GetTransactOptsFromKeystore(*l1keystore, *seqAccount, *l1passphrase, l1ChainId) if err != nil { panic(err) @@ -127,7 +144,11 @@ func main() { // TODO actually figure out the wasmModuleRoot panic("deploy as validator not yet supported") } - deployPtr, err := arbnode.DeployOnL1(ctx, l1client, l1TransactionOpts, l1Addr, wasmModuleRoot, time.Minute*5) + var validators uint64 + if nodeConf.L1Validator { + validators++ + } + deployPtr, err := arbnode.DeployOnL1(ctx, l1client, l1TransactionOpts, l1Addr, validators, wasmModuleRoot, time.Minute*5) if err != nil { flag.Usage() panic(err) @@ -275,7 +296,7 @@ func main() { } } - node, err := arbnode.CreateNode(stack, chainDb, &nodeConf, l2BlockChain, l1client, &deployInfo, l1TransactionOpts) + node, err := arbnode.CreateNode(stack, chainDb, &nodeConf, l2BlockChain, l1client, &deployInfo, l1TransactionOpts, l1TransactionOpts) if err != nil { panic(err) } diff --git a/docker-compose.yaml b/docker-compose.yaml index 6769d2ccd6..4b8c0d4c57 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -9,8 +9,9 @@ services: volumes: - "l1data:/root/.ethereum" - "l1keystore:/keystore" - command: --keystore /keystore --http --http.addr 0.0.0.0 --http.vhosts * --ws --ws.addr 0.0.0.0 --dev --password /root/.ethereum/passphrase + command: --keystore /keystore --http --http.addr 0.0.0.0 --http.vhosts * --http.api personal,eth,net,web3 --ws --ws.addr 0.0.0.0 --ws.api personal,eth,net,web3 --dev --dev.period 1 --password /root/.ethereum/passphrase sequencer: + pid: host # allow debugging build: . 
ports: - "7545:7545" @@ -19,7 +20,7 @@ services: - "seqdata:/data" - "l1keystore:/l1keystore" - "deploydata:/deploydata" - command: -datadir /data -dev -l1role sequencer -l1conn ws://geth:8546 -l1keystore /l1keystore -l1deployment /deploydata/deployment.json -httphost 0.0.0.0 -wshost 0.0.0.0 + command: -datadir /data -dev -l1role sequencer -l1conn ws://geth:8546 -l1keystore /l1keystore -l1deployment /deploydata/deployment.json -httphost 0.0.0.0 -wshost 0.0.0.0 -UNSAFEl1validatorwithoutblockvalidator -l1validator -validatorstrategy MakeNodes depends_on: - geth volumes: diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 0976019a14..2b914a0013 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -151,7 +151,7 @@ func DeployOnTestL1(t *testing.T, ctx context.Context, l1info info, l1client cli l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(9223372036854775807), nil)}) l1TransactionOpts := l1info.GetDefaultTransactOpts("RollupOwner") - addresses, err := arbnode.DeployOnL1(ctx, l1client, &l1TransactionOpts, l1info.GetAddress("Sequencer"), common.Hash{}, 5*time.Second) + addresses, err := arbnode.DeployOnL1(ctx, l1client, &l1TransactionOpts, l1info.GetAddress("Sequencer"), 0, common.Hash{}, 5*time.Second) Require(t, err) l1info.SetContract("Bridge", addresses.Bridge) l1info.SetContract("SequencerInbox", addresses.SequencerInbox) @@ -204,7 +204,7 @@ func CreateTestNodeOnL1WithConfig(t *testing.T, ctx context.Context, isSequencer if !isSequencer { nodeConfig.BatchPoster = false } - node, err := arbnode.CreateNode(l2stack, l2chainDb, nodeConfig, l2blockchain, l1client, addresses, sequencerTxOptsPtr) + node, err := arbnode.CreateNode(l2stack, l2chainDb, nodeConfig, l2blockchain, l1client, addresses, sequencerTxOptsPtr, nil) Require(t, err) Require(t, node.Start(ctx)) @@ -221,7 +221,7 @@ func CreateTestL2(t *testing.T, ctx context.Context) (*BlockchainTestInfo, *arbn func CreateTestL2WithConfig(t *testing.T, ctx context.Context, l2Info *BlockchainTestInfo, nodeConfig *arbnode.NodeConfig, takeOwnership bool) (*BlockchainTestInfo, *arbnode.Node, *ethclient.Client) { l2info, stack, chainDb, blockchain := createL2BlockChain(t, l2Info) - node, err := arbnode.CreateNode(stack, chainDb, nodeConfig, blockchain, nil, nil, nil) + node, err := arbnode.CreateNode(stack, chainDb, nodeConfig, blockchain, nil, nil, nil, nil) Require(t, err) // Give the node an init message @@ -287,7 +287,7 @@ func Create2ndNodeWithConfig(t *testing.T, ctx context.Context, first *arbnode.N l2blockchain, err := arbnode.WriteOrTestBlockChain(l2chainDb, nil, initReader, 0, params.ArbitrumTestChainConfig()) Require(t, err) - node, err := arbnode.CreateNode(l2stack, l2chainDb, nodeConfig, l2blockchain, l1client, first.DeployInfo, nil) + node, err := arbnode.CreateNode(l2stack, l2chainDb, nodeConfig, l2blockchain, l1client, first.DeployInfo, nil, nil) Require(t, err) err = node.Start(ctx) diff --git a/system_tests/full_challenge_test.go b/system_tests/full_challenge_test.go index 05ac6c734b..42ad0ad558 100644 --- a/system_tests/full_challenge_test.go +++ b/system_tests/full_challenge_test.go @@ -245,14 +245,14 @@ func runChallengeTest(t *testing.T, asserterIsCorrect bool) { asserterL2Info, asserterL2Stack, asserterL2ChainDb, asserterL2Blockchain := createL2BlockChain(t, nil) rollupAddresses.SequencerInbox = asserterSeqInboxAddr - asserterL2, err := arbnode.CreateNode(asserterL2Stack, asserterL2ChainDb, &conf, asserterL2Blockchain, l1Backend, rollupAddresses, nil) + asserterL2, err := 
arbnode.CreateNode(asserterL2Stack, asserterL2ChainDb, &conf, asserterL2Blockchain, l1Backend, rollupAddresses, nil, nil) Require(t, err) err = asserterL2.Start(ctx) Require(t, err) challengerL2Info, challengerL2Stack, challengerL2ChainDb, challengerL2Blockchain := createL2BlockChain(t, nil) rollupAddresses.SequencerInbox = challengerSeqInboxAddr - challengerL2, err := arbnode.CreateNode(challengerL2Stack, challengerL2ChainDb, &conf, challengerL2Blockchain, l1Backend, rollupAddresses, nil) + challengerL2, err := arbnode.CreateNode(challengerL2Stack, challengerL2ChainDb, &conf, challengerL2Blockchain, l1Backend, rollupAddresses, nil, nil) Require(t, err) err = challengerL2.Start(ctx) Require(t, err) diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 7d94635f70..61ddaa9ea0 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -14,7 +14,6 @@ import ( "fmt" "math/big" "testing" - "time" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -78,16 +77,6 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) deployAuth := l1info.GetDefaultTransactOpts("RollupOwner") - valWalletFactory, tx, _, err := rollupgen.DeployValidatorWalletCreator(&deployAuth, l1client) - Require(t, err) - _, err = arbutil.EnsureTxSucceededWithTimeout(ctx, l1client, tx, time.Second*5) - Require(t, err) - - valUtils, tx, _, err := rollupgen.DeployValidatorUtils(&deployAuth, l1client) - Require(t, err) - _, err = arbutil.EnsureTxSucceededWithTimeout(ctx, l1client, tx, time.Second*5) - Require(t, err) - balance := big.NewInt(params.Ether) balance.Mul(balance, big.NewInt(100)) l1info.GenerateAccount("ValidatorA") @@ -98,20 +87,20 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) TransferBalance(t, "Faucet", "ValidatorB", balance, l1info, l1client, ctx) l1authB := l1info.GetDefaultTransactOpts("ValidatorB") - valWalletAddrA, err := validator.CreateValidatorWallet(ctx, valWalletFactory, 0, &l1authA, l1client) + valWalletAddrA, err := validator.CreateValidatorWallet(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, &l1authA, l1client) Require(t, err) - valWalletAddrCheck, err := validator.CreateValidatorWallet(ctx, valWalletFactory, 0, &l1authA, l1client) + valWalletAddrCheck, err := validator.CreateValidatorWallet(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, &l1authA, l1client) Require(t, err) if valWalletAddrA == valWalletAddrCheck { Require(t, err, "didn't cache validator wallet address", valWalletAddrA.String(), "vs", valWalletAddrCheck.String()) } - valWalletAddrB, err := validator.CreateValidatorWallet(ctx, valWalletFactory, 0, &l1authB, l1client) + valWalletAddrB, err := validator.CreateValidatorWallet(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, &l1authB, l1client) Require(t, err) rollup, err := rollupgen.NewRollupAdminLogic(l2nodeA.DeployInfo.Rollup, l1client) Require(t, err) - tx, err = rollup.SetValidator(&deployAuth, []common.Address{valWalletAddrA, valWalletAddrB}, []bool{true, true}) + tx, err := rollup.SetValidator(&deployAuth, []common.Address{valWalletAddrA, valWalletAddrB}, []bool{true, true}) Require(t, err) _, err = arbutil.EnsureTxSucceeded(ctx, l1client, tx) Require(t, err) @@ -121,12 +110,11 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) _, err = arbutil.EnsureTxSucceeded(ctx, l1client, tx) Require(t, err) - valConfig := validator.ValidatorConfig{ - UtilsAddress: valUtils.Hex(), + valConfig := 
validator.L1ValidatorConfig{ TargetNumMachines: 4, } - valWalletA, err := validator.NewValidatorWallet(nil, valWalletFactory, l2nodeA.DeployInfo.Rollup, l1client, &l1authA, 0, func(common.Address) {}) + valWalletA, err := validator.NewValidatorWallet(nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l1client, &l1authA, 0, func(common.Address) {}) Require(t, err) if honestStakerInactive { valConfig.Strategy = "Defensive" @@ -134,37 +122,39 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) valConfig.Strategy = "MakeNodes" } stakerA, err := validator.NewStaker( - ctx, l1client, valWalletA, bind.CallOpts{}, - &l1authA, valConfig, l2nodeA.ArbInterface.BlockChain(), l2nodeA.InboxReader, l2nodeA.InboxTracker, l2nodeA.TxStreamer, l2nodeA.BlockValidator, + l2nodeA.DeployInfo.ValidatorUtils, ) Require(t, err) + err = stakerA.Initialize(ctx) + Require(t, err) - valWalletB, err := validator.NewValidatorWallet(nil, valWalletFactory, l2nodeB.DeployInfo.Rollup, l1client, &l1authB, 0, func(common.Address) {}) + valWalletB, err := validator.NewValidatorWallet(nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeB.DeployInfo.Rollup, l1client, &l1authB, 0, func(common.Address) {}) Require(t, err) valConfig.Strategy = "MakeNodes" stakerB, err := validator.NewStaker( - ctx, l1client, valWalletB, bind.CallOpts{}, - &l1authB, valConfig, l2nodeB.ArbInterface.BlockChain(), l2nodeB.InboxReader, l2nodeB.InboxTracker, l2nodeB.TxStreamer, l2nodeB.BlockValidator, + l2nodeA.DeployInfo.ValidatorUtils, ) Require(t, err) + err = stakerB.Initialize(ctx) + Require(t, err) l2info.GenerateAccount("BackgroundUser") tx = l2info.PrepareTx("Faucet", "BackgroundUser", l2info.TransferGas, balance, nil) diff --git a/test-node.bash b/test-node.bash index e40cbb0dda..9979c0daa8 100755 --- a/test-node.bash +++ b/test-node.bash @@ -1,5 +1,7 @@ #!/bin/bash +set -e + mydir=`dirname $0` cd "$mydir" @@ -32,6 +34,7 @@ if [[ $# -eq 1 ]]; then read -p "are you sure? 
[y/n]" -n 1 response if [[ $response == "y" ]] || [[ $response == "Y" ]]; then force_init=true + echo else exit 0 fi @@ -63,7 +66,7 @@ if $force_init; then docker-compose run geth account new --password /root/.ethereum/passphrase --keystore /keystore echo == Deploying L2 - docker-compose run --entrypoint target/bin/deploy sequencer -l1conn ws://geth:8546 -l1keystore /l1keystore -l1deployment /deploydata/deployment.json + docker-compose run --entrypoint target/bin/deploy sequencer -l1conn ws://geth:8546 -l1keystore /l1keystore -l1deployment /deploydata/deployment.json -authorizevalidators 10 fi echo == Launching Sequencer diff --git a/validator/l1_validator.go b/validator/l1_validator.go index aa9cd4d954..c45edcbb21 100644 --- a/validator/l1_validator.go +++ b/validator/l1_validator.go @@ -38,7 +38,7 @@ const ( CONFLICT_TYPE_INCOMPLETE ) -type Validator struct { +type L1Validator struct { rollup *RollupWatcher rollupAddress common.Address challengeManagerAddress common.Address @@ -55,8 +55,7 @@ type Validator struct { blockValidator *BlockValidator } -func NewValidator( - ctx context.Context, +func NewL1Validator( client arbutil.L1Interface, wallet *ValidatorWallet, validatorUtilsAddress common.Address, @@ -65,18 +64,12 @@ func NewValidator( inboxTracker InboxTrackerInterface, txStreamer TransactionStreamerInterface, blockValidator *BlockValidator, -) (*Validator, error) { +) (*L1Validator, error) { builder, err := NewValidatorTxBuilder(wallet) if err != nil { return nil, err } - rollup, err := NewRollupWatcher(ctx, wallet.RollupAddress(), builder, callOpts) - if err != nil { - return nil, err - } - localCallOpts := callOpts - localCallOpts.Context = ctx - challengeManagerAddress, err := rollup.ChallengeManager(&localCallOpts) + rollup, err := NewRollupWatcher(wallet.RollupAddress(), builder, callOpts) if err != nil { return nil, err } @@ -91,32 +84,40 @@ func NewValidator( if err != nil { return nil, err } - return &Validator{ - rollup: rollup, - rollupAddress: wallet.RollupAddress(), - challengeManagerAddress: challengeManagerAddress, - validatorUtils: validatorUtils, - client: client, - builder: builder, - wallet: wallet, - callOpts: callOpts, - genesisBlockNumber: genesisBlockNumber, - l2Blockchain: l2Blockchain, - inboxTracker: inboxTracker, - txStreamer: txStreamer, - blockValidator: blockValidator, + return &L1Validator{ + rollup: rollup, + rollupAddress: wallet.RollupAddress(), + validatorUtils: validatorUtils, + client: client, + builder: builder, + wallet: wallet, + callOpts: callOpts, + genesisBlockNumber: genesisBlockNumber, + l2Blockchain: l2Blockchain, + inboxTracker: inboxTracker, + txStreamer: txStreamer, + blockValidator: blockValidator, }, nil } -func (v *Validator) getCallOpts(ctx context.Context) *bind.CallOpts { +func (v *L1Validator) getCallOpts(ctx context.Context) *bind.CallOpts { opts := v.callOpts opts.Context = ctx return &opts } +func (v *L1Validator) Initialize(ctx context.Context) error { + err := v.rollup.Initialize(ctx) + if err != nil { + return err + } + v.challengeManagerAddress, err = v.rollup.ChallengeManager(v.getCallOpts(ctx)) + return err +} + // removeOldStakers removes the stakes of all validators staked on the latest confirmed node (aka "refundable" or "old" stakers), // except its own if dontRemoveSelf is true -func (v *Validator) removeOldStakers(ctx context.Context, dontRemoveSelf bool) (*types.Transaction, error) { +func (v *L1Validator) removeOldStakers(ctx context.Context, dontRemoveSelf bool) (*types.Transaction, error) { stakersToEliminate, 
err := v.validatorUtils.RefundableStakers(v.getCallOpts(ctx), v.rollupAddress) if err != nil { return nil, err @@ -139,7 +140,7 @@ func (v *Validator) removeOldStakers(ctx context.Context, dontRemoveSelf bool) ( return v.wallet.ReturnOldDeposits(ctx, stakersToEliminate) } -func (v *Validator) resolveTimedOutChallenges(ctx context.Context) (*types.Transaction, error) { +func (v *L1Validator) resolveTimedOutChallenges(ctx context.Context) (*types.Transaction, error) { challengesToEliminate, _, err := v.validatorUtils.TimedOutChallenges(v.getCallOpts(ctx), v.rollupAddress, 0, 10) if err != nil { return nil, err @@ -151,7 +152,7 @@ func (v *Validator) resolveTimedOutChallenges(ctx context.Context) (*types.Trans return v.wallet.TimeoutChallenges(ctx, v.challengeManagerAddress, challengesToEliminate) } -func (v *Validator) resolveNextNode(ctx context.Context, info *StakerInfo) (bool, error) { +func (v *L1Validator) resolveNextNode(ctx context.Context, info *StakerInfo) (bool, error) { callOpts := v.getCallOpts(ctx) confirmType, err := v.validatorUtils.CheckDecidableNextNode(callOpts, v.rollupAddress) if err != nil { @@ -184,7 +185,7 @@ func (v *Validator) resolveNextNode(ctx context.Context, info *StakerInfo) (bool } } -func (v *Validator) isRequiredStakeElevated(ctx context.Context) (bool, error) { +func (v *L1Validator) isRequiredStakeElevated(ctx context.Context) (bool, error) { callOpts := v.getCallOpts(ctx) requiredStake, err := v.rollup.CurrentRequiredStake(callOpts) if err != nil { @@ -220,7 +221,7 @@ type OurStakerInfo struct { // Returns (block number, global state inbox position is invalid, error). // If global state is invalid, block number is set to the last of the batch. -func (v *Validator) blockNumberFromGlobalState(gs GoGlobalState) (int64, bool, error) { +func (v *L1Validator) blockNumberFromGlobalState(gs GoGlobalState) (int64, bool, error) { var batchHeight arbutil.MessageIndex if gs.Batch > 0 { var err error @@ -244,7 +245,7 @@ func (v *Validator) blockNumberFromGlobalState(gs GoGlobalState) (int64, bool, e return arbutil.MessageCountToBlockNumber(batchHeight+arbutil.MessageIndex(gs.PosInBatch), v.genesisBlockNumber), false, nil } -func (v *Validator) generateNodeAction(ctx context.Context, stakerInfo *OurStakerInfo, strategy StakerStrategy) (nodeAction, bool, error) { +func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurStakerInfo, strategy StakerStrategy) (nodeAction, bool, error) { startState, prevInboxMaxCount, startStateProposed, err := lookupNodeStartState(ctx, v.rollup, stakerInfo.LatestStakedNode, stakerInfo.LatestStakedNodeHash) if err != nil { return nil, false, err @@ -283,7 +284,7 @@ func (v *Validator) generateNodeAction(ctx context.Context, stakerInfo *OurStake if v.blockValidator != nil { blocksValidated = v.blockValidator.BlocksValidated() } else { - blocksValidated = v.l2Blockchain.CurrentHeader().Number.Uint64() + blocksValidated = v.l2Blockchain.CurrentHeader().Number.Uint64() + 1 if localBatchCount > 0 { messageCount, err := v.inboxTracker.GetBatchMessageCount(localBatchCount - 1) @@ -293,7 +294,7 @@ func (v *Validator) generateNodeAction(ctx context.Context, stakerInfo *OurStake // Must be non-negative as a batch must contain at least one message lastBatchBlock := uint64(arbutil.MessageCountToBlockNumber(messageCount, v.genesisBlockNumber)) if blocksValidated > lastBatchBlock { - blocksValidated = lastBatchBlock + blocksValidated = lastBatchBlock + 1 } } else { blocksValidated = 0 @@ -424,7 +425,7 @@ func (v *Validator) 
generateNodeAction(ctx context.Context, stakerInfo *OurStake return correctNode, wrongNodesExist, nil } -func (v *Validator) createNewNodeAction( +func (v *L1Validator) createNewNodeAction( ctx context.Context, stakerInfo *OurStakerInfo, blocksValidated uint64, diff --git a/validator/rollup_watcher.go b/validator/rollup_watcher.go index 8ee23c5662..1ef9e35895 100644 --- a/validator/rollup_watcher.go +++ b/validator/rollup_watcher.go @@ -49,22 +49,14 @@ type RollupWatcher struct { baseCallOpts bind.CallOpts } -func NewRollupWatcher(ctx context.Context, address common.Address, client arbutil.L1Interface, callOpts bind.CallOpts) (*RollupWatcher, error) { +func NewRollupWatcher(address common.Address, client arbutil.L1Interface, callOpts bind.CallOpts) (*RollupWatcher, error) { con, err := rollupgen.NewRollupUserLogic(address, client) if err != nil { return nil, errors.WithStack(err) } - opts := callOpts - opts.Context = ctx - firstNode, err := con.GetNode(&opts, 0) - if err != nil { - return nil, err - } - return &RollupWatcher{ address: address, - fromBlock: firstNode.CreatedAtBlock, client: client, baseCallOpts: callOpts, RollupUserLogic: con, @@ -77,6 +69,15 @@ func (r *RollupWatcher) getCallOpts(ctx context.Context) *bind.CallOpts { return &opts } +func (r *RollupWatcher) Initialize(ctx context.Context) error { + firstNode, err := r.GetNode(r.getCallOpts(ctx), 0) + if err != nil { + return err + } + r.fromBlock = firstNode.CreatedAtBlock + return nil +} + func (r *RollupWatcher) LookupCreation(ctx context.Context) (*rollupgen.RollupUserLogicRollupInitialized, error) { var toBlock *big.Int if r.fromBlock > 0 { diff --git a/validator/staker.go b/validator/staker.go index f5ade4e480..ec819d1174 100644 --- a/validator/staker.go +++ b/validator/staker.go @@ -15,7 +15,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/arbstate/arbutil" "github.com/pkg/errors" @@ -41,16 +40,24 @@ type L1PostingStrategy struct { HighGasDelayBlocks int64 } -type ValidatorConfig struct { - Strategy string - UtilsAddress string - StakerDelay time.Duration - WalletFactoryAddress string - L1PostingStrategy L1PostingStrategy - DontChallenge bool - WithdrawDestination string - TargetNumMachines int - ConfirmationBlocks int64 +type L1ValidatorConfig struct { + Strategy string + StakerDelay time.Duration + L1PostingStrategy L1PostingStrategy + DontChallenge bool + WithdrawDestination string + TargetNumMachines int + ConfirmationBlocks int64 +} + +var DefaultL1ValidatorConfig = L1ValidatorConfig{ + Strategy: "Watchtower", + StakerDelay: time.Minute, + L1PostingStrategy: L1PostingStrategy{}, + DontChallenge: false, + WithdrawDestination: "", + TargetNumMachines: 4, + ConfirmationBlocks: 12, } type nodeAndHash struct { @@ -59,12 +66,11 @@ type nodeAndHash struct { } type Staker struct { - *Validator + *L1Validator activeChallenge *ChallengeManager strategy StakerStrategy baseCallOpts bind.CallOpts - auth *bind.TransactOpts - config ValidatorConfig + config L1ValidatorConfig highGasBlocksBuffer *big.Int lastActCalledBlock *big.Int inactiveLastCheckedNode *nodeAndHash @@ -88,27 +94,22 @@ func stakerStrategyFromString(s string) (StakerStrategy, error) { } func NewStaker( - ctx context.Context, - client *ethclient.Client, + client arbutil.L1Interface, wallet *ValidatorWallet, callOpts bind.CallOpts, - auth *bind.TransactOpts, - config 
ValidatorConfig, + config L1ValidatorConfig, l2Blockchain *core.BlockChain, inboxReader InboxReaderInterface, inboxTracker InboxTrackerInterface, txStreamer TransactionStreamerInterface, blockValidator *BlockValidator, + validatorUtilsAddress common.Address, ) (*Staker, error) { - if !common.IsHexAddress(config.UtilsAddress) { - return nil, fmt.Errorf("invalid validator utils address \"%v\"", config.UtilsAddress) - } - validatorUtilsAddress := common.HexToAddress(config.UtilsAddress) strategy, err := stakerStrategyFromString(config.Strategy) if err != nil { return nil, err } - val, err := NewValidator(ctx, client, wallet, validatorUtilsAddress, callOpts, l2Blockchain, inboxTracker, txStreamer, blockValidator) + val, err := NewL1Validator(client, wallet, validatorUtilsAddress, callOpts, l2Blockchain, inboxTracker, txStreamer, blockValidator) if err != nil { return nil, err } @@ -117,10 +118,9 @@ func NewStaker( withdrawDestination = common.HexToAddress(config.WithdrawDestination) } return &Staker{ - Validator: val, + L1Validator: val, strategy: strategy, baseCallOpts: callOpts, - auth: auth, config: config, highGasBlocksBuffer: big.NewInt(config.L1PostingStrategy.HighGasDelayBlocks), lastActCalledBlock: nil, @@ -129,14 +129,10 @@ func NewStaker( }, nil } -func (s *Staker) Start(ctx context.Context) chan bool { - done := make(chan bool) +func (s *Staker) Start(ctx context.Context) { go func() { - defer func() { - done <- true - }() backoff := time.Second - for { + for ctx.Err() == nil { arbTx, err := s.Act(ctx) if err == nil && arbTx != nil { _, err = arbutil.EnsureTxSucceededWithTimeout(ctx, s.client, arbTx, txTimeout) @@ -161,12 +157,11 @@ func (s *Staker) Start(ctx context.Context) chan bool { } select { case <-time.After(s.config.StakerDelay): - default: + case <-ctx.Done(): return } } }() - return done } func (s *Staker) shouldAct(ctx context.Context) bool { From 50c317f7be41f599b7e369d8b0352facd6d61711 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 23 Feb 2022 23:40:59 -0600 Subject: [PATCH 18/32] Fix TestStakersCooperative --- validator/l1_validator.go | 42 ++++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/validator/l1_validator.go b/validator/l1_validator.go index c45edcbb21..d4f6ddf054 100644 --- a/validator/l1_validator.go +++ b/validator/l1_validator.go @@ -231,15 +231,18 @@ func (v *L1Validator) blockNumberFromGlobalState(gs GoGlobalState) (int64, bool, } } - nextBatchHeight, err := v.inboxTracker.GetBatchMessageCount(gs.Batch) - if err != nil { - return 0, false, err - } + // Validate the PosInBatch if it's non-zero + if gs.PosInBatch > 0 { + nextBatchHeight, err := v.inboxTracker.GetBatchMessageCount(gs.Batch) + if err != nil { + return 0, false, err + } - if gs.PosInBatch >= uint64(nextBatchHeight-batchHeight) { - // This PosInBatch would enter the next batch. Return the last block before the next batch. - // We can be sure that MessageCountToBlockNumber will return a non-negative number as nextBatchHeight must be nonzero. - return arbutil.MessageCountToBlockNumber(nextBatchHeight, v.genesisBlockNumber), true, nil + if gs.PosInBatch >= uint64(nextBatchHeight-batchHeight) { + // This PosInBatch would enter the next batch. Return the last block before the next batch. + // We can be sure that MessageCountToBlockNumber will return a non-negative number as nextBatchHeight must be nonzero. 
+ return arbutil.MessageCountToBlockNumber(nextBatchHeight, v.genesisBlockNumber), true, nil + } } return arbutil.MessageCountToBlockNumber(batchHeight+arbutil.MessageIndex(gs.PosInBatch), v.genesisBlockNumber), false, nil @@ -280,11 +283,11 @@ func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurSta } } - var blocksValidated uint64 + var lastBlockValidated uint64 if v.blockValidator != nil { - blocksValidated = v.blockValidator.BlocksValidated() + lastBlockValidated = v.blockValidator.BlocksValidated() } else { - blocksValidated = v.l2Blockchain.CurrentHeader().Number.Uint64() + 1 + lastBlockValidated = v.l2Blockchain.CurrentHeader().Number.Uint64() if localBatchCount > 0 { messageCount, err := v.inboxTracker.GetBatchMessageCount(localBatchCount - 1) @@ -293,11 +296,11 @@ func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurSta } // Must be non-negative as a batch must contain at least one message lastBatchBlock := uint64(arbutil.MessageCountToBlockNumber(messageCount, v.genesisBlockNumber)) - if blocksValidated > lastBatchBlock { - blocksValidated = lastBatchBlock + 1 + if lastBlockValidated > lastBatchBlock { + lastBlockValidated = lastBatchBlock } } else { - blocksValidated = 0 + lastBlockValidated = 0 } } @@ -351,8 +354,8 @@ func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurSta if err != nil { return nil, false, err } - if int64(blocksValidated) < lastBlockNum { - return nil, false, fmt.Errorf("waiting for validator to catch up to assertion blocks: %v/%v", blocksValidated, lastBlockNum) + if int64(lastBlockValidated) < lastBlockNum { + return nil, false, fmt.Errorf("waiting for validator to catch up to assertion blocks: %v/%v", lastBlockValidated, lastBlockNum) } var expectedBlockHash common.Hash var expectedSendRoot common.Hash @@ -418,7 +421,7 @@ func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurSta if len(successorNodes) > 0 { lastNodeHashIfExists = &successorNodes[len(successorNodes)-1].NodeHash } - action, err := v.createNewNodeAction(ctx, stakerInfo, blocksValidated, localBatchCount, prevInboxMaxCount, startBlock, startState, lastNodeHashIfExists) + action, err := v.createNewNodeAction(ctx, stakerInfo, lastBlockValidated, localBatchCount, prevInboxMaxCount, startBlock, startState, lastNodeHashIfExists) return action, wrongNodesExist, err } @@ -428,7 +431,7 @@ func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurSta func (v *L1Validator) createNewNodeAction( ctx context.Context, stakerInfo *OurStakerInfo, - blocksValidated uint64, + lastBlockValidated uint64, localBatchCount uint64, prevInboxMaxCount *big.Int, startBlock *types.Block, @@ -444,11 +447,10 @@ func (v *L1Validator) createNewNodeAction( return nil, nil } - if blocksValidated == 0 || localBatchCount == 0 { + if localBatchCount == 0 { // we haven't validated anything return nil, nil } - lastBlockValidated := blocksValidated - 1 if startBlock != nil && lastBlockValidated <= startBlock.NumberU64() { // we haven't validated any new blocks return nil, nil From 3292ab3a45d9f25fa90cf234750af7fd47837107 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Wed, 23 Feb 2022 23:42:47 -0600 Subject: [PATCH 19/32] gas overview --- docs/arbos/Gas.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/docs/arbos/Gas.md b/docs/arbos/Gas.md index bcb6e6d71f..f86cd1f8a9 100644 --- a/docs/arbos/Gas.md +++ b/docs/arbos/Gas.md @@ -1,11 +1,14 @@ # Gas and Fees -A transaction's fees include the 
sum
-
-Gas is a concept that serves two purposes: and buying network resources.
+There are two parties a user pays when submitting a tx:
+- the poster, if reimbursable, for L1 resources such as the L1 calldata needed to post the tx
+- the network fee account for L2 resources, which include the computation, storage, and other burdens L2 nodes must bear to service the tx
+Though subject to change when batch-compression pricing is fully implemented, [the L1 component](ArbOS.md#l1pricingstate) of a tx's fee consists of the approximate cost of posting the tx's calldata to the L1 inbox. This best-effort estimate is computed using ArbOS's running L1 gas-price estimate and the poster's self-reported compressibility-discount ratio. These [charges are dropped][drop_l1_link] if the poster is not the user's preferred aggregator, which the user may set using [`ArbAggregator`](Precompiles.md#ArbAggregator)'s [`SetPreferredAggregator`](Precompiles.md#ArbAggregator) precompile method.
+[The L2 component](ArbOS.md#l2pricingstate) consists of the traditional fees geth would pay to miners in a vanilla L1 chain, such as the computation and storage charges applying the state transition function entails. ArbOS charges additional fees for executing its L2-specific [precompiles](Precompiles.md), whose fees are dynamically priced according to the specific resources used while executing the call.
+[drop_l1_link]: https://github.com/OffchainLabs/nitro/blob/2ba6d1aa45abcc46c28f3d4f560691ce5a396af8/arbos/l1pricing/l1pricing.go#L232
 ## Tips in L2
 While tips are not advised for those using the sequencer, which prioritizes transactions on a first-come first-served basis, 3rd-party aggregators may choose to order txes based on tips. A user specifies a tip by setting a gas price in excess of the basefee and will [pay that difference][pay_difference_link] on the amount of gas the tx uses.
@@ -18,7 +21,7 @@ A poster receives the tip only when the user has set them as their [preferred ag
 ## Gas Estimating Retryables
 When a transaction schedules another, the subsequent tx's execution [will be included][estimation_inclusion_link] when estimating gas via the node's RPC. A tx's gas estimate, then, can only be found if all the txes succeed at a given gas limit. This is especially important when working with retryables and scheduling redeem attempts.
-Because a call to [`redeem`](#ArbRetryableTx) donates all of the call's gas, doing multiple requires limiting the amount of gas provided to each subcall. Otherwise the first will take all of the gas and force the second to necessarily fail irrespective of the estimation's gas limit.
+Because a call to [`redeem`](Precompiles.md#ArbRetryableTx) donates all of the call's gas, doing multiple requires limiting the amount of gas provided to each subcall. Otherwise the first will take all of the gas and force the second to necessarily fail irrespective of the estimation's gas limit.
 Gas estimation for Retryable submissions is possible via [`NodeInterface.sol`][node_interface_link] and similarly requires the auto-redeem attempt succeed.
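To make the two-party fee split described in the Gas.md changes above concrete, here is a minimal Go sketch of that accounting. It is illustrative only and not code from this patch series: the function name `splitFee` and all sample numbers are assumptions. It models the L1 component as the poster's calldata cost at ArbOS's running L1 gas-price estimate, and the L2 component as execution gas priced at the basefee plus any tip.

```go
package main

import (
	"fmt"
	"math/big"
)

// splitFee is an illustrative sketch (not ArbOS code) of the two fee
// components described in Gas.md: an L1 piece that reimburses the poster
// for calldata and an L2 piece that pays the network fee account.
func splitFee(gasUsed, baseFee, tip, l1CalldataGas, l1GasPriceEstimate *big.Int) (l1, l2 *big.Int) {
	// L1 component: approximate cost of posting the tx's calldata,
	// priced at ArbOS's running L1 gas-price estimate.
	l1 = new(big.Int).Mul(l1CalldataGas, l1GasPriceEstimate)
	// L2 component: execution gas priced at the basefee plus any tip
	// (tips matter only for 3rd-party aggregators, not the sequencer).
	l2 = new(big.Int).Mul(gasUsed, new(big.Int).Add(baseFee, tip))
	return l1, l2
}

func main() {
	l1, l2 := splitFee(
		big.NewInt(50_000),         // L2 gas used by execution
		big.NewInt(100_000_000),    // L2 basefee, in wei
		big.NewInt(0),              // tip; first-come first-served under the sequencer
		big.NewInt(1_600),          // estimated L1 gas to post the calldata
		big.NewInt(30_000_000_000), // running L1 gas-price estimate, in wei
	)
	fmt.Println("poster (L1) component:", l1)
	fmt.Println("network (L2) component:", l2)
}
```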
From e7e90d10a7e4d85f4380c893ea61c34a0b27b89e Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Wed, 23 Feb 2022 23:43:51 -0600 Subject: [PATCH 20/32] update geth --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index edf6a19157..f31341b3df 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit edf6a19157606070b6a6660c8decc513e2408cb7 +Subproject commit f31341b3dfa987719b012bc976a6f4fe3b8a1221 From 637880eaecb5afc9c978ff9929d0c9934ca470c8 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 24 Feb 2022 08:14:00 +0200 Subject: [PATCH 21/32] dont track block validate thread --- validator/block_validator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator/block_validator.go b/validator/block_validator.go index c4c654e605..46e60618d5 100644 --- a/validator/block_validator.go +++ b/validator/block_validator.go @@ -523,7 +523,7 @@ func (v *BlockValidator) sendValidations(ctx context.Context) { atomic.AddInt32(&v.atomicValidationsRunning, 1) validationEntry.SeqMsgNr = startPos.BatchNumber // validation can take long time. Don't wait for it when shutting down - v.LaunchThread(func(ctx context.Context) { v.validate(ctx, validationEntry, startPos, endPos) }) + v.LaunchUntrackedThread(func() { v.validate(v.GetContext(), validationEntry, startPos, endPos) }) v.posNextSend += 1 v.globalPosNextSend = endPos } From ec3f58c7b99573e07979aa2830189e8e0e364c3a Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 24 Feb 2022 09:52:59 +0200 Subject: [PATCH 22/32] broadcastclient: improve context usage in tests --- broadcastclient/broadcastclient_test.go | 31 +++++++++++++++++++++---- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/broadcastclient/broadcastclient_test.go b/broadcastclient/broadcastclient_test.go index 1960135b99..18028b56f8 100644 --- a/broadcastclient/broadcastclient_test.go +++ b/broadcastclient/broadcastclient_test.go @@ -17,7 +17,8 @@ import ( ) func TestReceiveMessages(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() settings := wsbroadcastserver.BroadcasterConfig{ Addr: "0.0.0.0", @@ -42,7 +43,6 @@ func TestReceiveMessages(t *testing.T) { var wg sync.WaitGroup for i := 0; i < clientCount; i++ { - wg.Add(1) startMakeBroadcastClient(ctx, t, i, messageCount, &wg) } @@ -82,18 +82,25 @@ func startMakeBroadcastClient(ctx context.Context, t *testing.T, index int, expe broadcastClient.Start(ctx) messageCount := 0 + wg.Add(1) + go func() { defer wg.Done() defer broadcastClient.Close() for { + gotMsg := false select { case <-ts.messageReceiver: messageCount++ + gotMsg = true if messageCount == expectedCount { return } case <-time.After(60 * time.Second): + case <-ctx.Done(): + } + if !gotMsg { t.Errorf("Client %d expected %d meesages, only got %d messages\n", index, expectedCount, messageCount) return } @@ -103,7 +110,8 @@ func startMakeBroadcastClient(ctx context.Context, t *testing.T, index int, expe } func TestServerClientDisconnect(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() settings := wsbroadcastserver.BroadcasterConfig{ Addr: "0.0.0.0", @@ -154,7 +162,8 @@ func TestServerClientDisconnect(t *testing.T) { } func TestBroadcastClientReconnectsOnServerDisconnect(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() settings := 
wsbroadcastserver.BroadcasterConfig{ Addr: "0.0.0.0", @@ -188,7 +197,8 @@ func TestBroadcastClientReconnectsOnServerDisconnect(t *testing.T) { } func TestBroadcasterSendsCachedMessagesOnClientConnect(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() settings := wsbroadcastserver.BroadcasterConfig{ Addr: "0.0.0.0", @@ -264,22 +274,33 @@ func connectAndGetCachedMessages(ctx context.Context, t *testing.T, clientIndex defer wg.Done() defer broadcastClient.Close() + gotMsg := false // Wait for client to receive first item select { case receivedMsg := <-ts.messageReceiver: t.Logf("client %d received first message: %v\n", clientIndex, receivedMsg) + gotMsg = true case <-time.After(10 * time.Second): + case <-ctx.Done(): + } + if !gotMsg { t.Errorf("client %d did not receive first batch item\n", clientIndex) return } + gotMsg = false // Wait for client to receive second item select { case receivedMsg := <-ts.messageReceiver: t.Logf("client %d received second message: %v\n", clientIndex, receivedMsg) + gotMsg = true case <-time.After(10 * time.Second): + case <-ctx.Done(): + } + if !gotMsg { t.Errorf("client %d did not receive second batch item\n", clientIndex) return } + }() } From d9a4a365f777d822f773d991f7e8ec63af0494eb Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 24 Feb 2022 10:26:16 +0200 Subject: [PATCH 23/32] small renames --- system_tests/seqfeed_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index e3777c6f3d..ea82ed6ee4 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -91,7 +91,7 @@ func TestLyingSequencer(t *testing.T) { nodeConfigC.BatchPoster = false nodeConfigC.Broadcaster = true nodeConfigC.BroadcasterConfig = *newBroadcasterConfigTest(port) - l2clientC, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, &l2infoA.ArbInitData, &nodeConfigC) + l2clientC, nodeC := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, &l2infoA.ArbInitData, &nodeConfigC) // The client node, connects to lying sequencer's feed nodeConfigB := arbnode.NodeConfigL1Test @@ -99,7 +99,7 @@ func TestLyingSequencer(t *testing.T) { nodeConfigB.BatchPoster = false nodeConfigB.BroadcastClient = true nodeConfigB.BroadcastClientConfig = *newBroadcastClientConfigTest(port) - l2clientB, nodeC := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, &l2infoA.ArbInitData, &nodeConfigB) + l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, &l2infoA.ArbInitData, &nodeConfigB) l2infoA.GenerateAccount("FraudUser") l2infoA.GenerateAccount("RealUser") From bc3ddfec8a4903ff0ce4a48bdcf5087bf85c3f07 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 24 Feb 2022 11:59:11 +0200 Subject: [PATCH 24/32] broadcaster: support dynamic port allocation --- broadcaster/broadcaster.go | 5 +++++ system_tests/seqfeed_test.go | 18 ++++++++++-------- wsbroadcastserver/wsbroadcastserver.go | 4 ++++ 3 files changed, 19 insertions(+), 8 deletions(-) diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index 905cb34510..e05eb3def1 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -6,6 +6,7 @@ package broadcaster import ( "context" + "net" "sync/atomic" "time" @@ -183,6 +184,10 @@ func (b *Broadcaster) ClientCount() int32 { return b.server.ClientCount() } +func (b *Broadcaster) ListenerAddr() net.Addr { + return b.server.ListenerAddr() +} + func (b *Broadcaster) GetCachedMessageCount() int { 
return b.catchupBuffer.GetMessageCount() } diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index ea82ed6ee4..292f283075 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -8,6 +8,7 @@ import ( "context" "fmt" "math/big" + "net" "strconv" "testing" "time" @@ -38,18 +39,19 @@ func newBroadcastClientConfigTest(port int) *broadcastclient.BroadcastClientConf } func TestSequencerFeed(t *testing.T) { - port := 9642 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + seqNodeConfig := arbnode.NodeConfigL2Test seqNodeConfig.Broadcaster = true - seqNodeConfig.BroadcasterConfig = *newBroadcasterConfigTest(port) + seqNodeConfig.BroadcasterConfig = *newBroadcasterConfigTest(0) + l2info1, nodeA, client1 := CreateTestL2WithConfig(t, ctx, nil, &seqNodeConfig, true) clientNodeConfig := arbnode.NodeConfigL2Test clientNodeConfig.BroadcastClient = true + port := nodeA.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port clientNodeConfig.BroadcastClientConfig = *newBroadcastClientConfigTest(port) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - l2info1, nodeA, client1 := CreateTestL2WithConfig(t, ctx, nil, &seqNodeConfig, true) _, nodeB, client2 := CreateTestL2WithConfig(t, ctx, nil, &clientNodeConfig, false) l2info1.GenerateAccount("User2") @@ -77,8 +79,6 @@ func TestLyingSequencer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - port := 9643 - // The truthful sequencer nodeConfigA := arbnode.NodeConfigL1Test nodeConfigA.BatchPoster = true @@ -90,9 +90,11 @@ func TestLyingSequencer(t *testing.T) { nodeConfigC := arbnode.NodeConfigL1Test nodeConfigC.BatchPoster = false nodeConfigC.Broadcaster = true - nodeConfigC.BroadcasterConfig = *newBroadcasterConfigTest(port) + nodeConfigC.BroadcasterConfig = *newBroadcasterConfigTest(0) l2clientC, nodeC := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, &l2infoA.ArbInitData, &nodeConfigC) + port := nodeC.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port + // The client node, connects to lying sequencer's feed nodeConfigB := arbnode.NodeConfigL1Test nodeConfigB.Broadcaster = false diff --git a/wsbroadcastserver/wsbroadcastserver.go b/wsbroadcastserver/wsbroadcastserver.go index 561bd980e7..89baa3f99b 100644 --- a/wsbroadcastserver/wsbroadcastserver.go +++ b/wsbroadcastserver/wsbroadcastserver.go @@ -221,6 +221,10 @@ func (s *WSBroadcastServer) Start(ctx context.Context) error { return nil } +func (s *WSBroadcastServer) ListenerAddr() net.Addr { + return s.listener.Addr() +} + func (s *WSBroadcastServer) StopAndWait() { err := s.listener.Close() if err != nil { From 2726915b2591bb97d7aacb794b32ab045de19a75 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 24 Feb 2022 12:49:15 +0200 Subject: [PATCH 25/32] broadcast tests: use dynamic port allocation --- broadcastclient/broadcastclient_test.go | 31 +++++++++++++++---------- broadcaster/broadcaster_test.go | 2 +- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/broadcastclient/broadcastclient_test.go b/broadcastclient/broadcastclient_test.go index 18028b56f8..33364faa48 100644 --- a/broadcastclient/broadcastclient_test.go +++ b/broadcastclient/broadcastclient_test.go @@ -6,6 +6,8 @@ package broadcastclient import ( "context" + "fmt" + "net" "sync" "testing" "time" @@ -23,7 +25,7 @@ func TestReceiveMessages(t *testing.T) { settings := wsbroadcastserver.BroadcasterConfig{ Addr: "0.0.0.0", IOTimeout: 2 * time.Second, - Port: "9742", + Port: "0", Ping: 5 * 
time.Second, ClientTimeout: 20 * time.Second, Queue: 1, @@ -43,7 +45,7 @@ func TestReceiveMessages(t *testing.T) { var wg sync.WaitGroup for i := 0; i < clientCount; i++ { - startMakeBroadcastClient(ctx, t, i, messageCount, &wg) + startMakeBroadcastClient(ctx, t, b.ListenerAddr(), i, messageCount, &wg) } go func() { @@ -76,9 +78,14 @@ func (ts *dummyTransactionStreamer) AddMessages(pos arbutil.MessageIndex, force return nil } -func startMakeBroadcastClient(ctx context.Context, t *testing.T, index int, expectedCount int, wg *sync.WaitGroup) { +func newTestBroadcastClient(listenerAddress net.Addr, idleTimeout time.Duration, txStreamer TransactionStreamerInterface) *BroadcastClient { + port := listenerAddress.(*net.TCPAddr).Port + return NewBroadcastClient(fmt.Sprintf("ws://127.0.0.1:%d/", port), nil, idleTimeout, txStreamer) +} + +func startMakeBroadcastClient(ctx context.Context, t *testing.T, addr net.Addr, index int, expectedCount int, wg *sync.WaitGroup) { ts := NewDummyTransactionStreamer() - broadcastClient := NewBroadcastClient("ws://127.0.0.1:9742/", nil, 20*time.Second, ts) + broadcastClient := newTestBroadcastClient(addr, 20*time.Second, ts) broadcastClient.Start(ctx) messageCount := 0 @@ -116,7 +123,7 @@ func TestServerClientDisconnect(t *testing.T) { settings := wsbroadcastserver.BroadcasterConfig{ Addr: "0.0.0.0", IOTimeout: 2 * time.Second, - Port: "9743", + Port: "0", Ping: 1 * time.Second, ClientTimeout: 2 * time.Second, Queue: 1, @@ -132,7 +139,7 @@ func TestServerClientDisconnect(t *testing.T) { defer b.StopAndWait() ts := NewDummyTransactionStreamer() - broadcastClient := NewBroadcastClient("ws://127.0.0.1:9743/", nil, 20*time.Second, ts) + broadcastClient := newTestBroadcastClient(b.ListenerAddr(), 20*time.Second, ts) broadcastClient.Start(ctx) b.BroadcastSingle(arbstate.MessageWithMetadata{}, 0) @@ -168,7 +175,7 @@ func TestBroadcastClientReconnectsOnServerDisconnect(t *testing.T) { settings := wsbroadcastserver.BroadcasterConfig{ Addr: "0.0.0.0", IOTimeout: 2 * time.Second, - Port: "9743", + Port: "0", Ping: 50 * time.Second, ClientTimeout: 150 * time.Second, Queue: 1, @@ -183,7 +190,7 @@ func TestBroadcastClientReconnectsOnServerDisconnect(t *testing.T) { } defer b1.StopAndWait() - broadcastClient := NewBroadcastClient("ws://127.0.0.1:9743/", nil, 2*time.Second, nil) + broadcastClient := newTestBroadcastClient(b1.ListenerAddr(), 2*time.Second, nil) broadcastClient.Start(ctx) @@ -203,7 +210,7 @@ func TestBroadcasterSendsCachedMessagesOnClientConnect(t *testing.T) { settings := wsbroadcastserver.BroadcasterConfig{ Addr: "0.0.0.0", IOTimeout: 2 * time.Second, - Port: "9842", + Port: "0", Ping: 5 * time.Second, ClientTimeout: 15 * time.Second, Queue: 1, @@ -224,7 +231,7 @@ func TestBroadcasterSendsCachedMessagesOnClientConnect(t *testing.T) { var wg sync.WaitGroup for i := 0; i < 2; i++ { wg.Add(1) - connectAndGetCachedMessages(ctx, t, i, &wg) + connectAndGetCachedMessages(ctx, b.ListenerAddr(), t, i, &wg) } wg.Wait() @@ -265,9 +272,9 @@ func TestBroadcasterSendsCachedMessagesOnClientConnect(t *testing.T) { } } -func connectAndGetCachedMessages(ctx context.Context, t *testing.T, clientIndex int, wg *sync.WaitGroup) { +func connectAndGetCachedMessages(ctx context.Context, addr net.Addr, t *testing.T, clientIndex int, wg *sync.WaitGroup) { ts := NewDummyTransactionStreamer() - broadcastClient := NewBroadcastClient("ws://127.0.0.1:9842/", nil, 60*time.Second, ts) + broadcastClient := newTestBroadcastClient(addr, 60*time.Second, ts) broadcastClient.Start(ctx) go func() { diff 
--git a/broadcaster/broadcaster_test.go b/broadcaster/broadcaster_test.go index 31cf3998bd..bfbcf03046 100644 --- a/broadcaster/broadcaster_test.go +++ b/broadcaster/broadcaster_test.go @@ -57,7 +57,7 @@ func TestBroadcasterMessagesRemovedOnConfirmation(t *testing.T) { broadcasterSettings := wsbroadcastserver.BroadcasterConfig{ Addr: "0.0.0.0", IOTimeout: 2 * time.Second, - Port: "9642", + Port: "0", Ping: 5 * time.Second, ClientTimeout: 30 * time.Second, Queue: 1, From 9ba0ade91caba96198da496df5fb125fc351290c Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Thu, 24 Feb 2022 16:10:45 +0200 Subject: [PATCH 26/32] fix sequencer subscribe --- arbnode/sequencer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbnode/sequencer.go b/arbnode/sequencer.go index 5bbd314988..477760270e 100644 --- a/arbnode/sequencer.go +++ b/arbnode/sequencer.go @@ -203,9 +203,9 @@ func (s *Sequencer) Start(ctx context.Context) error { } headerChan, cancel := arbutil.HeaderSubscribeWithRetry(ctx, s.l1Client) - defer cancel() go (func() { + defer cancel() for { select { case header, ok := <-headerChan: From f516e7121ac3b604058fd8cc59c059870629331b Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 24 Feb 2022 11:44:12 -0600 Subject: [PATCH 27/32] Rename staker delay to staker interval and add node CLI option for it --- cmd/node/node.go | 2 ++ docker-compose.yaml | 2 +- validator/staker.go | 6 +++--- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/cmd/node/node.go b/cmd/node/node.go index 0d865b5fa2..34a653e5ba 100644 --- a/cmd/node/node.go +++ b/cmd/node/node.go @@ -74,6 +74,7 @@ func main() { l1validator := flag.Bool("l1validator", false, "enable L1 validator and staker functionality") validatorstrategy := flag.String("validatorstrategy", "watchtower", "L1 validator strategy, either watchtower, defensive, stakeLatest, or makeNodes (requires l1role=validator)") l1validatorwithoutblockvalidator := flag.Bool("UNSAFEl1validatorwithoutblockvalidator", false, "DANGEROUS! 
allows running an L1 validator without a block validator") + stakerinterval := flag.Duration("stakerinterval", time.Minute, "how often the L1 validator should check the status of the L1 rollup and maybe take action with its stake") flag.Parse() @@ -107,6 +108,7 @@ func main() { } nodeConf.L1Validator = true nodeConf.L1ValidatorConfig.Strategy = *validatorstrategy + nodeConf.L1ValidatorConfig.StakerInterval = *stakerinterval if !nodeConf.BlockValidator && !*l1validatorwithoutblockvalidator { flag.Usage() panic("L1 validator requires block validator to safely function") diff --git a/docker-compose.yaml b/docker-compose.yaml index 4b8c0d4c57..c415096815 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -20,7 +20,7 @@ services: - "seqdata:/data" - "l1keystore:/l1keystore" - "deploydata:/deploydata" - command: -datadir /data -dev -l1role sequencer -l1conn ws://geth:8546 -l1keystore /l1keystore -l1deployment /deploydata/deployment.json -httphost 0.0.0.0 -wshost 0.0.0.0 -UNSAFEl1validatorwithoutblockvalidator -l1validator -validatorstrategy MakeNodes + command: -datadir /data -dev -l1role sequencer -l1conn ws://geth:8546 -l1keystore /l1keystore -l1deployment /deploydata/deployment.json -httphost 0.0.0.0 -wshost 0.0.0.0 -UNSAFEl1validatorwithoutblockvalidator -l1validator -validatorstrategy MakeNodes -stakerinterval 10s depends_on: - geth volumes: diff --git a/validator/staker.go b/validator/staker.go index 919849d3e9..691b048454 100644 --- a/validator/staker.go +++ b/validator/staker.go @@ -43,7 +43,7 @@ type L1PostingStrategy struct { type L1ValidatorConfig struct { Strategy string - StakerDelay time.Duration + StakerInterval time.Duration L1PostingStrategy L1PostingStrategy DontChallenge bool WithdrawDestination string @@ -53,7 +53,7 @@ type L1ValidatorConfig struct { var DefaultL1ValidatorConfig = L1ValidatorConfig{ Strategy: "Watchtower", - StakerDelay: time.Minute, + StakerInterval: time.Minute, L1PostingStrategy: L1PostingStrategy{}, DontChallenge: false, WithdrawDestination: "", @@ -145,7 +145,7 @@ func (s *Staker) Start(ctxIn context.Context) { } if err == nil { backoff = time.Second - return s.config.StakerDelay + return s.config.StakerInterval } log.Warn("error acting as staker", "err", err) if backoff < 60*time.Second { From 70df5fb7b6a2527fe9bf9d1237c885c0a11e68b4 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Thu, 24 Feb 2022 12:03:39 -0600 Subject: [PATCH 28/32] remove TxGas in the failure case --- arbos/block_processor.go | 1 + 1 file changed, 1 insertion(+) diff --git a/arbos/block_processor.go b/arbos/block_processor.go index 7f296dfa21..24376ea23c 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -270,6 +270,7 @@ func ProduceBlockAdvanced( if err != nil { log.Debug("error applying transaction", "tx", tx, "err", err) + gasLeft -= params.TxGas continue } From 0ec7d64f0e211a01fcf85a07160b13c00b51c970 Mon Sep 17 00:00:00 2001 From: Harry Kalodner Date: Thu, 24 Feb 2022 14:53:29 -0500 Subject: [PATCH 29/32] Fix merge issues --- validator/l1_validator.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/validator/l1_validator.go b/validator/l1_validator.go index d4f6ddf054..332be7e406 100644 --- a/validator/l1_validator.go +++ b/validator/l1_validator.go @@ -15,7 +15,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/arbstate/arbos" "github.com/offchainlabs/arbstate/arbutil" 
"github.com/offchainlabs/arbstate/solgen/go/rollupgen" "github.com/pkg/errors" @@ -364,7 +363,7 @@ func (v *L1Validator) generateNodeAction(ctx context.Context, stakerInfo *OurSta if lastBlock == nil { return nil, false, fmt.Errorf("block %v not in database despite being validated", lastBlockNum) } - lastBlockExtra, err := arbos.DeserializeHeaderExtraInformation(lastBlock.Header()) + lastBlockExtra, err := types.DeserializeHeaderExtraInformation(lastBlock.Header()) if err != nil { return nil, false, err } @@ -503,7 +502,7 @@ func (v *L1Validator) createNewNodeAction( if assertingBlock == nil { return nil, fmt.Errorf("missing validated block %v", lastBlockValidated) } - assertingBlockExtra, err := arbos.DeserializeHeaderExtraInformation(assertingBlock.Header()) + assertingBlockExtra, err := types.DeserializeHeaderExtraInformation(assertingBlock.Header()) if err != nil { return nil, err } From 41b399128759ecd95589f26bda3c96cd3a247653 Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Thu, 24 Feb 2022 13:59:07 -0600 Subject: [PATCH 30/32] overflow check --- arbos/block_processor.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arbos/block_processor.go b/arbos/block_processor.go index 24376ea23c..5da110f434 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -269,8 +269,13 @@ func ProduceBlockAdvanced( hooks.TxErrors = append(hooks.TxErrors, err) if err != nil { + // we'll still deduct a TxGas's worth from the block even if the tx was invalid log.Debug("error applying transaction", "tx", tx, "err", err) - gasLeft -= params.TxGas + if gasLeft > params.TxGas { + gasLeft -= params.TxGas + } else { + gasLeft = 0 + } continue } From 9fc774628f9c67e006da43b360181e7d9dcbe53b Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 24 Feb 2022 12:03:45 -0600 Subject: [PATCH 31/32] Compare inbox tracker sync to L1 instead of local batch poster count --- arbnode/batch_poster.go | 98 ++++++++++++++++++----------------------- 1 file changed, 43 insertions(+), 55 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 2f59cd627f..07a75eb518 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -24,35 +24,34 @@ import ( type BatchPoster struct { util.StopWaiter - client arbutil.L1Interface - inbox *InboxTracker - streamer *TransactionStreamer - config *BatchPosterConfig - inboxContract *bridgegen.SequencerInbox - sequencesPosted uint64 - gasRefunder common.Address - transactOpts *bind.TransactOpts + client arbutil.L1Interface + inbox *InboxTracker + streamer *TransactionStreamer + config *BatchPosterConfig + inboxContract *bridgegen.SequencerInbox + gasRefunder common.Address + transactOpts *bind.TransactOpts } type BatchPosterConfig struct { - MaxBatchSize int - BatchPollDelay time.Duration - SubmissionSyncDelay time.Duration - CompressionLevel int + MaxBatchSize int + BatchPollDelay time.Duration + PostingErrorDelay time.Duration + CompressionLevel int } var DefaultBatchPosterConfig = BatchPosterConfig{ - MaxBatchSize: 500, - BatchPollDelay: time.Second / 10, - SubmissionSyncDelay: time.Second, - CompressionLevel: brotli.DefaultCompression, + MaxBatchSize: 500, + BatchPollDelay: time.Second / 10, + PostingErrorDelay: time.Second * 5, + CompressionLevel: brotli.DefaultCompression, } var TestBatchPosterConfig = BatchPosterConfig{ - MaxBatchSize: 10000, - BatchPollDelay: time.Millisecond * 10, - SubmissionSyncDelay: time.Millisecond * 10, - CompressionLevel: 2, + MaxBatchSize: 10000, + BatchPollDelay: time.Millisecond * 
10, + PostingErrorDelay: time.Millisecond * 10, + CompressionLevel: 2, } func NewBatchPoster(client arbutil.L1Interface, inbox *InboxTracker, streamer *TransactionStreamer, config *BatchPosterConfig, contractAddress common.Address, refunder common.Address, transactOpts *bind.TransactOpts) (*BatchPoster, error) { @@ -61,14 +60,13 @@ func NewBatchPoster(client arbutil.L1Interface, inbox *InboxTracker, streamer *T return nil, err } return &BatchPoster{ - client: client, - inbox: inbox, - streamer: streamer, - config: config, - inboxContract: inboxContract, - sequencesPosted: 1, - transactOpts: transactOpts, - gasRefunder: refunder, + client: client, + inbox: inbox, + streamer: streamer, + config: config, + inboxContract: inboxContract, + transactOpts: transactOpts, + gasRefunder: refunder, }, nil } @@ -272,34 +270,22 @@ func (s *batchSegments) CloseAndGetBytes() ([]byte, error) { return fullMsg, nil } -func (b *BatchPoster) lastSubmissionIsSynced() bool { - batchcount, err := b.inbox.GetBatchCount() +func (b *BatchPoster) postSequencerBatch(ctx context.Context) (*types.Transaction, error) { + batchSeqNum, err := b.inbox.GetBatchCount() if err != nil { - log.Warn("BatchPoster: batchcount failed", "err", err) - return false - } - if batchcount < b.sequencesPosted { - b.sequencesPosted = batchcount - return false + return nil, err } - if batchcount > b.sequencesPosted { - log.Warn("detected unexpected sequences posted", "actual", batchcount, "expected", b.sequencesPosted) - b.sequencesPosted = batchcount - return true + inboxContractCount, err := b.inboxContract.BatchCount(&bind.CallOpts{Context: ctx, Pending: true}) + if err != nil { + return nil, err } - return true -} - -// TODO make sure we detect end of block! -func (b *BatchPoster) postSequencerBatch() (*types.Transaction, error) { - for !b.lastSubmissionIsSynced() { - log.Warn("BatchPoster: not in sync", "sequencedPosted", b.sequencesPosted) - <-time.After(b.config.SubmissionSyncDelay) + if inboxContractCount.Cmp(new(big.Int).SetUint64(batchSeqNum)) != 0 { + return nil, fmt.Errorf("inbox tracker not synced: contract has %v batches but inbox tracker has %v", inboxContractCount, batchSeqNum) } var prevBatchMeta BatchMetadata - if b.sequencesPosted > 0 { + if batchSeqNum > 0 { var err error - prevBatchMeta, err = b.inbox.GetBatchMetadata(b.sequencesPosted - 1) + prevBatchMeta, err = b.inbox.GetBatchMetadata(batchSeqNum - 1) if err != nil { return nil, err } @@ -331,13 +317,14 @@ func (b *BatchPoster) postSequencerBatch() (*types.Transaction, error) { return nil, err } if sequencerMsg == nil { - log.Debug("BatchPoster: batch nil", "sequence nr.", b.sequencesPosted, "from", prevBatchMeta.MessageCount, "prev delayed", prevBatchMeta.DelayedMessageCount) + log.Debug("BatchPoster: batch nil", "sequence nr.", batchSeqNum, "from", prevBatchMeta.MessageCount, "prev delayed", prevBatchMeta.DelayedMessageCount) return nil, nil } - tx, err := b.inboxContract.AddSequencerL2BatchFromOrigin(b.transactOpts, new(big.Int).SetUint64(b.sequencesPosted), sequencerMsg, new(big.Int).SetUint64(segments.delayedMsg), b.gasRefunder) + txOpts := *b.transactOpts + txOpts.Context = ctx + tx, err := b.inboxContract.AddSequencerL2BatchFromOrigin(&txOpts, new(big.Int).SetUint64(batchSeqNum), sequencerMsg, new(big.Int).SetUint64(segments.delayedMsg), b.gasRefunder) if err == nil { - b.sequencesPosted++ - log.Info("BatchPoster: batch sent", "sequence nr.", b.sequencesPosted, "from", prevBatchMeta.MessageCount, "to", msgToPost, "prev delayed", 
prevBatchMeta.DelayedMessageCount, "current delayed", segments.delayedMsg, "total segments", len(segments.rawSegments)) + log.Info("BatchPoster: batch sent", "sequence nr.", batchSeqNum, "from", prevBatchMeta.MessageCount, "to", msgToPost, "prev delayed", prevBatchMeta.DelayedMessageCount, "current delayed", segments.delayedMsg, "total segments", len(segments.rawSegments)) } return tx, err } @@ -345,9 +332,10 @@ func (b *BatchPoster) postSequencerBatch() (*types.Transaction, error) { func (b *BatchPoster) Start(ctxIn context.Context) { b.StopWaiter.Start(ctxIn) b.CallIteratively(func(ctx context.Context) time.Duration { - tx, err := b.postSequencerBatch() + tx, err := b.postSequencerBatch(ctx) if err != nil { log.Error("error posting batch", "err", err) + return b.config.PostingErrorDelay } if tx != nil { _, err = arbutil.EnsureTxSucceededWithTimeout(ctx, b.client, tx, time.Minute) From fc39d5ea4968dbdaae9549eac0044db7818851fe Mon Sep 17 00:00:00 2001 From: Rachel Franks Date: Thu, 24 Feb 2022 17:43:19 -0600 Subject: [PATCH 32/32] address review comments --- docs/arbos/ArbOS.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/arbos/ArbOS.md b/docs/arbos/ArbOS.md index ae24229b23..715cb6df48 100644 --- a/docs/arbos/ArbOS.md +++ b/docs/arbos/ArbOS.md @@ -122,7 +122,9 @@ While much of this state is accessible through the [`ArbGasInfo`](Precompiles.md ArbOS's larger gas pool [determines][maintain_limit_link] the per-block gas limit, setting a dynamic [upper limit][per_block_limit_link] on the amount of compute gas an L2 block may have. This limit is always enforced, though for the [first transaction][first_transaction_link] it's done in the [GasChargingHook](Geth.md#GasChargingHook) to avoid sharp decreases in the L1 gas price from over-inflating the compute component purchased to above the gas limit. This improves UX by allowing the first tx to succeed rather than requiring a resubmission. Because the first tx lowers the amount of space left in the block, subsequent transactions do not employ this strategy and may fail from such compute-component inflation. This is acceptable because such txes are only present in cases where the system is under heavy load and the result is that the user's tx is dropped without charges since the state transition fails early. Those trusting the sequencer can rely on the tx being automatically resubmitted in such a scenario. -ArbOS's per-block gas limit is distinct from geth's block limit, which ArbOS [sets sufficiently high][geth_pool_set_link] so as to never run out. This is safe since geth's block limit exists to constrain the amount of work done per block, which ArbOS already does via its own per-block gas limit. Though it'll never run out, a block's txes use the [same geth gas pool][same_geth_pool_link] to maintain the invariant that the pool decreases monotonically after each tx. Block headers [use the geth block limit][use_geth_pool_link] for internal consistency and to ensure gas estimation works. +The reason we need a per-block gas limit is that Arbitrator WAVM execution is much slower than native tx execution. This means that there can only be so much gas -- which roughly translates to wall-clock time -- in an L2 block. It also provides an opportunity for ArbOS to limit the size of blocks should demand continue to surge even as the price rises. + +ArbOS's per-block gas limit is distinct from geth's block limit, which ArbOS [sets sufficiently high][geth_pool_set_link] so as to never run out. 
This is safe since geth's block limit exists to constrain the amount of work done per block, which ArbOS already does via its own per-block gas limit. Though it'll never run out, a block's txes use the [same geth gas pool][same_geth_pool_link] to maintain the invariant that the pool decreases monotonically after each tx. Block headers [use the geth block limit][use_geth_pool_link] for internal consistency and to ensure gas estimation works.
 These are both distinct from the [`gasLeft`][per_block_limit_link] variable, which ephemerally exists outside of global state to both keep L2 blocks from exceeding ArbOS's per-block gas limit and to [deduct space][deduct_space_link] in situations where the state transition failed or [used negligible amounts][neglibale_amounts_link] of compute gas. ArbOS does not need to persist [`gasLeft`][per_block_limit_link] because it is its _pool_ that induces a revert and because txes use the geth block limit during EVM execution.
 [l2PricingState_link]: https://github.com/OffchainLabs/nitro/blob/fa36a0f138b8a7e684194f9840315d80c390f324/arbos/l2pricing/l2pricing.go#L14
 [block_production_link]: https://github.com/OffchainLabs/nitro/blob/fa36a0f138b8a7e684194f9840315d80c390f324/arbos/block_processor.go#L77
@@ -134,3 +136,5 @@ ArbOS's per-block gas limit is distinct from geth's block limit, which ArbOS [se
 [geth_pool_set_link]: https://github.com/OffchainLabs/nitro/blob/2ba6d1aa45abcc46c28f3d4f560691ce5a396af8/arbos/block_processor.go#L166
 [same_geth_pool_link]: https://github.com/OffchainLabs/nitro/blob/2ba6d1aa45abcc46c28f3d4f560691ce5a396af8/arbos/block_processor.go#L199
 [use_geth_pool_link]: https://github.com/OffchainLabs/nitro/blob/2ba6d1aa45abcc46c28f3d4f560691ce5a396af8/arbos/block_processor.go#L67
+[deduct_space_link]: https://github.com/OffchainLabs/nitro/blob/faf55a1da8afcabb1f3c406b291e721bfde71a05/arbos/block_processor.go#L272
+[neglibale_amounts_link]: https://github.com/OffchainLabs/nitro/blob/faf55a1da8afcabb1f3c406b291e721bfde71a05/arbos/block_processor.go#L328
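As a closing illustration of the `gasLeft` bookkeeping discussed above, here is a minimal Go sketch of the saturating deduction that the "overflow check" commit applies when a tx fails to apply. The constant and function names here are assumptions chosen for exposition; only the guard pattern mirrors the patched `block_processor.go`.

```go
package main

import "fmt"

// txGas mirrors geth's params.TxGas, the flat 21000-gas cost of a tx.
const txGas uint64 = 21_000

// deductFailedTx is an illustrative sketch of the guard added in the
// "overflow check" commit: a failed tx still costs the block a TxGas's
// worth of space, but the unsigned subtraction must saturate at zero
// rather than underflow.
func deductFailedTx(gasLeft uint64) uint64 {
	if gasLeft > txGas {
		return gasLeft - txGas
	}
	return 0
}

func main() {
	fmt.Println(deductFailedTx(100_000)) // 79000
	fmt.Println(deductFailedTx(10_000))  // 0, not a wrapped-around huge value
}
```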