diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index 477021289c..dde0be541e 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -64,14 +64,14 @@ jobs: - name: Setup nodejs uses: actions/setup-node@v3 with: - node-version: '16' + node-version: '18' cache: 'yarn' cache-dependency-path: '**/yarn.lock' - name: Install rust stable uses: dtolnay/rust-toolchain@stable with: - toolchain: "1.75" + toolchain: "1.76" components: 'llvm-tools-preview, rustfmt, clippy' - name: Install rust nightly diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 035f873cbb..f394c795aa 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,7 +32,7 @@ jobs: strategy: fail-fast: false matrix: - test-mode: [defaults, race, challenge, stylus] + test-mode: [defaults, race, challenge, stylus, long] steps: - name: Checkout @@ -48,7 +48,7 @@ jobs: - name: Setup nodejs uses: actions/setup-node@v3 with: - node-version: '16' + node-version: '18' cache: 'yarn' cache-dependency-path: '**/yarn.lock' @@ -193,7 +193,13 @@ jobs: run: | skip_tests=`grep -vE '^\s*#|^\s*$' ci_skip_tests | sed 's/.*/^&$/g' | tr '\n' '|' | sed 's/|$//'` packages=`go list ./...` - stdbuf -oL gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 --no-color=false -- ./... -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -tags=stylustest -run=TestProgramArbitrator -skip "$skip_tests" > >(stdbuf -oL tee full.log | grep -vE "INFO|seal") + stdbuf -oL gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 --no-color=false -- ./... -timeout 60m -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -parallel=8 -tags=stylustest -run="TestProgramArbitrator" -skip "$skip_tests" > >(stdbuf -oL tee full.log | grep -vE "INFO|seal") + + - name: run long stylus tests + if: matrix.test-mode == 'long' + run: | + packages=`go list ./...` + stdbuf -oL gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 --no-color=false -- ./... -timeout 60m -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... 
-parallel=8 -tags=stylustest -run="TestProgramLong" > >(stdbuf -oL tee full.log | grep -vE "INFO|seal") - name: Archive detailed run log uses: actions/upload-artifact@v3 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 5bfaf35886..2163b564fc 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -66,7 +66,7 @@ jobs: - name: Setup nodejs uses: actions/setup-node@v3 with: - node-version: '16' + node-version: '18' cache: 'yarn' cache-dependency-path: '**/yarn.lock' diff --git a/.github/workflows/merge-checks.yml b/.github/workflows/merge-checks.yml new file mode 100644 index 0000000000..6f291bbb22 --- /dev/null +++ b/.github/workflows/merge-checks.yml @@ -0,0 +1,20 @@ +name: Merge Checks + +on: + pull_request: + branches: [ master ] + types: [synchronize, opened, reopened, labeled, unlabeled] + +jobs: + design-approved-check: + if: ${{ !contains(github.event.*.labels.*.name, 'design-approved') }} + name: Design Approved Check + runs-on: ubuntu-latest + steps: + - name: Check for design-approved label + run: | + echo "Pull request is missing the 'design-approved' label" + echo "This workflow fails so that the pull request cannot be merged" + exit 1 + + diff --git a/.golangci.yml b/.golangci.yml index 2828582486..fe4fca0113 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,13 +1,12 @@ # golangci-lint configuration run: - skip-dirs: - - go-ethereum - - fastcache - timeout: 10m issues: + exclude-dirs: + - go-ethereum + - fastcache exclude-rules: - path: _test\.go linters: diff --git a/Dockerfile b/Dockerfile index e8fc999335..4f28aa47f5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,7 +24,7 @@ RUN apt-get update && \ FROM scratch as brotli-library-export COPY --from=brotli-library-builder /workspace/install/ / -FROM node:16-bookworm-slim as contracts-builder +FROM node:18-bookworm-slim as contracts-builder RUN apt-get update && \ apt-get install -y git python3 make g++ curl RUN curl -L https://foundry.paradigm.xyz | bash && . 
~/.bashrc && ~/.foundry/bin/foundryup @@ -41,7 +41,8 @@ RUN apt-get update && apt-get install -y curl build-essential=12.9 FROM wasm-base as wasm-libs-builder # clang / lld used by soft-float wasm -RUN apt-get install -y clang=1:14.0-55.7~deb12u1 lld=1:14.0-55.7~deb12u1 wabt +RUN apt-get update && \ + apt-get install -y clang=1:14.0-55.7~deb12u1 lld=1:14.0-55.7~deb12u1 wabt # pinned rust 1.75.0 RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.75.0 --target x86_64-unknown-linux-gnu wasm32-unknown-unknown wasm32-wasi COPY ./Makefile ./ @@ -64,8 +65,8 @@ FROM scratch as wasm-libs-export COPY --from=wasm-libs-builder /workspace/ / FROM wasm-base as wasm-bin-builder - # pinned go version -RUN curl -L https://golang.org/dl/go1.21.7.linux-`dpkg --print-architecture`.tar.gz | tar -C /usr/local -xzf - +# pinned go version +RUN curl -L https://golang.org/dl/go1.21.10.linux-`dpkg --print-architecture`.tar.gz | tar -C /usr/local -xzf - COPY ./Makefile ./go.mod ./go.sum ./ COPY ./arbcompress ./arbcompress COPY ./espressocrypto ./espressocrypto @@ -93,7 +94,7 @@ COPY --from=contracts-builder workspace/contracts/node_modules/@offchainlabs/upg COPY --from=contracts-builder workspace/.make/ .make/ RUN PATH="$PATH:/usr/local/go/bin" NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-wasm-bin -FROM rust:1.75-slim-bullseye as prover-header-builder +FROM rust:1.75-slim-bookworm as prover-header-builder WORKDIR /workspace RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ @@ -120,16 +121,16 @@ RUN NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-prover-header FROM scratch as prover-header-export COPY --from=prover-header-builder /workspace/target/ / -FROM rust:1.75-slim-bullseye as prover-builder +FROM rust:1.75-slim-bookworm as prover-builder WORKDIR /workspace RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ - apt-get install -y make wget gpg software-properties-common zlib1g-dev libstdc++-10-dev wabt \ - wabt clang llvm-dev libclang-common-14-dev libpolly-14-dev + apt-get install -y make wget gpg software-properties-common zlib1g-dev libstdc++-12-dev wabt \ + clang llvm-dev libclang-common-14-dev libpolly-14-dev RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - && \ - add-apt-repository 'deb http://apt.llvm.org/bullseye/ llvm-toolchain-bullseye-15 main' && \ + add-apt-repository 'deb http://apt.llvm.org/bookworm/ llvm-toolchain-bookworm-15 main' && \ apt-get update && \ - apt-get install -y llvm-15-dev libclang-common-15-dev libpolly-15-dev + apt-get install -y llvm-15-dev libclang-common-15-dev COPY --from=brotli-library-export / target/ COPY arbitrator/Cargo.* arbitrator/ COPY arbitrator/arbutil arbitrator/arbutil @@ -211,8 +212,9 @@ COPY ./scripts/download-machine.sh . 
#RUN ./download-machine.sh consensus-v11 0xf4389b835497a910d7ba3ebfb77aa93da985634f3c052de1290360635be40c4a #RUN ./download-machine.sh consensus-v11.1 0x68e4fe5023f792d4ef584796c84d710303a5e12ea02d6e37e2b5e9c4332507c4 #RUN ./download-machine.sh consensus-v20 0x8b104a2e80ac6165dc58b9048de12f301d70b02a0ab51396c22b4b4b802a16a4 +RUN ./download-machine.sh consensus-v30 0xb0de9cb89e4d944ae6023a3b62276e54804c242fd8c4c2d8e6cc4450f5fa8b1b && true -FROM golang:1.21-bullseye as node-builder +FROM golang:1.21.10-bookworm as node-builder WORKDIR /workspace ARG version="" ARG datetime="" @@ -261,7 +263,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ apt-get install -y \ ca-certificates \ - wabt && \ + wabt \ + sysstat && \ /usr/sbin/update-ca-certificates && \ useradd -s /bin/bash user && \ mkdir -p /home/user/l1keystore && \ @@ -276,11 +279,15 @@ USER user WORKDIR /home/user/ ENTRYPOINT [ "/usr/local/bin/nitro" ] +FROM offchainlabs/nitro-node:v2.3.4-rc.5-b4cc111 as nitro-legacy + FROM nitro-node-slim as nitro-node USER root COPY --from=prover-export /bin/jit /usr/local/bin/ COPY --from=node-builder /workspace/target/bin/daserver /usr/local/bin/ COPY --from=node-builder /workspace/target/bin/datool /usr/local/bin/ +COPY --from=nitro-legacy /home/user/target/machines /home/user/nitro-legacy/machines +RUN rm -rf /workspace/target/legacy-machines/latest RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ apt-get install -y \ @@ -290,10 +297,23 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* && \ nitro --version +ENTRYPOINT [ "/usr/local/bin/nitro" , "--validation.wasm.allowed-wasm-module-roots", "/home/user/nitro-legacy/machines,/home/user/target/machines"] USER user -FROM nitro-node as nitro-node-dev +FROM nitro-node as nitro-node-validator +USER root +COPY --from=nitro-legacy /usr/local/bin/nitro-val /home/user/nitro-legacy/bin/nitro-val +COPY --from=nitro-legacy /usr/local/bin/jit /home/user/nitro-legacy/bin/jit +RUN export DEBIAN_FRONTEND=noninteractive && \ + apt-get update && \ + apt-get install -y xxd netcat-traditional && \ + rm -rf /var/lib/apt/lists/* /usr/share/doc/* /var/cache/ldconfig/aux-cache /usr/lib/python3.9/__pycache__/ /usr/lib/python3.9/*/__pycache__/ /var/log/* +COPY scripts/split-val-entry.sh /usr/local/bin +ENTRYPOINT [ "/usr/local/bin/split-val-entry.sh" ] +USER user + +FROM nitro-node-validator as nitro-node-dev USER root # Copy in latest WASM module root RUN rm -f /home/user/target/machines/latest @@ -317,15 +337,5 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ USER user -FROM nitro-node-dev as nitro-node-split -USER root - -RUN export DEBIAN_FRONTEND=noninteractive && \ - apt-get update && \ - apt-get install -y xxd netcat-traditional -COPY scripts/split-val-entry.sh /usr/local/bin -ENTRYPOINT [ "/usr/local/bin/split-val-entry.sh" ] -USER user - FROM nitro-node as nitro-node-default # Just to ensure nitro-node-dist is default diff --git a/Makefile b/Makefile index 5f4927c830..621ed155cb 100644 --- a/Makefile +++ b/Makefile @@ -162,6 +162,7 @@ test-go-deps: \ build-replay-env \ $(stylus_test_wasms) \ $(arbitrator_stylus_lib) \ + $(arbitrator_generated_header) \ $(patsubst %,$(arbitrator_cases)/%.wasm, global-state read-inboxmsg-10 global-state-wrapper const read-hotshot-10) build-prover-header: $(arbitrator_generated_header) @@ -293,7 +294,7 @@ $(arbitrator_jit): 
$(DEP_PREDICATE) $(jit_files) $(arbitrator_cases)/rust/$(wasm32_wasi)/%.wasm: $(arbitrator_cases)/rust/src/bin/%.rs $(arbitrator_cases)/rust/src/lib.rs cargo build --manifest-path $(arbitrator_cases)/rust/Cargo.toml --release --target wasm32-wasi --bin $(patsubst $(arbitrator_cases)/rust/$(wasm32_wasi)/%.wasm,%, $@) -$(arbitrator_cases)/go/testcase.wasm: $(arbitrator_cases)/go/*.go +$(arbitrator_cases)/go/testcase.wasm: $(arbitrator_cases)/go/*.go .make/solgen cd $(arbitrator_cases)/go && GOOS=wasip1 GOARCH=wasm go build -o testcase.wasm $(arbitrator_generated_header): $(DEP_PREDICATE) $(stylus_files) @@ -448,8 +449,12 @@ target/testdata/preimages.bin: contracts/test/prover/proofs/rust-%.json: $(arbitrator_cases)/rust/$(wasm32_wasi)/%.wasm $(prover_bin) $(arbitrator_wasm_libs) target/testdata/preimages.bin $(prover_bin) $< $(arbitrator_wasm_lib_flags) -o $@ -b --allow-hostapi --require-success --inbox-add-stub-headers --inbox $(arbitrator_cases)/rust/data/msg0.bin --inbox $(arbitrator_cases)/rust/data/msg1.bin --delayed-inbox $(arbitrator_cases)/rust/data/msg0.bin --delayed-inbox $(arbitrator_cases)/rust/data/msg1.bin --preimages target/testdata/preimages.bin -contracts/test/prover/proofs/go.json: $(arbitrator_cases)/go/testcase.wasm $(prover_bin) $(arbitrator_wasm_libs) target/testdata/preimages.bin $(arbitrator_tests_link_deps) - $(prover_bin) $< $(arbitrator_wasm_lib_flags) -o $@ -i 50000000 --require-success --preimages target/testdata/preimages.bin +contracts/test/prover/proofs/go.json: $(arbitrator_cases)/go/testcase.wasm $(prover_bin) $(arbitrator_wasm_libs) target/testdata/preimages.bin $(arbitrator_tests_link_deps) $(arbitrator_cases)/user.wasm + $(prover_bin) $< $(arbitrator_wasm_lib_flags) -o $@ -b --require-success --preimages target/testdata/preimages.bin --stylus-modules $(arbitrator_cases)/user.wasm + +# avoid testing user.wasm in onestepproofs. It can only run as stylus program. +contracts/test/prover/proofs/user.json: + echo "[]" > $@ # avoid testing read-inboxmsg-10 in onestepproofs. It's used for go challenge testing. 
contracts/test/prover/proofs/read-inboxmsg-10.json: diff --git a/arbitrator/prover/src/binary.rs b/arbitrator/prover/src/binary.rs index f6c3e9fe8f..aa5537476c 100644 --- a/arbitrator/prover/src/binary.rs +++ b/arbitrator/prover/src/binary.rs @@ -616,7 +616,10 @@ impl<'a> WasmBinary<'a> { cached_init = cached_init.saturating_add(data_len.saturating_mul(75244) / 100_000); cached_init = cached_init.saturating_add(footprint as u64 * 5); - let mut init = cached_init; + let mut init: u64 = 0; + if compile.version == 1 { + init = cached_init; // in version 1 cached cost is part of init cost + } init = init.saturating_add(funcs.saturating_mul(8252) / 1000); init = init.saturating_add(type_len.saturating_mul(1059) / 1000); init = init.saturating_add(wasm_len.saturating_mul(1286) / 10_000); diff --git a/arbitrator/prover/src/main.rs b/arbitrator/prover/src/main.rs index e74a341174..5aa2b142a0 100644 --- a/arbitrator/prover/src/main.rs +++ b/arbitrator/prover/src/main.rs @@ -269,7 +269,10 @@ fn main() -> Result<()> { if opts.proving_backoff { let mut extra_data = 0; - if matches!(next_opcode, Opcode::ReadInboxMessage | Opcode::ReadPreImage) { + if matches!( + next_opcode, + Opcode::ReadInboxMessage | Opcode::ReadPreImage | Opcode::SwitchThread + ) { extra_data = next_inst.argument_data; } let count_entry = proving_backoff diff --git a/arbitrator/prover/src/programs/config.rs b/arbitrator/prover/src/programs/config.rs index 0b5ce17475..1a37294b04 100644 --- a/arbitrator/prover/src/programs/config.rs +++ b/arbitrator/prover/src/programs/config.rs @@ -162,8 +162,7 @@ impl CompileConfig { match version { 0 => {} - 1 => { - // TODO: settle on reasonable values for the v1 release + 1 | 2 => { config.bounds.heap_bound = Pages(128); // 8 mb config.bounds.max_frame_size = 10 * 1024; config.bounds.max_frame_contention = 4096; diff --git a/arbitrator/prover/test-cases/dynamic.wat b/arbitrator/prover/test-cases/dynamic.wat index 97c55ba80b..8771bde87c 100644 --- a/arbitrator/prover/test-cases/dynamic.wat +++ b/arbitrator/prover/test-cases/dynamic.wat @@ -12,8 +12,7 @@ ;; WAVM Module hash (data (i32.const 0x000) - "\87\12\6b\19\8a\ce\0c\ba\00\6a\ab\9b\b7\45\bb\0a\ac\48\4d\6b\b8\b5\f9\03\a2\99\8f\64\00\9f\e2\04") ;; user - + "\a1\49\cf\81\13\ff\9c\95\f2\c8\c2\a1\42\35\75\36\7d\e8\6d\d4\22\d8\71\14\bb\9e\a4\7b\af\53\5d\d7") ;; user (func $start (local $user i32) (local $internals i32) ;; link in user.wat i32.const 0 diff --git a/arbitrator/prover/test-cases/go/main.go b/arbitrator/prover/test-cases/go/main.go index 0df8010449..1f81553af2 100644 --- a/arbitrator/prover/test-cases/go/main.go +++ b/arbitrator/prover/test-cases/go/main.go @@ -1,6 +1,9 @@ // Copyright 2021-2024, Offchain Labs, Inc. 
// For license information, see https://github.com/nitro/blob/master/LICENSE +//go:build wasm +// +build wasm + package main import ( @@ -19,6 +22,7 @@ import ( merkletree "github.com/wealdtech/go-merkletree" "github.com/offchainlabs/nitro/arbcompress" + "github.com/offchainlabs/nitro/arbos/programs" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/wavmio" ) @@ -69,11 +73,51 @@ const BYTES_PER_FIELD_ELEMENT = 32 var BLS_MODULUS, _ = new(big.Int).SetString("52435875175126190479447740508185965837690552500527637822603658699938581184513", 10) +var stylusModuleHash = common.HexToHash("a149cf8113ff9c95f2c8c2a1423575367de86dd422d87114bb9ea47baf535dd7") // user.wat + +func callStylusProgram(recurse int) { + evmData := programs.EvmData{} + progParams := programs.ProgParams{ + MaxDepth: 10000, + InkPrice: 1, + DebugMode: true, + } + reqHandler := func(req programs.RequestType, input []byte) ([]byte, []byte, uint64) { + fmt.Printf("got request type %d req %v\n", req, input) + if req == programs.GetBytes32 { + if recurse > 0 { + callStylusProgram(recurse - 1) + } + answer := common.Hash{} + return answer[:], nil, 1 + } + + panic("unsupported call") + } + calldata := common.Hash{}.Bytes() + _, _, err := programs.CallProgramLoop( + stylusModuleHash, + calldata, + 160000000, + &evmData, + &progParams, + reqHandler) + if err != nil { + panic(err) + } +} + func main() { fmt.Printf("starting executable with %v arg(s): %v\n", len(os.Args), os.Args) runtime.GC() time.Sleep(time.Second) + fmt.Printf("Stylus test\n") + + callStylusProgram(5) + + fmt.Printf("Stylus test done!\n") + // Data for the tree data := [][]byte{ []byte("Foo"), diff --git a/arbitrator/prover/test-cases/link.wat b/arbitrator/prover/test-cases/link.wat index e033bf0e98..ef15326481 100644 --- a/arbitrator/prover/test-cases/link.wat +++ b/arbitrator/prover/test-cases/link.wat @@ -30,7 +30,7 @@ (data (i32.const 0x140) "\47\f7\4f\9c\21\51\4f\52\24\ea\d3\37\5c\bf\a9\1b\1a\5f\ef\22\a5\2a\60\30\c5\52\18\90\6b\b1\51\e5") ;; iops (data (i32.const 0x160) - "\87\12\6b\19\8a\ce\0c\ba\00\6a\ab\9b\b7\45\bb\0a\ac\48\4d\6b\b8\b5\f9\03\a2\99\8f\64\00\9f\e2\04") ;; user + "\a1\49\cf\81\13\ff\9c\95\f2\c8\c2\a1\42\35\75\36\7d\e8\6d\d4\22\d8\71\14\bb\9e\a4\7b\af\53\5d\d7") ;; user (data (i32.const 0x180) "\ee\47\08\f6\47\b2\10\88\1f\89\86\e7\e3\79\6b\b2\77\43\f1\4e\ee\cf\45\4a\9b\7c\d7\c4\5b\63\b6\d7") ;; return diff --git a/arbitrator/prover/test-cases/user.wat b/arbitrator/prover/test-cases/user.wat index d159339f66..9ecb4dcc45 100644 --- a/arbitrator/prover/test-cases/user.wat +++ b/arbitrator/prover/test-cases/user.wat @@ -2,6 +2,14 @@ ;; For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE (module + (import "vm_hooks" "storage_load_bytes32" (func $storage_load_bytes32 (param i32 i32))) + + (func $storage_load (result i32) + i32.const 0 + i32.const 32 + call $storage_load_bytes32 + i32.const 0 + ) (func $safe (result i32) i32.const 5 ) @@ -35,6 +43,11 @@ (then (call $out_of_bounds) (return)) ) + (i32.eq (local.get $args_len) (i32.const 32)) + (if + (then (call $storage_load) (return)) + ) + unreachable ) (memory (export "memory") 1 1)) diff --git a/arbitrator/stylus/src/cache.rs b/arbitrator/stylus/src/cache.rs index 2b83c6152f..06739f2219 100644 --- a/arbitrator/stylus/src/cache.rs +++ b/arbitrator/stylus/src/cache.rs @@ -21,7 +21,7 @@ macro_rules! 
cache { } pub struct InitCache { - arbos: HashMap, + long_term: HashMap, lru: LruCache, } @@ -59,20 +59,31 @@ impl CacheItem { } impl InitCache { + // current implementation only has one tag that stores to the long_term + // future implementations might have more, but 0 is a reserved tag + // that will never modify long_term state + const ARBOS_TAG: u32 = 1; + fn new(size: usize) -> Self { Self { - arbos: HashMap::new(), + long_term: HashMap::new(), lru: LruCache::new(NonZeroUsize::new(size).unwrap()), } } + pub fn set_lru_size(size: u32) { + cache!() + .lru + .resize(NonZeroUsize::new(size.try_into().unwrap()).unwrap()) + } + /// Retrieves a cached value, updating items as necessary. pub fn get(module_hash: Bytes32, version: u16, debug: bool) -> Option<(Module, Store)> { let mut cache = cache!(); let key = CacheKey::new(module_hash, version, debug); // See if the item is in the long term cache - if let Some(item) = cache.arbos.get(&key) { + if let Some(item) = cache.long_term.get(&key) { return Some(item.data()); } @@ -84,18 +95,27 @@ impl InitCache { } /// Inserts an item into the long term cache, cloning from the LRU cache if able. + /// If long_term_tag is 0 will only insert to LRU pub fn insert( module_hash: Bytes32, module: &[u8], version: u16, + long_term_tag: u32, debug: bool, ) -> Result<(Module, Store)> { let key = CacheKey::new(module_hash, version, debug); // if in LRU, add to ArbOS let mut cache = cache!(); + if let Some(item) = cache.long_term.get(&key) { + return Ok(item.data()); + } if let Some(item) = cache.lru.peek(&key).cloned() { - cache.arbos.insert(key, item.clone()); + if long_term_tag == Self::ARBOS_TAG { + cache.long_term.insert(key, item.clone()); + } else { + cache.lru.promote(&key) + } return Ok(item.data()); } drop(cache); @@ -105,37 +125,34 @@ impl InitCache { let item = CacheItem::new(module, engine); let data = item.data(); - cache!().arbos.insert(key, item); + let mut cache = cache!(); + if long_term_tag != Self::ARBOS_TAG { + cache.lru.put(key, item); + } else { + cache.long_term.insert(key, item); + } Ok(data) } - /// Inserts an item into the short-lived LRU cache. - pub fn insert_lru( - module_hash: Bytes32, - module: &[u8], - version: u16, - debug: bool, - ) -> Result<(Module, Store)> { - let engine = CompileConfig::version(version, debug).engine(); - let module = unsafe { Module::deserialize_unchecked(&engine, module)? }; - - let key = CacheKey::new(module_hash, version, debug); - let item = CacheItem::new(module, engine); - cache!().lru.put(key, item.clone()); - Ok(item.data()) - } - /// Evicts an item in the long-term cache. - pub fn evict(module_hash: Bytes32, version: u16, debug: bool) { + pub fn evict(module_hash: Bytes32, version: u16, long_term_tag: u32, debug: bool) { + if long_term_tag != Self::ARBOS_TAG { + return; + } let key = CacheKey::new(module_hash, version, debug); - cache!().arbos.remove(&key); + let mut cache = cache!(); + if let Some(item) = cache.long_term.remove(&key) { + cache.lru.put(key, item); + } } - /// Modifies the cache for reorg, dropping the long-term cache. 
- pub fn reorg(_block: u64) { + pub fn clear_long_term(long_term_tag: u32) { + if long_term_tag != Self::ARBOS_TAG { + return; + } let mut cache = cache!(); let cache = &mut *cache; - for (key, item) in cache.arbos.drain() { + for (key, item) in cache.long_term.drain() { cache.lru.put(key, item); // not all will fit, just a heuristic } } diff --git a/arbitrator/stylus/src/lib.rs b/arbitrator/stylus/src/lib.rs index 7abfb98bf5..3c53359f8b 100644 --- a/arbitrator/stylus/src/lib.rs +++ b/arbitrator/stylus/src/lib.rs @@ -183,6 +183,7 @@ pub unsafe extern "C" fn stylus_call( debug_chain: bool, output: *mut RustBytes, gas: *mut u64, + long_term_tag: u32, ) -> UserOutcomeKind { let module = module.slice(); let calldata = calldata.slice().to_vec(); @@ -193,7 +194,14 @@ pub unsafe extern "C" fn stylus_call( // Safety: module came from compile_user_wasm and we've paid for memory expansion let instance = unsafe { - NativeInstance::deserialize_cached(module, config.version, evm_api, evm_data, debug_chain) + NativeInstance::deserialize_cached( + module, + config.version, + evm_api, + evm_data, + long_term_tag, + debug_chain, + ) }; let mut instance = match instance { Ok(instance) => instance, @@ -212,33 +220,47 @@ pub unsafe extern "C" fn stylus_call( status } +/// Resizes the LRU cache. +#[no_mangle] +pub extern "C" fn stylus_cache_lru_resize(size: u32) { + InitCache::set_lru_size(size); +} + /// Caches an activated user program. /// /// # Safety /// /// `module` must represent a valid module produced from `stylus_activate`. +/// arbos_tag: a tag for the ArbOS cache; tag 0 won't affect real caching. +/// Currently, caching is only affected when tag == 1. #[no_mangle] pub unsafe extern "C" fn stylus_cache_module( module: GoSliceData, module_hash: Bytes32, version: u16, + arbos_tag: u32, debug: bool, ) { - if let Err(error) = InitCache::insert(module_hash, module.slice(), version, debug) { + if let Err(error) = InitCache::insert(module_hash, module.slice(), version, arbos_tag, debug) { panic!("tried to cache invalid asm!: {error}"); } } /// Evicts an activated user program from the init cache. #[no_mangle] -pub extern "C" fn stylus_evict_module(module_hash: Bytes32, version: u16, debug: bool) { - InitCache::evict(module_hash, version, debug); +pub extern "C" fn stylus_evict_module( + module_hash: Bytes32, + version: u16, + arbos_tag: u32, + debug: bool, +) { + InitCache::evict(module_hash, version, arbos_tag, debug); } /// Reorgs the init cache. This will likely never happen. #[no_mangle] -pub extern "C" fn stylus_reorg_vm(block: u64) { - InitCache::reorg(block); +pub extern "C" fn stylus_reorg_vm(_block: u64, arbos_tag: u32) { + InitCache::clear_long_term(arbos_tag); } /// Frees the vector. Does nothing when the vector is null. 
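The cache.rs and lib.rs hunks above replace the old `arbos` map with a tag-gated `long_term` map: only `ARBOS_TAG` (1) may pin entries long-term, tag 0 is reserved and never touches long-term state, and eviction or a reorg demotes pinned entries back into the LRU. Below is a minimal standalone sketch of that behavior, not the crate's actual types: `SimpleInitCache`, the toy `VecDeque` LRU, and `String` entries are made-up stand-ins for the real cache, which stores compiled modules behind a proper LRU and a mutex.

```rust
// Illustrative sketch only: a simplified stand-in for the tag logic in stylus' InitCache.
use std::collections::{HashMap, VecDeque};

const ARBOS_TAG: u32 = 1; // only this tag may touch the long-term map
type Entry = String; // stands in for a compiled module

struct SimpleInitCache {
    long_term: HashMap<u64, Entry>,
    lru: VecDeque<(u64, Entry)>, // toy LRU: front = most recently used
    lru_cap: usize,
}

impl SimpleInitCache {
    fn new(lru_cap: usize) -> Self {
        Self { long_term: HashMap::new(), lru: VecDeque::new(), lru_cap }
    }

    /// Tag 1 pins the entry long-term; any other tag (including 0) only feeds the LRU.
    fn insert(&mut self, key: u64, item: Entry, tag: u32) {
        if tag == ARBOS_TAG {
            self.long_term.insert(key, item);
        } else {
            self.lru.push_front((key, item));
            self.lru.truncate(self.lru_cap);
        }
    }

    /// Eviction with a non-ArbOS tag is a no-op; otherwise demote the entry to the LRU.
    fn evict(&mut self, key: u64, tag: u32) {
        if tag != ARBOS_TAG {
            return;
        }
        if let Some(item) = self.long_term.remove(&key) {
            self.lru.push_front((key, item));
            self.lru.truncate(self.lru_cap);
        }
    }

    /// On reorg, drain everything pinned by ArbOS back into the LRU.
    fn clear_long_term(&mut self, tag: u32) {
        if tag != ARBOS_TAG {
            return;
        }
        for (key, item) in self.long_term.drain() {
            self.lru.push_front((key, item));
        }
        self.lru.truncate(self.lru_cap);
    }
}

fn main() {
    let mut cache = SimpleInitCache::new(2);
    cache.insert(0xaa, "module A".to_string(), ARBOS_TAG); // pinned long-term
    cache.insert(0xbb, "module B".to_string(), 0);         // LRU only
    cache.evict(0xaa, 0);                                   // ignored: tag 0 is reserved
    cache.clear_long_term(ARBOS_TAG);                       // demotes module A into the LRU
    assert!(cache.long_term.is_empty());
    assert_eq!(cache.lru.len(), 2);
}
```

Reserving tag 0 as a no-op for long-term state lets callers reuse the same insert/evict entry points without ever pinning anything, which matches the "0 is a reserved tag" comment in the cache.rs hunk above.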
diff --git a/arbitrator/stylus/src/native.rs b/arbitrator/stylus/src/native.rs index 6d5e4cd2e9..2858d59fdc 100644 --- a/arbitrator/stylus/src/native.rs +++ b/arbitrator/stylus/src/native.rs @@ -113,6 +113,7 @@ impl> NativeInstance { version: u16, evm: E, evm_data: EvmData, + mut long_term_tag: u32, debug: bool, ) -> Result { let compile = CompileConfig::version(version, debug); @@ -122,10 +123,11 @@ impl> NativeInstance { if let Some((module, store)) = InitCache::get(module_hash, version, debug) { return Self::from_module(module, store, env); } - let (module, store) = match env.evm_data.cached { - true => InitCache::insert(module_hash, module, version, debug)?, - false => InitCache::insert_lru(module_hash, module, version, debug)?, - }; + if !env.evm_data.cached { + long_term_tag = 0; + } + let (module, store) = + InitCache::insert(module_hash, module, version, long_term_tag, debug)?; Self::from_module(module, store, env) } diff --git a/arbitrator/stylus/tests/multicall/Cargo.lock b/arbitrator/stylus/tests/multicall/Cargo.lock index 67b375d746..ca70689bf7 100644 --- a/arbitrator/stylus/tests/multicall/Cargo.lock +++ b/arbitrator/stylus/tests/multicall/Cargo.lock @@ -17,16 +17,29 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e416903084d3392ebd32d94735c395d6709415b76c7728e594d3f996f2b03e65" dependencies = [ + "alloy-rlp", "bytes", - "cfg-if", + "cfg-if 1.0.0", "const-hex", "derive_more", "hex-literal", "itoa", + "proptest", "ruint", + "serde", "tiny-keccak", ] +[[package]] +name = "alloy-rlp" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d58d9f5da7b40e9bfff0b7e7816700be4019db97d4b6359fe7f94a9e22e42ac" +dependencies = [ + "arrayvec", + "bytes", +] + [[package]] name = "alloy-sol-macro" version = "0.3.1" @@ -51,20 +64,48 @@ dependencies = [ "alloy-primitives", "alloy-sol-macro", "const-hex", + "serde", ] +[[package]] +name = "arrayvec" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" + [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" + [[package]] name = "block-buffer" version = "0.10.4" @@ -86,6 +127,12 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + [[package]] name = "cfg-if" version = "1.0.0" @@ -98,7 +145,7 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "268f52aae268980d03dd9544c1ea591965b2735b038d6998d6e4ab37c8c24445" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "hex", "serde", @@ -184,6 +231,28 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "fastrand" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + [[package]] name = "generic-array" version = "0.14.7" @@ -194,6 +263,17 @@ dependencies = [ "version_check", ] +[[package]] +name = "getrandom" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi", +] + [[package]] name = "heck" version = "0.4.1" @@ -241,9 +321,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.147" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libm" @@ -251,18 +331,44 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +[[package]] +name = "linux-raw-sys" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" + [[package]] name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +[[package]] +name = "memory_units" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3" + +[[package]] +name = "mini-alloc" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd9993556d3850cdbd0da06a3dc81297edcfa050048952d84d75e8b944e8f5af" +dependencies = [ + "cfg-if 1.0.0", + "wee_alloc", +] + [[package]] name = "multicall" version = "0.1.0" dependencies = [ + "alloy-primitives", + "alloy-sol-types", "hex", + "mini-alloc", "stylus-sdk", + "wee_alloc", ] [[package]] @@ -296,15 +402,26 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65" dependencies = [ - "bitflags", + "bit-set", + "bitflags 1.3.2", "byteorder", + "lazy_static", "num-traits", "rand", "rand_chacha", 
"rand_xorshift", + "regex-syntax 0.6.29", + "rusty-fork", + "tempfile", "unarray", ] +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.29" @@ -320,6 +437,8 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ + "libc", + "rand_chacha", "rand_core", ] @@ -338,6 +457,9 @@ name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] [[package]] name = "rand_xorshift" @@ -357,7 +479,7 @@ dependencies = [ "aho-corasick", "memchr", "regex-automata", - "regex-syntax", + "regex-syntax 0.7.4", ] [[package]] @@ -368,9 +490,15 @@ checksum = "b7b6d6190b7594385f61bd3911cd1be99dfddcfc365a4160cc2ab5bff4aed294" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.7.4", ] +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + [[package]] name = "regex-syntax" version = "0.7.4" @@ -406,6 +534,31 @@ dependencies = [ "semver", ] +[[package]] +name = "rustix" +version = "0.38.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" +dependencies = [ + "bitflags 2.5.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "semver" version = "1.0.17" @@ -417,6 +570,20 @@ name = "serde" version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.171" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.25", +] [[package]] name = "sha3" @@ -434,7 +601,7 @@ version = "0.4.2" dependencies = [ "alloy-primitives", "alloy-sol-types", - "cfg-if", + "cfg-if 1.0.0", "convert_case 0.6.0", "lazy_static", "proc-macro2", @@ -451,7 +618,7 @@ version = "0.4.2" dependencies = [ "alloy-primitives", "alloy-sol-types", - "cfg-if", + "cfg-if 1.0.0", "derivative", "hex", "keccak-const", @@ -492,6 +659,18 @@ dependencies = [ "syn 2.0.25", ] +[[package]] +name = "tempfile" +version = "3.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +dependencies = [ + "cfg-if 1.0.0", + "fastrand", + "rustix", + "windows-sys", +] + [[package]] name = "tiny-keccak" version = "2.0.2" @@ -537,6 +716,121 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = 
"wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wee_alloc" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbb3b5a6b2bb17cb6ad44a2e68a43e8d2722c997da10e928665c72ec6c0a0b8e" +dependencies = [ + "cfg-if 0.1.10", + "libc", + "memory_units", + "winapi", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" + 
[[package]] name = "zeroize" version = "1.6.0" diff --git a/arbitrator/stylus/tests/multicall/Cargo.toml b/arbitrator/stylus/tests/multicall/Cargo.toml index 2ab5728d12..3bc48c6826 100644 --- a/arbitrator/stylus/tests/multicall/Cargo.toml +++ b/arbitrator/stylus/tests/multicall/Cargo.toml @@ -4,8 +4,12 @@ version = "0.1.0" edition = "2021" [dependencies] +alloy-primitives = "0.3.1" +alloy-sol-types = "0.3.1" +mini-alloc = "0.4.2" stylus-sdk = { path = "../../../langs/rust/stylus-sdk", features = ["reentrant"] } hex = "0.4.3" +wee_alloc = "0.4.5" [profile.release] codegen-units = 1 diff --git a/arbitrator/stylus/tests/multicall/src/main.rs b/arbitrator/stylus/tests/multicall/src/main.rs index 1f255cd99c..fd6929b8f1 100644 --- a/arbitrator/stylus/tests/multicall/src/main.rs +++ b/arbitrator/stylus/tests/multicall/src/main.rs @@ -3,13 +3,28 @@ #![no_main] +extern crate alloc; + use stylus_sdk::{ + storage::{StorageCache, GlobalStorage}, alloy_primitives::{Address, B256}, + alloy_sol_types::sol, call::RawCall, console, + evm, prelude::*, }; +use wee_alloc::WeeAlloc; + +#[global_allocator] +static ALLOC: WeeAlloc = WeeAlloc::INIT; + +sol!{ + event Called(address addr, uint8 count, bool success, bytes return_data); + event Storage(bytes32 slot, bytes32 data, bool write); +} + #[entrypoint] fn user_main(input: Vec) -> Result, Vec> { let mut input = input.as_slice(); @@ -19,7 +34,7 @@ fn user_main(input: Vec) -> Result, Vec> { // combined output of all calls let mut output = vec![]; - console!("Calling {count} contract(s)"); + console!("Performing {count} action(s)"); for _ in 0..count { let length = u32::from_be_bytes(input[..4].try_into().unwrap()) as usize; input = &input[4..]; @@ -30,35 +45,75 @@ fn user_main(input: Vec) -> Result, Vec> { let kind = curr[0]; curr = &curr[1..]; - let mut value = None; - if kind == 0 { - value = Some(B256::try_from(&curr[..32]).unwrap()); - curr = &curr[32..]; - } + if kind & 0xf0 == 0 { + // caller + let mut value = None; + if kind & 0x3 == 0 { + value = Some(B256::try_from(&curr[..32]).unwrap()); + curr = &curr[32..]; + }; - let addr = Address::try_from(&curr[..20]).unwrap(); - let data = &curr[20..]; - match value { - Some(value) if !value.is_zero() => console!( - "Calling {addr} with {} bytes and value {} {kind}", - data.len(), - hex::encode(value) - ), - _ => console!("Calling {addr} with {} bytes {kind}", curr.len()), - } + let addr = Address::try_from(&curr[..20]).unwrap(); + let data = &curr[20..]; + match value { + Some(value) if !value.is_zero() => console!( + "Calling {addr} with {} bytes and value {} {kind}", + data.len(), + hex::encode(value) + ), + _ => console!("Calling {addr} with {} bytes {kind}", curr.len()), + } - let raw_call = match kind { - 0 => RawCall::new_with_value(value.unwrap_or_default().into()), - 1 => RawCall::new_delegate(), - 2 => RawCall::new_static(), - x => panic!("unknown call kind {x}"), - }; - let return_data = unsafe { raw_call.call(addr, data)? 
}; - - if !return_data.is_empty() { - console!("Contract {addr} returned {} bytes", return_data.len()); + let raw_call = match kind & 0x3 { + 0 => RawCall::new_with_value(value.unwrap_or_default().into()), + 1 => RawCall::new_delegate(), + 2 => RawCall::new_static(), + x => panic!("unknown call kind {x}"), + }; + let (success, return_data) = match unsafe { raw_call.call(addr, data) } { + Ok(return_data) => (true, return_data), + Err(revert_data) => { + if kind & 0x4 == 0 { + return Err(revert_data) + } + (false, vec![]) + }, + }; + + if !return_data.is_empty() { + console!("Contract {addr} returned {} bytes", return_data.len()); + } + if kind & 0x8 != 0 { + evm::log(Called { addr, count, success, return_data: return_data.clone() }) + } + output.extend(return_data); + } else if kind & 0xf0 == 0x10 { + // storage + let slot = B256::try_from(&curr[..32]).unwrap(); + curr = &curr[32..]; + let data; + let write; + if kind & 0x7 == 0 { + console!("writing slot {}", curr.len()); + data = B256::try_from(&curr[..32]).unwrap(); + write = true; + unsafe { StorageCache::set_word(slot.into(), data.into()) }; + StorageCache::flush(); + } else if kind & 0x7 == 1{ + console!("reading slot"); + write = false; + data = StorageCache::get_word(slot.into()); + output.extend(data.clone()); + } else { + panic!("unknown storage kind {kind}") + } + if kind & 0x8 != 0 { + console!("slot: {}, data: {}, write {write}", slot, data); + evm::log(Storage { slot: slot.into(), data: data.into(), write }) + } + } else { + panic!("unknown action {kind}") } - output.extend(return_data); input = next; } diff --git a/arbitrator/stylus/tests/return-size.wat b/arbitrator/stylus/tests/return-size.wat new file mode 100644 index 0000000000..52a2bc8ece --- /dev/null +++ b/arbitrator/stylus/tests/return-size.wat @@ -0,0 +1,71 @@ +;; Copyright 2024, Offchain Labs, Inc. 
+;; For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +(module + (import "vm_hooks" "pay_for_memory_grow" (func (param i32))) + (import "vm_hooks" "read_args" (func $read_args (param i32))) + (import "vm_hooks" "write_result" (func $write_result (param i32 i32))) + (func (export "user_entrypoint") (param $args_len i32) (result i32) + (local $size i32) + + ;; read input + i32.const 0 + call $read_args + + ;; read the target size from the last 4 bytes of the input big endian + ;; byte 1 + local.get $args_len + i32.const 1 + i32.sub + local.tee $size + i32.load8_u + + ;; byte 2 + local.get $size + i32.const 1 + i32.sub + local.tee $size + i32.load8_u + i32.const 8 + i32.shl + i32.or + + ;; byte 3 + local.get $size + i32.const 1 + i32.sub + local.tee $size + i32.load8_u + i32.const 16 + i32.shl + i32.or + + ;; byte 4 + local.get $size + i32.const 1 + i32.sub + local.tee $size + i32.load8_u + i32.const 32 + i32.shl + i32.or + + local.tee $size + + ;; grow memory enough to handle the output + ;; we start with one page allocated, so no need to round up + i32.const 65536 + i32.div_u + memory.grow + drop + + ;; set return data + i32.const 0 + local.get $size + call $write_result + + ;; return success + i32.const 0 + ) + (memory (export "memory") 1) +) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 49d4a05a5d..7832323cfd 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -62,14 +62,20 @@ var ( baseFeeGauge = metrics.NewRegisteredGauge("arb/batchposter/basefee", nil) blobFeeGauge = metrics.NewRegisteredGauge("arb/batchposter/blobfee", nil) l1GasPriceGauge = metrics.NewRegisteredGauge("arb/batchposter/l1gasprice", nil) - l1GasPriceEstimateGauge = metrics.NewRegisteredGauge("arb/batchposter/l1gasprice/estimate", nil) - latestBatchSurplusGauge = metrics.NewRegisteredGauge("arb/batchposter/latestbatchsurplus", nil) blockGasUsedGauge = metrics.NewRegisteredGauge("arb/batchposter/blockgas/used", nil) blockGasLimitGauge = metrics.NewRegisteredGauge("arb/batchposter/blockgas/limit", nil) blobGasUsedGauge = metrics.NewRegisteredGauge("arb/batchposter/blobgas/used", nil) blobGasLimitGauge = metrics.NewRegisteredGauge("arb/batchposter/blobgas/limit", nil) suggestedTipCapGauge = metrics.NewRegisteredGauge("arb/batchposter/suggestedtipcap", nil) + batchPosterEstimatedBatchBacklogGauge = metrics.NewRegisteredGauge("arb/batchposter/estimated_batch_backlog", nil) + + batchPosterDALastSuccessfulActionGauge = metrics.NewRegisteredGauge("arb/batchPoster/action/da_last_success", nil) + batchPosterDASuccessCounter = metrics.NewRegisteredCounter("arb/batchPoster/action/da_success", nil) + batchPosterDAFailureCounter = metrics.NewRegisteredCounter("arb/batchPoster/action/da_failure", nil) + + batchPosterFailureCounter = metrics.NewRegisteredCounter("arb/batchPoster/action/failure", nil) + usableBytesInBlob = big.NewInt(int64(len(kzg4844.Blob{}) * 31 / 32)) blobTxBlobGasPerBlob = big.NewInt(params.BlobTxBlobGasPerBlob) ) @@ -115,6 +121,7 @@ type BatchPoster struct { batchReverted atomic.Bool // indicates whether data poster batch was reverted nextRevertCheckBlock int64 // the last parent block scanned for reverting batches + postedFirstBatch bool // indicates if batch poster has posted the first batch accessList func(SequencerInboxAccs, AfterDelayedMessagesRead int) types.AccessList @@ -135,6 +142,10 @@ const ( l1BlockBoundIgnore ) +type BatchPosterDangerousConfig struct { + AllowPostingFirstBatchWhenSequencerMessageCountMismatch bool 
`koanf:"allow-posting-first-batch-when-sequencer-message-count-mismatch"` +} + type BatchPosterConfig struct { Enable bool `koanf:"enable"` DisableDapFallbackStoreDataOnChain bool `koanf:"disable-dap-fallback-store-data-on-chain" reload:"hot"` @@ -164,6 +175,7 @@ type BatchPosterConfig struct { L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` UseAccessLists bool `koanf:"use-access-lists" reload:"hot"` GasEstimateBaseFeeMultipleBips arbmath.Bips `koanf:"gas-estimate-base-fee-multiple-bips"` + Dangerous BatchPosterDangerousConfig `koanf:"dangerous"` gasRefunder common.Address l1BlockBound l1BlockBound @@ -230,8 +242,9 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ Enable: false, DisableDapFallbackStoreDataOnChain: false, // This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go - MaxSize: 100000, - Max4844BatchSize: blobs.BlobEncodableData*(params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) - 2000, + MaxSize: 100000, + // Try to fill 3 blobs per batch + Max4844BatchSize: blobs.BlobEncodableData*(params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob)/2 - 2000, PollInterval: time.Second * 10, ErrorDelay: time.Second * 10, MaxDelay: time.Hour, @@ -622,13 +635,11 @@ func (b *BatchPoster) pollForL1PriceData(ctx context.Context) { blockGasLimitGauge.Update(int64(h.GasLimit)) suggestedTipCap, err := b.l1Reader.Client().SuggestGasTipCap(ctx) if err != nil { - log.Error("unable to fetch suggestedTipCap from l1 client to update arb/batchposter/suggestedtipcap metric", "err", err) + log.Warn("unable to fetch suggestedTipCap from l1 client to update arb/batchposter/suggestedtipcap metric", "err", err) } else { suggestedTipCapGauge.Update(suggestedTipCap.Int64()) } - l1GasPriceEstimate := b.streamer.CurrentEstimateOfL1GasPrice() l1GasPriceGauge.Update(int64(l1GasPrice)) - l1GasPriceEstimateGauge.Update(int64(l1GasPriceEstimate)) case <-ctx.Done(): return } @@ -1313,6 +1324,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) // don't post anything for now return false, nil } + sequencerMsg, err := b.building.segments.CloseAndGetBytes() if err != nil { return false, err @@ -1330,18 +1342,54 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) gotNonce, gotMeta, err := b.dataPoster.GetNextNonceAndMeta(ctx) if err != nil { + batchPosterDAFailureCounter.Inc(1) return false, err } if nonce != gotNonce || !bytes.Equal(batchPositionBytes, gotMeta) { + batchPosterDAFailureCounter.Inc(1) return false, fmt.Errorf("%w: nonce changed from %d to %d while creating batch", storage.ErrStorageRace, nonce, gotNonce) } - sequencerMsg, err = b.dapWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), []byte{}, config.DisableDapFallbackStoreDataOnChain) + sequencerMsg, err = b.dapWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), config.DisableDapFallbackStoreDataOnChain) if err != nil { + batchPosterDAFailureCounter.Inc(1) return false, err } - } - data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), batchPosition.MessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, b.building.use4844) + batchPosterDASuccessCounter.Inc(1) + batchPosterDALastSuccessfulActionGauge.Update(time.Now().Unix()) + } + + prevMessageCount := batchPosition.MessageCount + if b.config().Dangerous.AllowPostingFirstBatchWhenSequencerMessageCountMismatch && !b.postedFirstBatch { + // 
AllowPostingFirstBatchWhenSequencerMessageCountMismatch can be used when the + // message count stored in the batch poster's database gets out + // of sync with the sequencerReportedSubMessageCount stored in the parent chain. + // + // An example of when this out-of-sync issue can happen: + // 1. The batch poster is running fine, but then it shuts down for more than 24h. + // 2. While the batch poster is down, someone sends a transaction to the parent chain + // smart contract to move a message from the delayed inbox to the main inbox. + // This will not update sequencerReportedSubMessageCount in the parent chain. + // 3. When the batch poster starts again, the inbox reader will update the + // message count that is maintained in the batch poster's database to be equal to + // (sequencerReportedSubMessageCount that is stored in parent chain) + + // (the number of delayed messages that were moved from the delayed inbox to the main inbox). + // At this moment the message count stored in the batch poster's database gets out of sync with + // the sequencerReportedSubMessageCount stored in the parent chain. + + // When the first batch is posted, sequencerReportedSubMessageCount in + // the parent chain will be updated to be equal to the new message count provided + // by the batch poster, which will make this out-of-sync issue disappear. + // That is why this strategy is only applied for the first batch posted after + // startup. + + // If prevMessageCount is set to zero, the sequencer inbox's smart contract allows + // posting a batch even if sequencerReportedSubMessageCount is not equal + // to the provided prevMessageCount. + prevMessageCount = 0 + } + + data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), prevMessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, b.building.use4844) if err != nil { return false, err } @@ -1382,6 +1430,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if err != nil { return false, err } + b.postedFirstBatch = true log.Info( "BatchPoster: batch sent", "sequenceNumber", batchPosition.NextSeqNum, @@ -1393,14 +1442,6 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) "numBlobs", len(kzgBlobs), ) - surplus := arbmath.SaturatingMul( - arbmath.SaturatingSub( - l1GasPriceGauge.Snapshot().Value(), - l1GasPriceEstimateGauge.Snapshot().Value()), - int64(len(sequencerMsg)*16), - ) - latestBatchSurplusGauge.Update(surplus) - recentlyHitL1Bounds := time.Since(b.lastHitL1Bounds) < config.PollInterval*3 postedMessages := b.building.msgCount - batchPosition.MessageCount b.messagesPerBatch.Update(uint64(postedMessages)) @@ -1423,6 +1464,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) messagesPerBatch = 1 } backlog := uint64(unpostedMessages) / messagesPerBatch + batchPosterEstimatedBatchBacklogGauge.Update(int64(backlog)) if backlog > 10 { logLevel := log.Warn if recentlyHitL1Bounds { @@ -1535,6 +1577,7 @@ func (b *BatchPoster) Start(ctxIn context.Context) { logLevel = normalGasEstimationFailedEphemeralErrorHandler.LogLevel(err, logLevel) logLevel = accumulatorNotFoundEphemeralErrorHandler.LogLevel(err, logLevel) logLevel("error posting batch", "err", err) + batchPosterFailureCounter.Inc(1) return b.config().ErrorDelay } else if posted { return 0 diff --git a/arbnode/dataposter/data_poster.go index b34552a9b9..1229d9f7a6 100644 --- a/arbnode/dataposter/data_poster.go +++ 
b/arbnode/dataposter/data_poster.go @@ -31,10 +31,10 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/go-redis/redis/v8" "github.com/holiman/uint256" "github.com/offchainlabs/nitro/arbnode/dataposter/dbstorage" - "github.com/offchainlabs/nitro/arbnode/dataposter/externalsigner" "github.com/offchainlabs/nitro/arbnode/dataposter/noop" "github.com/offchainlabs/nitro/arbnode/dataposter/slice" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" @@ -217,6 +217,10 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro func rpcClient(ctx context.Context, opts *ExternalSignerCfg) (*rpc.Client, error) { tlsCfg := &tls.Config{ MinVersion: tls.VersionTLS12, + // Dataposter verifies that signed transaction was signed by the account + // that it expects to be signed with. So signer is already authenticated + // on application level and does not need to rely on TLS for authentication. + InsecureSkipVerify: opts.InsecureSkipVerify, // #nosec G402 } if opts.ClientCert != "" && opts.ClientPrivateKey != "" { @@ -251,6 +255,50 @@ func rpcClient(ctx context.Context, opts *ExternalSignerCfg) (*rpc.Client, error ) } +// TxToSignTxArgs converts transaction to SendTxArgs. This is needed for +// external signer to specify From field. +func TxToSignTxArgs(addr common.Address, tx *types.Transaction) (*apitypes.SendTxArgs, error) { + var to *common.MixedcaseAddress + if tx.To() != nil { + to = new(common.MixedcaseAddress) + *to = common.NewMixedcaseAddress(*tx.To()) + } + data := (hexutil.Bytes)(tx.Data()) + val := (*hexutil.Big)(tx.Value()) + if val == nil { + val = (*hexutil.Big)(big.NewInt(0)) + } + al := tx.AccessList() + var ( + blobs []kzg4844.Blob + commitments []kzg4844.Commitment + proofs []kzg4844.Proof + ) + if tx.BlobTxSidecar() != nil { + blobs = tx.BlobTxSidecar().Blobs + commitments = tx.BlobTxSidecar().Commitments + proofs = tx.BlobTxSidecar().Proofs + } + return &apitypes.SendTxArgs{ + From: common.NewMixedcaseAddress(addr), + To: to, + Gas: hexutil.Uint64(tx.Gas()), + GasPrice: (*hexutil.Big)(tx.GasPrice()), + MaxFeePerGas: (*hexutil.Big)(tx.GasFeeCap()), + MaxPriorityFeePerGas: (*hexutil.Big)(tx.GasTipCap()), + Value: *val, + Nonce: hexutil.Uint64(tx.Nonce()), + Data: &data, + AccessList: &al, + ChainID: (*hexutil.Big)(tx.ChainId()), + BlobFeeCap: (*hexutil.Big)(tx.BlobGasFeeCap()), + BlobHashes: tx.BlobHashes(), + Blobs: blobs, + Commitments: commitments, + Proofs: proofs, + }, nil +} + // externalSigner returns signer function and ethereum address of the signer. // Returns an error if address isn't specified or if it can't connect to the // signer RPC server. @@ -269,7 +317,7 @@ func externalSigner(ctx context.Context, opts *ExternalSignerCfg) (signerFn, com // RLP encoded transaction object. 
// https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_signtransaction var data hexutil.Bytes - args, err := externalsigner.TxToSignTxArgs(addr, tx) + args, err := TxToSignTxArgs(addr, tx) if err != nil { return nil, fmt.Errorf("error converting transaction to sendTxArgs: %w", err) } @@ -281,7 +329,11 @@ func externalSigner(ctx context.Context, opts *ExternalSignerCfg) (signerFn, com return nil, fmt.Errorf("unmarshaling signed transaction: %w", err) } hasher := types.LatestSignerForChainID(tx.ChainId()) - if h := hasher.Hash(args.ToTransaction()); h != hasher.Hash(signedTx) { + gotTx, err := args.ToTransaction() + if err != nil { + return nil, fmt.Errorf("converting transaction arguments into transaction: %w", err) + } + if h := hasher.Hash(gotTx); h != hasher.Hash(signedTx) { return nil, fmt.Errorf("transaction: %x from external signer differs from request: %x", hasher.Hash(signedTx), h) } return signedTx, nil @@ -592,7 +644,7 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u targetBlobCost := arbmath.BigMulByUint(newBlobFeeCap, blobGasUsed) targetNonBlobCost := arbmath.BigSub(targetMaxCost, targetBlobCost) newBaseFeeCap := arbmath.BigDivByUint(targetNonBlobCost, gasLimit) - if lastTx != nil && numBlobs > 0 && arbmath.BigDivToBips(newBaseFeeCap, lastTx.GasFeeCap()) < minRbfIncrease { + if lastTx != nil && numBlobs > 0 && lastTx.GasFeeCap().Sign() > 0 && arbmath.BigDivToBips(newBaseFeeCap, lastTx.GasFeeCap()) < minRbfIncrease { // Increase the non-blob fee cap to the minimum rbf increase newBaseFeeCap = arbmath.BigMulByBips(lastTx.GasFeeCap(), minRbfIncrease) newNonBlobCost := arbmath.BigMulByUint(newBaseFeeCap, gasLimit) @@ -665,6 +717,14 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u return lastTx.GasFeeCap(), lastTx.GasTipCap(), lastTx.BlobGasFeeCap(), nil } + // Ensure we bid at least 1 wei to prevent division by zero + if newBaseFeeCap.Sign() == 0 { + newBaseFeeCap = big.NewInt(1) + } + if newBlobFeeCap.Sign() == 0 { + newBlobFeeCap = big.NewInt(1) + } + return newBaseFeeCap, newTipCap, newBlobFeeCap, nil } @@ -676,6 +736,10 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim p.mutex.Lock() defer p.mutex.Unlock() + if p.config().DisableNewTx { + return nil, fmt.Errorf("posting new transaction is disabled") + } + var weight uint64 = 1 if len(kzgBlobs) > 0 { weight = uint64(len(kzgBlobs)) @@ -839,31 +903,31 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti // different type with a lower nonce. // If we decide not to send this tx yet, just leave it queued and with Sent set to false. // The resending/repricing loop in DataPoster.Start will keep trying. 
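The nonce-gap guard described in the comment above, and reworked in the hunk below, is easier to read outside the diff. The following is an illustrative sketch only (not part of this patch): it queries the sender's transaction count as of the previous block using go-ethereum's ethclient and holds back any queued transaction whose nonce runs ahead of that count, so a reorg of the preceding transaction cannot leave a nonce gap in the mempool. The RPC endpoint, sender address, and nonce are placeholders.

package main

import (
	"context"
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

// shouldHoldTx mirrors the idea of the reorgResistantTxCount check: look up the
// sender's transaction count at (latest - 1) and only send a queued tx if its
// nonce does not run ahead of that count.
func shouldHoldTx(ctx context.Context, client *ethclient.Client, sender common.Address, txNonce uint64) (bool, error) {
	latest, err := client.BlockNumber(ctx)
	if err != nil {
		return false, err
	}
	prevBlock := latest
	if prevBlock > 0 {
		prevBlock-- // saturating subtraction, as in the real code
	}
	reorgResistantTxCount, err := client.NonceAt(ctx, sender, new(big.Int).SetUint64(prevBlock))
	if err != nil {
		return false, err
	}
	return txNonce > reorgResistantTxCount, nil
}

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder RPC endpoint
	if err != nil {
		log.Fatal(err)
	}
	hold, err := shouldHoldTx(context.Background(), client,
		common.HexToAddress("0x0000000000000000000000000000000000000001"), 42) // placeholder sender and nonce
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("hold transaction:", hold)
}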
- if !newTx.Sent && newTx.FullTx.Nonce() > 0 { + previouslySent := newTx.Sent || (prevTx != nil && prevTx.Sent) // if we've previously sent this nonce + if !previouslySent && newTx.FullTx.Nonce() > 0 { precedingTx, err := p.queue.Get(ctx, arbmath.SaturatingUSub(newTx.FullTx.Nonce(), 1)) if err != nil { return fmt.Errorf("couldn't get preceding tx in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } if precedingTx != nil { // precedingTx == nil -> the actual preceding tx was already confirmed - var latestBlockNumber, prevBlockNumber, reorgResistantNonce uint64 + var latestBlockNumber, prevBlockNumber, reorgResistantTxCount uint64 if precedingTx.FullTx.Type() != newTx.FullTx.Type() || !precedingTx.Sent { latestBlockNumber, err = p.client.BlockNumber(ctx) if err != nil { return fmt.Errorf("couldn't get block number in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } prevBlockNumber = arbmath.SaturatingUSub(latestBlockNumber, 1) - reorgResistantNonce, err = p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber)) + reorgResistantTxCount, err = p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber)) if err != nil { return fmt.Errorf("couldn't determine reorg resistant nonce in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } - if precedingTx.FullTx.Nonce() > reorgResistantNonce { - log.Info("DataPoster is avoiding creating a mempool nonce gap (the tx remains queued and will be retried)", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent) + if newTx.FullTx.Nonce() > reorgResistantTxCount { + log.Info("DataPoster is avoiding creating a mempool nonce gap (the tx remains queued and will be retried)", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantTxCount", reorgResistantTxCount) return nil } - } else { - log.Info("DataPoster will send previously unsent batch tx", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantNonce", reorgResistantNonce) } + log.Debug("DataPoster will send previously unsent batch tx", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantTxCount", reorgResistantTxCount) } } @@ -934,8 +998,8 @@ func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransa } newTx := *prevTx - if arbmath.BigDivToBips(newFeeCap, prevTx.FullTx.GasFeeCap()) < minRbfIncrease || - (prevTx.FullTx.BlobGasFeeCap() != nil && arbmath.BigDivToBips(newBlobFeeCap, prevTx.FullTx.BlobGasFeeCap()) < minRbfIncrease) { + if (prevTx.FullTx.GasFeeCap().Sign() > 0 && arbmath.BigDivToBips(newFeeCap, prevTx.FullTx.GasFeeCap()) < minRbfIncrease) || + (prevTx.FullTx.BlobGasFeeCap() != nil && prevTx.FullTx.BlobGasFeeCap().Sign() > 0 && arbmath.BigDivToBips(newBlobFeeCap, prevTx.FullTx.BlobGasFeeCap()) < minRbfIncrease) { log.Debug( "no need to replace by fee transaction", "nonce", prevTx.FullTx.Nonce(), @@ -1196,6 +1260,9 @@ type DataPosterConfig struct { MaxFeeCapFormula string 
`koanf:"max-fee-cap-formula" reload:"hot"` ElapsedTimeBase time.Duration `koanf:"elapsed-time-base" reload:"hot"` ElapsedTimeImportance float64 `koanf:"elapsed-time-importance" reload:"hot"` + // When set, dataposter will not post new batches, but will keep running to + // get existing batches confirmed. + DisableNewTx bool `koanf:"disable-new-tx" reload:"hot"` } type ExternalSignerCfg struct { @@ -1215,6 +1282,8 @@ type ExternalSignerCfg struct { // (Optional) Client certificate key for mtls. // This is required when client-cert is set. ClientPrivateKey string `koanf:"client-private-key"` + // TLS config option, when enabled skips certificate verification of external signer. + InsecureSkipVerify bool `koanf:"insecure-skip-verify"` } type DangerousConfig struct { @@ -1255,6 +1324,7 @@ func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet, defaultDataPost signature.SimpleHmacConfigAddOptions(prefix+".redis-signer", f) addDangerousOptions(prefix+".dangerous", f) addExternalSignerOptions(prefix+".external-signer", f) + f.Bool(prefix+".disable-new-tx", defaultDataPosterConfig.DisableNewTx, "disable posting new transactions, data poster will still keep confirming existing batches") } func addDangerousOptions(prefix string, f *pflag.FlagSet) { @@ -1268,6 +1338,7 @@ func addExternalSignerOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".root-ca", DefaultDataPosterConfig.ExternalSigner.RootCA, "external signer root CA") f.String(prefix+".client-cert", DefaultDataPosterConfig.ExternalSigner.ClientCert, "rpc client cert") f.String(prefix+".client-private-key", DefaultDataPosterConfig.ExternalSigner.ClientPrivateKey, "rpc client private key") + f.Bool(prefix+".insecure-skip-verify", DefaultDataPosterConfig.ExternalSigner.InsecureSkipVerify, "skip TLS certificate verification") } var DefaultDataPosterConfig = DataPosterConfig{ @@ -1289,10 +1360,11 @@ var DefaultDataPosterConfig = DataPosterConfig{ UseNoOpStorage: false, LegacyStorageEncoding: false, Dangerous: DangerousConfig{ClearDBStorage: false}, - ExternalSigner: ExternalSignerCfg{Method: "eth_signTransaction"}, + ExternalSigner: ExternalSignerCfg{Method: "eth_signTransaction", InsecureSkipVerify: false}, MaxFeeCapFormula: "((BacklogOfBatches * UrgencyGWei) ** 2) + ((ElapsedTime/ElapsedTimeBase) ** 2) * ElapsedTimeImportance + TargetPriceGWei", ElapsedTimeBase: 10 * time.Minute, ElapsedTimeImportance: 10, + DisableNewTx: false, } var DefaultDataPosterConfigForValidator = func() DataPosterConfig { @@ -1322,10 +1394,11 @@ var TestDataPosterConfig = DataPosterConfig{ UseDBStorage: false, UseNoOpStorage: false, LegacyStorageEncoding: false, - ExternalSigner: ExternalSignerCfg{Method: "eth_signTransaction"}, + ExternalSigner: ExternalSignerCfg{Method: "eth_signTransaction", InsecureSkipVerify: true}, MaxFeeCapFormula: "((BacklogOfBatches * UrgencyGWei) ** 2) + ((ElapsedTime/ElapsedTimeBase) ** 2) * ElapsedTimeImportance + TargetPriceGWei", ElapsedTimeBase: 10 * time.Minute, ElapsedTimeImportance: 10, + DisableNewTx: false, } var TestDataPosterConfigForValidator = func() DataPosterConfig { diff --git a/arbnode/dataposter/dataposter_test.go b/arbnode/dataposter/dataposter_test.go index a8e2e110a0..f840d8c84e 100644 --- a/arbnode/dataposter/dataposter_test.go +++ b/arbnode/dataposter/dataposter_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "math/big" - "net/http" "testing" "time" @@ -14,11 +13,11 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" "github.com/google/go-cmp/cmp" "github.com/holiman/uint256" - "github.com/offchainlabs/nitro/arbnode/dataposter/externalsigner" "github.com/offchainlabs/nitro/arbnode/dataposter/externalsignertest" "github.com/offchainlabs/nitro/util/arbmath" ) @@ -58,14 +57,14 @@ func TestParseReplacementTimes(t *testing.T) { } } -func signerTestCfg(addr common.Address) (*ExternalSignerCfg, error) { +func signerTestCfg(addr common.Address, url string) (*ExternalSignerCfg, error) { cp, err := externalsignertest.CertPaths() if err != nil { return nil, fmt.Errorf("getting certificates path: %w", err) } return &ExternalSignerCfg{ Address: common.Bytes2Hex(addr.Bytes()), - URL: externalsignertest.SignerURL, + URL: url, Method: externalsignertest.SignerMethod, RootCA: cp.ServerCert, ClientCert: cp.ClientCert, @@ -106,15 +105,14 @@ var ( ) func TestExternalSigner(t *testing.T) { - httpSrv, srv := externalsignertest.NewServer(t) - cert, key := "./testdata/localhost.crt", "./testdata/localhost.key" + srv := externalsignertest.NewServer(t) go func() { - if err := httpSrv.ListenAndServeTLS(cert, key); err != nil && err != http.ErrServerClosed { - t.Errorf("ListenAndServeTLS() unexpected error: %v", err) + if err := srv.Start(); err != nil { + log.Error("Failed to start external signer server:", err) return } }() - signerCfg, err := signerTestCfg(srv.Address) + signerCfg, err := signerTestCfg(srv.Address, srv.URL()) if err != nil { t.Fatalf("Error getting signer test config: %v", err) } @@ -143,11 +141,7 @@ func TestExternalSigner(t *testing.T) { if err != nil { t.Fatalf("Error signing transaction with external signer: %v", err) } - args, err := externalsigner.TxToSignTxArgs(addr, tc.tx) - if err != nil { - t.Fatalf("Error converting transaction to sendTxArgs: %v", err) - } - want, err := srv.SignerFn(addr, args.ToTransaction()) + want, err := srv.SignerFn(addr, tc.tx) if err != nil { t.Fatalf("Error signing transaction: %v", err) } diff --git a/arbnode/dataposter/externalsigner/externalsigner.go b/arbnode/dataposter/externalsigner/externalsigner.go deleted file mode 100644 index 10d9754cdf..0000000000 --- a/arbnode/dataposter/externalsigner/externalsigner.go +++ /dev/null @@ -1,115 +0,0 @@ -package externalsigner - -import ( - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/ethereum/go-ethereum/signer/core/apitypes" - "github.com/holiman/uint256" -) - -type SignTxArgs struct { - *apitypes.SendTxArgs - - // Feilds for BlobTx type transactions. - BlobFeeCap *hexutil.Big `json:"maxFeePerBlobGas"` - BlobHashes []common.Hash `json:"blobVersionedHashes,omitempty"` - - // Blob sidecar fields for BlobTx type transactions. - // These are optional if BlobHashes are already present, since these - // are not included in the hash/signature. 
- Blobs []kzg4844.Blob `json:"blobs"` - Commitments []kzg4844.Commitment `json:"commitments"` - Proofs []kzg4844.Proof `json:"proofs"` -} - -func (a *SignTxArgs) ToTransaction() *types.Transaction { - if !a.isEIP4844() { - return a.SendTxArgs.ToTransaction() - } - to := common.Address{} - if a.To != nil { - to = a.To.Address() - } - var input []byte - if a.Input != nil { - input = *a.Input - } else if a.Data != nil { - input = *a.Data - } - al := types.AccessList{} - if a.AccessList != nil { - al = *a.AccessList - } - return types.NewTx(&types.BlobTx{ - To: to, - Nonce: uint64(a.SendTxArgs.Nonce), - Gas: uint64(a.Gas), - GasFeeCap: uint256.NewInt(a.MaxFeePerGas.ToInt().Uint64()), - GasTipCap: uint256.NewInt(a.MaxPriorityFeePerGas.ToInt().Uint64()), - Value: uint256.NewInt(a.Value.ToInt().Uint64()), - Data: input, - AccessList: al, - BlobFeeCap: uint256.NewInt(a.BlobFeeCap.ToInt().Uint64()), - BlobHashes: a.BlobHashes, - Sidecar: &types.BlobTxSidecar{ - Blobs: a.Blobs, - Commitments: a.Commitments, - Proofs: a.Proofs, - }, - ChainID: uint256.NewInt(a.ChainID.ToInt().Uint64()), - }) -} - -func (a *SignTxArgs) isEIP4844() bool { - return a.BlobHashes != nil || a.BlobFeeCap != nil -} - -// TxToSignTxArgs converts transaction to SendTxArgs. This is needed for -// external signer to specify From field. -func TxToSignTxArgs(addr common.Address, tx *types.Transaction) (*SignTxArgs, error) { - var to *common.MixedcaseAddress - if tx.To() != nil { - to = new(common.MixedcaseAddress) - *to = common.NewMixedcaseAddress(*tx.To()) - } - data := (hexutil.Bytes)(tx.Data()) - val := (*hexutil.Big)(tx.Value()) - if val == nil { - val = (*hexutil.Big)(big.NewInt(0)) - } - al := tx.AccessList() - var ( - blobs []kzg4844.Blob - commitments []kzg4844.Commitment - proofs []kzg4844.Proof - ) - if tx.BlobTxSidecar() != nil { - blobs = tx.BlobTxSidecar().Blobs - commitments = tx.BlobTxSidecar().Commitments - proofs = tx.BlobTxSidecar().Proofs - } - return &SignTxArgs{ - SendTxArgs: &apitypes.SendTxArgs{ - From: common.NewMixedcaseAddress(addr), - To: to, - Gas: hexutil.Uint64(tx.Gas()), - GasPrice: (*hexutil.Big)(tx.GasPrice()), - MaxFeePerGas: (*hexutil.Big)(tx.GasFeeCap()), - MaxPriorityFeePerGas: (*hexutil.Big)(tx.GasTipCap()), - Value: *val, - Nonce: hexutil.Uint64(tx.Nonce()), - Data: &data, - AccessList: &al, - ChainID: (*hexutil.Big)(tx.ChainId()), - }, - BlobFeeCap: (*hexutil.Big)(tx.BlobGasFeeCap()), - BlobHashes: tx.BlobHashes(), - Blobs: blobs, - Commitments: commitments, - Proofs: proofs, - }, nil -} diff --git a/arbnode/dataposter/externalsigner/externalsigner_test.go b/arbnode/dataposter/externalsigner/externalsigner_test.go deleted file mode 100644 index abd5acedcf..0000000000 --- a/arbnode/dataposter/externalsigner/externalsigner_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package externalsigner - -import ( - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/holiman/uint256" -) - -var ( - blobTx = types.NewTx( - &types.BlobTx{ - ChainID: uint256.NewInt(1337), - Nonce: 13, - GasTipCap: uint256.NewInt(1), - GasFeeCap: uint256.NewInt(1), - Gas: 3, - To: common.Address{}, - Value: uint256.NewInt(1), - Data: []byte{0x01, 0x02, 0x03}, - BlobHashes: []common.Hash{ - common.BigToHash(big.NewInt(1)), - common.BigToHash(big.NewInt(2)), - common.BigToHash(big.NewInt(3)), - }, - Sidecar: &types.BlobTxSidecar{}, - }, - ) - dynamicFeeTx = types.NewTx( - &types.DynamicFeeTx{ - ChainID: big.NewInt(1337), - Nonce: 13, - GasTipCap: 
big.NewInt(1), - GasFeeCap: big.NewInt(1), - Gas: 3, - To: nil, - Value: big.NewInt(1), - Data: []byte{0x01, 0x02, 0x03}, - }, - ) -) - -// TestToTranssaction tests that tranasction converted to SignTxArgs and then -// back to Transaction results in the same hash. -func TestToTranssaction(t *testing.T) { - for _, tc := range []struct { - desc string - tx *types.Transaction - }{ - { - desc: "blob transaction", - tx: blobTx, - }, - { - desc: "dynamic fee transaction", - tx: dynamicFeeTx, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - signTxArgs, err := TxToSignTxArgs(common.Address{}, tc.tx) - if err != nil { - t.Fatalf("TxToSignTxArgs() unexpected error: %v", err) - } - got := signTxArgs.ToTransaction() - hasher := types.LatestSignerForChainID(nil) - if h, g := hasher.Hash(tc.tx), hasher.Hash(got); h != g { - t.Errorf("ToTransaction() got hash: %v want: %v", g, h) - } - }) - } - -} diff --git a/arbnode/dataposter/externalsignertest/externalsignertest.go b/arbnode/dataposter/externalsignertest/externalsignertest.go index 73a5760fbe..554defc764 100644 --- a/arbnode/dataposter/externalsignertest/externalsignertest.go +++ b/arbnode/dataposter/externalsignertest/externalsignertest.go @@ -4,8 +4,10 @@ import ( "context" "crypto/tls" "crypto/x509" + "errors" "fmt" "math/big" + "net" "net/http" "os" "path/filepath" @@ -19,16 +21,14 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/rpc" - "github.com/offchainlabs/nitro/arbnode/dataposter/externalsigner" + "github.com/ethereum/go-ethereum/signer/core/apitypes" + "github.com/offchainlabs/nitro/util/testhelpers" ) var ( dataPosterPath = "arbnode/dataposter" selfPath = filepath.Join(dataPosterPath, "externalsignertest") - - SignerPort = 1234 - SignerURL = fmt.Sprintf("https://localhost:%v", SignerPort) - SignerMethod = "test_signTransaction" + SignerMethod = "test_signTransaction" ) type CertAbsPaths struct { @@ -38,6 +38,12 @@ type CertAbsPaths struct { ClientKey string } +type SignerServer struct { + *http.Server + *SignerAPI + listener net.Listener +} + func basePath() (string, error) { _, file, _, ok := runtime.Caller(1) if !ok { @@ -71,7 +77,7 @@ func CertPaths() (*CertAbsPaths, error) { }, nil } -func NewServer(t *testing.T) (*http.Server, *SignerAPI) { +func NewServer(t *testing.T) *SignerServer { rpcServer := rpc.NewServer() signer, address, err := setupAccount("/tmp/keystore") if err != nil { @@ -94,8 +100,13 @@ func NewServer(t *testing.T) (*http.Server, *SignerAPI) { pool := x509.NewCertPool() pool.AppendCertsFromPEM(clientCert) + ln, err := testhelpers.FreeTCPPortListener() + if err != nil { + t.Fatalf("Error getting a listener on a free TCP port: %v", err) + } + httpServer := &http.Server{ - Addr: fmt.Sprintf(":%d", SignerPort), + Addr: ln.Addr().String(), Handler: rpcServer, ReadTimeout: 30 * time.Second, ReadHeaderTimeout: 30 * time.Second, @@ -109,12 +120,36 @@ func NewServer(t *testing.T) (*http.Server, *SignerAPI) { } t.Cleanup(func() { - if err := httpServer.Close(); err != nil { + if err := httpServer.Close(); err != nil && !errors.Is(err, http.ErrServerClosed) { t.Fatalf("Error shutting down http server: %v", err) } + // Explicitly close the listener in case the server was never started. + if err := ln.Close(); err != nil && !errors.Is(err, net.ErrClosed) { + t.Fatalf("Error closing listener: %v", err) + } }) - return httpServer, s + return &SignerServer{httpServer, s, ln} +} + +// URL returns the URL of the signer server.
+// +// Note: The server must return "localhost" for the hostname part of +// the URL to match the expectations from the TLS certificate. +func (s *SignerServer) URL() string { + port := strings.Split(s.Addr, ":")[1] + return fmt.Sprintf("https://localhost:%s", port) +} + +func (s *SignerServer) Start() error { + cp, err := CertPaths() + if err != nil { + return err + } + if err := s.ServeTLS(s.listener, cp.ServerCert, cp.ServerKey); err != nil && !errors.Is(err, http.ErrServerClosed) { + return err + } + return nil } // setupAccount creates a new account in a given directory, unlocks it, creates @@ -144,11 +179,15 @@ type SignerAPI struct { Address common.Address } -func (a *SignerAPI) SignTransaction(ctx context.Context, req *externalsigner.SignTxArgs) (hexutil.Bytes, error) { +func (a *SignerAPI) SignTransaction(ctx context.Context, req *apitypes.SendTxArgs) (hexutil.Bytes, error) { if req == nil { return nil, fmt.Errorf("nil request") } - signedTx, err := a.SignerFn(a.Address, req.ToTransaction()) + tx, err := req.ToTransaction() + if err != nil { + return nil, fmt.Errorf("converting send transaction arguments to transaction: %w", err) + } + signedTx, err := a.SignerFn(a.Address, tx) if err != nil { return nil, fmt.Errorf("signing transaction: %w", err) } diff --git a/arbnode/dataposter/storage_test.go b/arbnode/dataposter/storage_test.go index f98c120f38..e2aa321e0d 100644 --- a/arbnode/dataposter/storage_test.go +++ b/arbnode/dataposter/storage_test.go @@ -19,6 +19,7 @@ import ( "github.com/offchainlabs/nitro/arbnode/dataposter/redis" "github.com/offchainlabs/nitro/arbnode/dataposter/slice" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" + "github.com/offchainlabs/nitro/cmd/conf" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/redisutil" "github.com/offchainlabs/nitro/util/signature" @@ -44,7 +45,7 @@ func newLevelDBStorage(t *testing.T, encF storage.EncoderDecoderF) *dbstorage.St func newPebbleDBStorage(t *testing.T, encF storage.EncoderDecoderF) *dbstorage.Storage { t.Helper() - db, err := rawdb.NewPebbleDBDatabase(path.Join(t.TempDir(), "pebble.db"), 0, 0, "default", false, true) + db, err := rawdb.NewPebbleDBDatabase(path.Join(t.TempDir(), "pebble.db"), 0, 0, "default", false, true, conf.PersistentConfigDefault.Pebble.ExtraOptions("pebble")) if err != nil { t.Fatalf("NewPebbleDBDatabase() unexpected error: %v", err) } diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go index 9ad984ae6c..699eb3e8f6 100644 --- a/arbnode/delayed_seq_reorg_test.go +++ b/arbnode/delayed_seq_reorg_test.go @@ -19,7 +19,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { defer cancel() exec, streamer, db, _ := NewTransactionStreamerForTest(t, common.Address{}) - tracker, err := NewInboxTracker(db, streamer, nil) + tracker, err := NewInboxTracker(db, streamer, nil, DefaultSnapSyncConfig) Require(t, err) err = streamer.Start(ctx) diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index a1f1a1a930..3ba9aa78f3 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -139,6 +139,9 @@ func (r *InboxReader) Start(ctxIn context.Context) error { return err } if batchCount > 0 { + if r.tracker.snapSyncConfig.Enabled { + break + } // Validate the init message matches our L2 blockchain message, err := r.tracker.GetDelayedMessage(0) if err != nil { diff --git a/arbnode/inbox_test.go b/arbnode/inbox_test.go index 5c879743a4..ef4acd038c 100644 --- a/arbnode/inbox_test.go +++ b/arbnode/inbox_test.go @@ -37,6 +37,11 @@ 
type execClientWrapper struct { func (w *execClientWrapper) Pause() { w.t.Error("not supported") } func (w *execClientWrapper) Activate() { w.t.Error("not supported") } func (w *execClientWrapper) ForwardTo(url string) error { w.t.Error("not supported"); return nil } +func (w *execClientWrapper) Synced() bool { w.t.Error("not supported"); return false } +func (w *execClientWrapper) FullSyncProgressMap() map[string]interface{} { + w.t.Error("not supported") + return nil +} func NewTransactionStreamerForTest(t *testing.T, ownerAddress common.Address) (*gethexec.ExecutionEngine, *TransactionStreamer, ethdb.Database, *core.BlockChain) { chainConfig := params.ArbitrumDevTestChainConfig() @@ -65,8 +70,9 @@ func NewTransactionStreamerForTest(t *testing.T, ownerAddress common.Address) (* if err != nil { Fail(t, err) } + execEngine.Initialize(gethexec.DefaultCachingConfig.StylusLRUCache) execSeq := &execClientWrapper{execEngine, t} - inbox, err := NewTransactionStreamer(arbDb, bc.Config(), execSeq, nil, make(chan error, 1), transactionStreamerConfigFetcher) + inbox, err := NewTransactionStreamer(arbDb, bc.Config(), execSeq, nil, make(chan error, 1), transactionStreamerConfigFetcher, &DefaultSnapSyncConfig) if err != nil { Fail(t, err) } diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index ba1b875ec8..b950c1e1ef 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -34,22 +34,24 @@ var ( ) type InboxTracker struct { - db ethdb.Database - txStreamer *TransactionStreamer - mutex sync.Mutex - validator *staker.BlockValidator - dapReaders []daprovider.Reader + db ethdb.Database + txStreamer *TransactionStreamer + mutex sync.Mutex + validator *staker.BlockValidator + dapReaders []daprovider.Reader + snapSyncConfig SnapSyncConfig batchMetaMutex sync.Mutex batchMeta *containers.LruCache[uint64, BatchMetadata] } -func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, dapReaders []daprovider.Reader) (*InboxTracker, error) { +func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, dapReaders []daprovider.Reader, snapSyncConfig SnapSyncConfig) (*InboxTracker, error) { tracker := &InboxTracker{ - db: db, - txStreamer: txStreamer, - dapReaders: dapReaders, - batchMeta: containers.NewLruCache[uint64, BatchMetadata](1000), + db: db, + txStreamer: txStreamer, + dapReaders: dapReaders, + batchMeta: containers.NewLruCache[uint64, BatchMetadata](1000), + snapSyncConfig: snapSyncConfig, } return tracker, nil } @@ -385,16 +387,40 @@ func (t *InboxTracker) GetDelayedMessageBytes(seqNum uint64) ([]byte, error) { } func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage, hardReorg bool) error { + var nextAcc common.Hash + firstDelayedMsgToKeep := uint64(0) if len(messages) == 0 { return nil } - t.mutex.Lock() - defer t.mutex.Unlock() - pos, err := messages[0].Message.Header.SeqNum() if err != nil { return err } + if t.snapSyncConfig.Enabled && pos < t.snapSyncConfig.DelayedCount { + firstDelayedMsgToKeep = t.snapSyncConfig.DelayedCount + if firstDelayedMsgToKeep > 0 { + firstDelayedMsgToKeep-- + } + for { + if len(messages) == 0 { + return nil + } + pos, err = messages[0].Message.Header.SeqNum() + if err != nil { + return err + } + if pos+1 == firstDelayedMsgToKeep { + nextAcc = messages[0].AfterInboxAcc() + } + if pos < firstDelayedMsgToKeep { + messages = messages[1:] + } else { + break + } + } + } + t.mutex.Lock() + defer t.mutex.Unlock() if !hardReorg { // This math is safe to do as we know len(messages) > 0 @@ -409,8 +435,7 @@ 
func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage, hardR } } - var nextAcc common.Hash - if pos > 0 { + if pos > firstDelayedMsgToKeep { var err error nextAcc, err = t.GetDelayedAcc(pos - 1) if err != nil { @@ -598,17 +623,44 @@ func (b *multiplexerBackend) ReadDelayedInbox(seqNum uint64) (*arbostypes.L1Inco var delayedMessagesMismatch = errors.New("sequencer batch delayed messages missing or different") func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L1Interface, batches []*SequencerInboxBatch) error { + var nextAcc common.Hash + var prevbatchmeta BatchMetadata + sequenceNumberToKeep := uint64(0) if len(batches) == 0 { return nil } + if t.snapSyncConfig.Enabled && batches[0].SequenceNumber < t.snapSyncConfig.BatchCount { + sequenceNumberToKeep = t.snapSyncConfig.BatchCount + if sequenceNumberToKeep > 0 { + sequenceNumberToKeep-- + } + for { + if len(batches) == 0 { + return nil + } + if batches[0].SequenceNumber+1 == sequenceNumberToKeep { + nextAcc = batches[0].AfterInboxAcc + prevbatchmeta = BatchMetadata{ + Accumulator: batches[0].AfterInboxAcc, + DelayedMessageCount: batches[0].AfterDelayedCount, + MessageCount: arbutil.MessageIndex(t.snapSyncConfig.PrevBatchMessageCount), + ParentChainBlock: batches[0].ParentChainBlockNumber, + } + } + if batches[0].SequenceNumber < sequenceNumberToKeep { + batches = batches[1:] + } else { + break + } + } + } t.mutex.Lock() defer t.mutex.Unlock() pos := batches[0].SequenceNumber startPos := pos - var nextAcc common.Hash - var prevbatchmeta BatchMetadata - if pos > 0 { + + if pos > sequenceNumberToKeep { var err error prevbatchmeta, err = t.GetBatchMetadata(pos - 1) nextAcc = prevbatchmeta.Accumulator diff --git a/arbnode/message_pruner.go b/arbnode/message_pruner.go index 31bf1a63ff..5d18341a27 100644 --- a/arbnode/message_pruner.go +++ b/arbnode/message_pruner.go @@ -23,13 +23,14 @@ import ( type MessagePruner struct { stopwaiter.StopWaiter - transactionStreamer *TransactionStreamer - inboxTracker *InboxTracker - config MessagePrunerConfigFetcher - pruningLock sync.Mutex - lastPruneDone time.Time - cachedPrunedMessages uint64 - cachedPrunedDelayedMessages uint64 + transactionStreamer *TransactionStreamer + inboxTracker *InboxTracker + config MessagePrunerConfigFetcher + pruningLock sync.Mutex + lastPruneDone time.Time + cachedPrunedMessages uint64 + cachedPrunedBlockHashesInputFeed uint64 + cachedPrunedDelayedMessages uint64 } type MessagePrunerConfig struct { @@ -115,7 +116,15 @@ func (m *MessagePruner) prune(ctx context.Context, count arbutil.MessageIndex, g } func (m *MessagePruner) deleteOldMessagesFromDB(ctx context.Context, messageCount arbutil.MessageIndex, delayedMessageCount uint64) error { - prunedKeysRange, err := deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messagePrefix, &m.cachedPrunedMessages, uint64(messageCount)) + prunedKeysRange, err := deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, blockHashInputFeedPrefix, &m.cachedPrunedBlockHashesInputFeed, uint64(messageCount)) + if err != nil { + return fmt.Errorf("error deleting expected block hashes: %w", err) + } + if len(prunedKeysRange) > 0 { + log.Info("Pruned expected block hashes:", "first pruned key", prunedKeysRange[0], "last pruned key", prunedKeysRange[len(prunedKeysRange)-1]) + } + + prunedKeysRange, err = deleteFromLastPrunedUptoEndKey(ctx, m.transactionStreamer.db, messagePrefix, &m.cachedPrunedMessages, uint64(messageCount)) if err != nil { return fmt.Errorf("error deleting last batch 
messages: %w", err) } diff --git a/arbnode/message_pruner_test.go b/arbnode/message_pruner_test.go index 0212ed2364..ed85c0ebce 100644 --- a/arbnode/message_pruner_test.go +++ b/arbnode/message_pruner_test.go @@ -22,8 +22,8 @@ func TestMessagePrunerWithPruningEligibleMessagePresent(t *testing.T) { Require(t, err) checkDbKeys(t, messagesCount, transactionStreamerDb, messagePrefix) + checkDbKeys(t, messagesCount, transactionStreamerDb, blockHashInputFeedPrefix) checkDbKeys(t, messagesCount, inboxTrackerDb, rlpDelayedMessagePrefix) - } func TestMessagePrunerTwoHalves(t *testing.T) { @@ -71,16 +71,18 @@ func TestMessagePrunerWithNoPruningEligibleMessagePresent(t *testing.T) { Require(t, err) checkDbKeys(t, uint64(messagesCount), transactionStreamerDb, messagePrefix) + checkDbKeys(t, uint64(messagesCount), transactionStreamerDb, blockHashInputFeedPrefix) checkDbKeys(t, messagesCount, inboxTrackerDb, rlpDelayedMessagePrefix) } func setupDatabase(t *testing.T, messageCount, delayedMessageCount uint64) (ethdb.Database, ethdb.Database, *MessagePruner) { - transactionStreamerDb := rawdb.NewMemoryDatabase() for i := uint64(0); i < uint64(messageCount); i++ { err := transactionStreamerDb.Put(dbKey(messagePrefix, i), []byte{}) Require(t, err) + err = transactionStreamerDb.Put(dbKey(blockHashInputFeedPrefix, i), []byte{}) + Require(t, err) } inboxTrackerDb := rawdb.NewMemoryDatabase() diff --git a/arbnode/node.go b/arbnode/node.go index 5fc2f7eaa8..7c2ed13431 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -95,6 +95,8 @@ type Config struct { TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` Maintenance MaintenanceConfig `koanf:"maintenance" reload:"hot"` ResourceMgmt resourcemanager.Config `koanf:"resource-mgmt" reload:"hot"` + // SnapSyncConfig is only used for testing purposes, these should not be configured in production. 
+ SnapSyncTest SnapSyncConfig } func (c *Config) Validate() error { @@ -182,6 +184,7 @@ var ConfigDefault = Config{ TransactionStreamer: DefaultTransactionStreamerConfig, ResourceMgmt: resourcemanager.DefaultConfig, Maintenance: DefaultMaintenanceConfig, + SnapSyncTest: DefaultSnapSyncConfig, } func ConfigDefaultL1Test() *Config { @@ -280,6 +283,22 @@ type Node struct { ctx context.Context } +type SnapSyncConfig struct { + Enabled bool + PrevBatchMessageCount uint64 + PrevDelayedRead uint64 + BatchCount uint64 + DelayedCount uint64 +} + +var DefaultSnapSyncConfig = SnapSyncConfig{ + Enabled: false, + PrevBatchMessageCount: 0, + BatchCount: 0, + DelayedCount: 0, + PrevDelayedRead: 0, +} + type ConfigFetcher interface { Get() *Config Start(context.Context) @@ -417,7 +436,7 @@ func createNodeImpl( } transactionStreamerConfigFetcher := func() *TransactionStreamerConfig { return &configFetcher.Get().TransactionStreamer } - txStreamer, err := NewTransactionStreamer(arbDb, l2Config, exec, broadcastServer, fatalErrChan, transactionStreamerConfigFetcher) + txStreamer, err := NewTransactionStreamer(arbDb, l2Config, exec, broadcastServer, fatalErrChan, transactionStreamerConfigFetcher, &configFetcher.Get().SnapSyncTest) if err != nil { return nil, err } @@ -511,14 +530,15 @@ func createNodeImpl( var daWriter das.DataAvailabilityServiceWriter var daReader das.DataAvailabilityServiceReader var dasLifecycleManager *das.LifecycleManager + var dasKeysetFetcher *das.KeysetFetcher if config.DataAvailability.Enable { if config.BatchPoster.Enable { - daWriter, daReader, dasLifecycleManager, err = das.CreateBatchPosterDAS(ctx, &config.DataAvailability, dataSigner, l1client, deployInfo.SequencerInbox) + daWriter, daReader, dasKeysetFetcher, dasLifecycleManager, err = das.CreateBatchPosterDAS(ctx, &config.DataAvailability, dataSigner, l1client, deployInfo.SequencerInbox) if err != nil { return nil, err } } else { - daReader, dasLifecycleManager, err = das.CreateDAReaderForNode(ctx, &config.DataAvailability, l1Reader, &deployInfo.SequencerInbox) + daReader, dasKeysetFetcher, dasLifecycleManager, err = das.CreateDAReaderForNode(ctx, &config.DataAvailability, l1Reader, &deployInfo.SequencerInbox) if err != nil { return nil, err } @@ -542,12 +562,12 @@ func createNodeImpl( } var dapReaders []daprovider.Reader if daReader != nil { - dapReaders = append(dapReaders, daprovider.NewReaderForDAS(daReader)) + dapReaders = append(dapReaders, daprovider.NewReaderForDAS(daReader, dasKeysetFetcher)) } if blobReader != nil { dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(blobReader)) } - inboxTracker, err := NewInboxTracker(arbDb, txStreamer, dapReaders) + inboxTracker, err := NewInboxTracker(arbDb, txStreamer, dapReaders, config.SnapSyncTest) if err != nil { return nil, err } @@ -605,6 +625,7 @@ func createNodeImpl( var stakerObj *staker.Staker var messagePruner *MessagePruner + var stakerAddr common.Address if config.Staker.Enable { dp, err := StakerDataposter( @@ -662,17 +683,14 @@ func createNodeImpl( if err := wallet.Initialize(ctx); err != nil { return nil, err } - var validatorAddr string - if txOptsValidator != nil { - validatorAddr = txOptsValidator.From.String() - } else { - validatorAddr = config.Staker.DataPoster.ExternalSigner.Address + if dp != nil { + stakerAddr = dp.Sender() } whitelisted, err := stakerObj.IsWhitelisted(ctx) if err != nil { return nil, err } - log.Info("running as validator", "txSender", validatorAddr, "actingAsWallet", wallet.Address(), "whitelisted", whitelisted, "strategy", 
config.Staker.Strategy) + log.Info("running as validator", "txSender", stakerAddr, "actingAsWallet", wallet.Address(), "whitelisted", whitelisted, "strategy", config.Staker.Strategy) } var batchPoster *BatchPoster @@ -701,6 +719,11 @@ func createNodeImpl( if err != nil { return nil, err } + + // Check if staker and batch poster are using the same address + if stakerAddr != (common.Address{}) && !strings.EqualFold(config.Staker.Strategy, "watchtower") && stakerAddr == batchPoster.dataPoster.Sender() { + return nil, fmt.Errorf("staker and batch poster are using the same address which is not allowed: %v", stakerAddr) + } } // always create DelayedSequencer, it won't do anything if it is disabled @@ -786,17 +809,6 @@ func CreateNode( return currentNode, nil } -func (n *Node) CacheL1PriceDataOfMsg(pos arbutil.MessageIndex, callDataUnits uint64, l1GasCharged uint64) { - n.TxStreamer.CacheL1PriceDataOfMsg(pos, callDataUnits, l1GasCharged) -} - -func (n *Node) BacklogL1GasCharged() uint64 { - return n.TxStreamer.BacklogL1GasCharged() -} -func (n *Node) BacklogCallDataUnits() uint64 { - return n.TxStreamer.BacklogCallDataUnits() -} - func (n *Node) Start(ctx context.Context) error { execClient, ok := n.Execution.(*gethexec.ExecutionNode) if !ok { diff --git a/arbnode/schema.go b/arbnode/schema.go index ddc7cf54fd..2854b7e785 100644 --- a/arbnode/schema.go +++ b/arbnode/schema.go @@ -5,6 +5,7 @@ package arbnode var ( messagePrefix []byte = []byte("m") // maps a message sequence number to a message + blockHashInputFeedPrefix []byte = []byte("b") // maps a message sequence number to a block hash received through the input feed legacyDelayedMessagePrefix []byte = []byte("d") // maps a delayed sequence number to an accumulator and a message as serialized on L1 rlpDelayedMessagePrefix []byte = []byte("e") // maps a delayed sequence number to an accumulator and an RLP encoded message parentChainBlockNumberPrefix []byte = []byte("p") // maps a delayed sequence number to a parent chain block number diff --git a/arbnode/seq_coordinator.go b/arbnode/seq_coordinator.go index ecf38ddf42..cdf1011b11 100644 --- a/arbnode/seq_coordinator.go +++ b/arbnode/seq_coordinator.go @@ -39,7 +39,6 @@ type SeqCoordinator struct { redisutil.RedisCoordinator - sync *SyncMonitor streamer *TransactionStreamer sequencer execution.ExecutionSequencer delayedSequencer *DelayedSequencer @@ -150,7 +149,6 @@ func NewSeqCoordinator( } coordinator := &SeqCoordinator{ RedisCoordinator: *redisCoordinator, - sync: sync, streamer: streamer, sequencer: sequencer, config: config, @@ -607,9 +605,10 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { return c.noRedisError() } - syncProgress := c.sync.SyncProgressMap() - synced := len(syncProgress) == 0 + // The sequencer should want the lockout if and only if it is synced, not avoiding lockout, and execution has processed every message that consensus had one second ago + synced := c.sequencer.Synced() if !synced { + syncProgress := c.sequencer.FullSyncProgressMap() var detailsList []interface{} for key, value := range syncProgress { detailsList = append(detailsList, key, value) @@ -849,7 +848,7 @@ func (c *SeqCoordinator) SeekLockout(ctx context.Context) { defer c.wantsLockoutMutex.Unlock() c.avoidLockout-- log.Info("seeking lockout", "myUrl", c.config.Url()) - if c.sync.Synced() { + if c.sequencer.Synced() { // Even if this errors we still internally marked ourselves as wanting the lockout err := c.wantsLockoutUpdateWithMutex(ctx) if err != nil { diff --git a/arbnode/transaction_streamer.go 
b/arbnode/transaction_streamer.go index edf5810a72..317231b6b9 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -51,9 +51,10 @@ type TransactionStreamer struct { execLastMsgCount arbutil.MessageIndex validator *staker.BlockValidator - db ethdb.Database - fatalErrChan chan<- error - config TransactionStreamerConfigFetcher + db ethdb.Database + fatalErrChan chan<- error + config TransactionStreamerConfigFetcher + snapSyncConfig *SnapSyncConfig insertionMutex sync.Mutex // cannot be acquired while reorgMutex is held reorgMutex sync.RWMutex @@ -61,7 +62,7 @@ type TransactionStreamer struct { nextAllowedFeedReorgLog time.Time - broadcasterQueuedMessages []arbostypes.MessageWithMetadata + broadcasterQueuedMessages []arbostypes.MessageWithMetadataAndBlockHash broadcasterQueuedMessagesPos uint64 broadcasterQueuedMessagesActiveReorg bool @@ -69,9 +70,6 @@ type TransactionStreamer struct { broadcastServer *broadcaster.Broadcaster inboxReader *InboxReader delayedBridge *DelayedBridge - - cachedL1PriceDataMutex sync.RWMutex - cachedL1PriceData *L1PriceData } type TransactionStreamerConfig struct { @@ -107,6 +105,7 @@ func NewTransactionStreamer( broadcastServer *broadcaster.Broadcaster, fatalErrChan chan<- error, config TransactionStreamerConfigFetcher, + snapSyncConfig *SnapSyncConfig, ) (*TransactionStreamer, error) { streamer := &TransactionStreamer{ exec: exec, @@ -116,9 +115,7 @@ func NewTransactionStreamer( broadcastServer: broadcastServer, fatalErrChan: fatalErrChan, config: config, - cachedL1PriceData: &L1PriceData{ - msgToL1PriceData: []L1PriceDataOfMsg{}, - }, + snapSyncConfig: snapSyncConfig, } err := streamer.cleanupInconsistentState() if err != nil { @@ -127,119 +124,15 @@ func NewTransactionStreamer( return streamer, nil } -type L1PriceDataOfMsg struct { - callDataUnits uint64 - cummulativeCallDataUnits uint64 - l1GasCharged uint64 - cummulativeL1GasCharged uint64 -} - -type L1PriceData struct { - startOfL1PriceDataCache arbutil.MessageIndex - endOfL1PriceDataCache arbutil.MessageIndex - msgToL1PriceData []L1PriceDataOfMsg - currentEstimateOfL1GasPrice uint64 -} - -func (s *TransactionStreamer) CurrentEstimateOfL1GasPrice() uint64 { - s.cachedL1PriceDataMutex.Lock() - defer s.cachedL1PriceDataMutex.Unlock() - - currentEstimate, err := s.exec.GetL1GasPriceEstimate() - if err != nil { - log.Error("error fetching current L2 estimate of L1 gas price hence reusing cached estimate", "err", err) - } else { - s.cachedL1PriceData.currentEstimateOfL1GasPrice = currentEstimate - } - return s.cachedL1PriceData.currentEstimateOfL1GasPrice -} - -func (s *TransactionStreamer) BacklogCallDataUnits() uint64 { - s.cachedL1PriceDataMutex.RLock() - defer s.cachedL1PriceDataMutex.RUnlock() - - size := len(s.cachedL1PriceData.msgToL1PriceData) - if size == 0 { - return 0 - } - return (s.cachedL1PriceData.msgToL1PriceData[size-1].cummulativeCallDataUnits - - s.cachedL1PriceData.msgToL1PriceData[0].cummulativeCallDataUnits + - s.cachedL1PriceData.msgToL1PriceData[0].callDataUnits) -} - -func (s *TransactionStreamer) BacklogL1GasCharged() uint64 { - s.cachedL1PriceDataMutex.RLock() - defer s.cachedL1PriceDataMutex.RUnlock() - - size := len(s.cachedL1PriceData.msgToL1PriceData) - if size == 0 { - return 0 - } - return (s.cachedL1PriceData.msgToL1PriceData[size-1].cummulativeL1GasCharged - - s.cachedL1PriceData.msgToL1PriceData[0].cummulativeL1GasCharged + - s.cachedL1PriceData.msgToL1PriceData[0].l1GasCharged) -} - -func (s *TransactionStreamer) TrimCache(to 
arbutil.MessageIndex) { - s.cachedL1PriceDataMutex.Lock() - defer s.cachedL1PriceDataMutex.Unlock() - - if to < s.cachedL1PriceData.startOfL1PriceDataCache { - log.Info("trying to trim older cache which doesnt exist anymore") - } else if to >= s.cachedL1PriceData.endOfL1PriceDataCache { - s.cachedL1PriceData.startOfL1PriceDataCache = 0 - s.cachedL1PriceData.endOfL1PriceDataCache = 0 - s.cachedL1PriceData.msgToL1PriceData = []L1PriceDataOfMsg{} - } else { - newStart := to - s.cachedL1PriceData.startOfL1PriceDataCache + 1 - s.cachedL1PriceData.msgToL1PriceData = s.cachedL1PriceData.msgToL1PriceData[newStart:] - s.cachedL1PriceData.startOfL1PriceDataCache = to + 1 - } +// Represents a block's hash in the database. +// Necessary because RLP decoder doesn't produce nil values by default. +type blockHashDBValue struct { + BlockHash *common.Hash `rlp:"nil"` } -func (s *TransactionStreamer) CacheL1PriceDataOfMsg(seqNum arbutil.MessageIndex, callDataUnits uint64, l1GasCharged uint64) { - s.cachedL1PriceDataMutex.Lock() - defer s.cachedL1PriceDataMutex.Unlock() - - resetCache := func() { - s.cachedL1PriceData.startOfL1PriceDataCache = seqNum - s.cachedL1PriceData.endOfL1PriceDataCache = seqNum - s.cachedL1PriceData.msgToL1PriceData = []L1PriceDataOfMsg{{ - callDataUnits: callDataUnits, - cummulativeCallDataUnits: callDataUnits, - l1GasCharged: l1GasCharged, - cummulativeL1GasCharged: l1GasCharged, - }} - } - size := len(s.cachedL1PriceData.msgToL1PriceData) - if size == 0 || - s.cachedL1PriceData.startOfL1PriceDataCache == 0 || - s.cachedL1PriceData.endOfL1PriceDataCache == 0 || - arbutil.MessageIndex(size) != s.cachedL1PriceData.endOfL1PriceDataCache-s.cachedL1PriceData.startOfL1PriceDataCache+1 { - resetCache() - return - } - if seqNum != s.cachedL1PriceData.endOfL1PriceDataCache+1 { - if seqNum > s.cachedL1PriceData.endOfL1PriceDataCache+1 { - log.Info("message position higher then current end of l1 price data cache, resetting cache to this message") - resetCache() - } else if seqNum < s.cachedL1PriceData.startOfL1PriceDataCache { - log.Info("message position lower than start of l1 price data cache, ignoring") - } else { - log.Info("message position already seen in l1 price data cache, ignoring") - } - } else { - cummulativeCallDataUnits := s.cachedL1PriceData.msgToL1PriceData[size-1].cummulativeCallDataUnits - cummulativeL1GasCharged := s.cachedL1PriceData.msgToL1PriceData[size-1].cummulativeL1GasCharged - s.cachedL1PriceData.msgToL1PriceData = append(s.cachedL1PriceData.msgToL1PriceData, L1PriceDataOfMsg{ - callDataUnits: callDataUnits, - cummulativeCallDataUnits: cummulativeCallDataUnits + callDataUnits, - l1GasCharged: l1GasCharged, - cummulativeL1GasCharged: cummulativeL1GasCharged + l1GasCharged, - }) - s.cachedL1PriceData.endOfL1PriceDataCache = seqNum - } -} +const ( + BlockHashMismatchLogMsg = "BlockHash from feed doesn't match locally computed hash. Check feed source." +) // Encodes a uint64 as bytes in a lexically sortable manner for database iteration. // Generally this is only used for database keys, which need sorted. @@ -372,7 +265,7 @@ func deleteFromRange(ctx context.Context, db ethdb.Database, prefix []byte, star // The insertion mutex must be held. This acquires the reorg mutex. 
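The blockHashDBValue type introduced above leans on go-ethereum's rlp "nil" struct tag so that a message stored before block hashes were recorded decodes back to a nil hash rather than a zero hash. A minimal sketch of that round trip (illustration only, with a stand-in type name rather than the unexported struct from the patch):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

// optionalBlockHash mirrors blockHashDBValue: the "nil" tag lets a nil pointer
// survive an encode/decode round trip instead of turning into a zero hash.
type optionalBlockHash struct {
	BlockHash *common.Hash `rlp:"nil"`
}

func main() {
	// A message written before block hashes were recorded carries no hash.
	enc, err := rlp.EncodeToBytes(optionalBlockHash{})
	if err != nil {
		panic(err)
	}
	var dec optionalBlockHash
	if err := rlp.DecodeBytes(enc, &dec); err != nil {
		panic(err)
	}
	fmt.Println("block hash still nil after round trip:", dec.BlockHash == nil)

	// With a hash present it round-trips as well.
	h := common.HexToHash("0x01")
	enc, err = rlp.EncodeToBytes(optionalBlockHash{BlockHash: &h})
	if err != nil {
		panic(err)
	}
	if err := rlp.DecodeBytes(enc, &dec); err != nil {
		panic(err)
	}
	fmt.Println("decoded hash:", dec.BlockHash.Hex())
}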
// Note: oldMessages will be empty if reorgHook is nil -func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata) error { +func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadataAndBlockHash) error { if count == 0 { return errors.New("cannot reorg out init message") } @@ -466,14 +359,14 @@ func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageInde return err } - messagesWithBlockHash := make([]broadcaster.MessageWithMetadataAndBlockHash, 0, len(messagesResults)) + messagesWithComputedBlockHash := make([]arbostypes.MessageWithMetadataAndBlockHash, 0, len(messagesResults)) for i := 0; i < len(messagesResults); i++ { - messagesWithBlockHash = append(messagesWithBlockHash, broadcaster.MessageWithMetadataAndBlockHash{ - Message: newMessages[i], - BlockHash: &messagesResults[i].BlockHash, + messagesWithComputedBlockHash = append(messagesWithComputedBlockHash, arbostypes.MessageWithMetadataAndBlockHash{ + MessageWithMeta: newMessages[i].MessageWithMeta, + BlockHash: &messagesResults[i].BlockHash, }) } - s.broadcastMessages(messagesWithBlockHash, count) + s.broadcastMessages(messagesWithComputedBlockHash, count) if s.validator != nil { err = s.validator.Reorg(s.GetContext(), count) @@ -482,6 +375,10 @@ func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageInde } } + err = deleteStartingAt(s.db, batch, blockHashInputFeedPrefix, uint64ToKey(uint64(count))) + if err != nil { + return err + } err = deleteStartingAt(s.db, batch, messagePrefix, uint64ToKey(uint64(count))) if err != nil { return err @@ -511,6 +408,10 @@ func dbKey(prefix []byte, pos uint64) []byte { return key } +func isErrNotFound(err error) bool { + return errors.Is(err, leveldb.ErrNotFound) || errors.Is(err, pebble.ErrNotFound) +} + // Note: if changed to acquire the mutex, some internal users may need to be updated to a non-locking version. func (s *TransactionStreamer) GetMessage(seqNum arbutil.MessageIndex) (*arbostypes.MessageWithMetadata, error) { key := dbKey(messagePrefix, uint64(seqNum)) @@ -527,6 +428,36 @@ func (s *TransactionStreamer) GetMessage(seqNum arbutil.MessageIndex) (*arbostyp return &message, nil } +func (s *TransactionStreamer) getMessageWithMetadataAndBlockHash(seqNum arbutil.MessageIndex) (*arbostypes.MessageWithMetadataAndBlockHash, error) { + msg, err := s.GetMessage(seqNum) + if err != nil { + return nil, err + } + + // Get block hash. + // To keep it backwards compatible, since it is possible that a message related + // to a sequence number exists in the database, but the block hash doesn't. + key := dbKey(blockHashInputFeedPrefix, uint64(seqNum)) + var blockHash *common.Hash + data, err := s.db.Get(key) + if err == nil { + var blockHashDBVal blockHashDBValue + err = rlp.DecodeBytes(data, &blockHashDBVal) + if err != nil { + return nil, err + } + blockHash = blockHashDBVal.BlockHash + } else if !isErrNotFound(err) { + return nil, err + } + + msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ + MessageWithMeta: *msg, + BlockHash: blockHash, + } + return &msgWithBlockHash, nil +} + // Note: if changed to acquire the mutex, some internal users may need to be updated to a non-locking version. 
func (s *TransactionStreamer) GetMessageCount() (arbutil.MessageIndex, error) { posBytes, err := s.db.Get(messageCountKey) @@ -580,7 +511,7 @@ func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*m.BroadcastFe return nil } broadcastStartPos := feedMessages[0].SequenceNumber - var messages []arbostypes.MessageWithMetadata + var messages []arbostypes.MessageWithMetadataAndBlockHash broadcastAfterPos := broadcastStartPos for _, feedMessage := range feedMessages { if broadcastAfterPos != feedMessage.SequenceNumber { @@ -589,7 +520,11 @@ func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*m.BroadcastFe if feedMessage.Message.Message == nil || feedMessage.Message.Message.Header == nil { return fmt.Errorf("invalid feed message at sequence number %v", feedMessage.SequenceNumber) } - messages = append(messages, feedMessage.Message) + msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ + MessageWithMeta: feedMessage.Message, + BlockHash: feedMessage.BlockHash, + } + messages = append(messages, msgWithBlockHash) broadcastAfterPos++ } @@ -608,7 +543,7 @@ func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*m.BroadcastFe messages = messages[dups:] broadcastStartPos += arbutil.MessageIndex(dups) if oldMsg != nil { - s.logReorg(broadcastStartPos, oldMsg, &messages[0], false) + s.logReorg(broadcastStartPos, oldMsg, &messages[0].MessageWithMeta, false) } if len(messages) == 0 { // No new messages received @@ -658,7 +593,7 @@ func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*m.BroadcastFe if broadcastStartPos > 0 { _, err := s.GetMessage(broadcastStartPos - 1) if err != nil { - if !errors.Is(err, leveldb.ErrNotFound) && !errors.Is(err, pebble.ErrNotFound) { + if !isErrNotFound(err) { return err } // Message before current message doesn't exist in database, so don't add current messages yet @@ -710,11 +645,18 @@ func endBatch(batch ethdb.Batch) error { } func (s *TransactionStreamer) AddMessagesAndEndBatch(pos arbutil.MessageIndex, messagesAreConfirmed bool, messages []arbostypes.MessageWithMetadata, batch ethdb.Batch) error { + messagesWithBlockHash := make([]arbostypes.MessageWithMetadataAndBlockHash, 0, len(messages)) + for _, message := range messages { + messagesWithBlockHash = append(messagesWithBlockHash, arbostypes.MessageWithMetadataAndBlockHash{ + MessageWithMeta: message, + }) + } + if messagesAreConfirmed { // Trim confirmed messages from l1pricedataCache - s.TrimCache(pos + arbutil.MessageIndex(len(messages))) + s.exec.MarkFeedStart(pos + arbutil.MessageIndex(len(messages))) s.reorgMutex.RLock() - dups, _, _, err := s.countDuplicateMessages(pos, messages, &batch) + dups, _, _, err := s.countDuplicateMessages(pos, messagesWithBlockHash, &batch) s.reorgMutex.RUnlock() if err != nil { return err @@ -731,10 +673,13 @@ func (s *TransactionStreamer) AddMessagesAndEndBatch(pos arbutil.MessageIndex, m s.insertionMutex.Lock() defer s.insertionMutex.Unlock() - return s.addMessagesAndEndBatchImpl(pos, messagesAreConfirmed, messages, batch) + return s.addMessagesAndEndBatchImpl(pos, messagesAreConfirmed, messagesWithBlockHash, batch) } func (s *TransactionStreamer) getPrevPrevDelayedRead(pos arbutil.MessageIndex) (uint64, error) { + if s.snapSyncConfig.Enabled && uint64(pos) == s.snapSyncConfig.PrevBatchMessageCount { + return s.snapSyncConfig.PrevDelayedRead, nil + } var prevDelayedRead uint64 if pos > 0 { prevMsg, err := s.GetMessage(pos - 1) @@ -749,7 +694,7 @@ func (s *TransactionStreamer) getPrevPrevDelayedRead(pos 
arbutil.MessageIndex) ( func (s *TransactionStreamer) countDuplicateMessages( pos arbutil.MessageIndex, - messages []arbostypes.MessageWithMetadata, + messages []arbostypes.MessageWithMetadataAndBlockHash, batch *ethdb.Batch, ) (int, bool, *arbostypes.MessageWithMetadata, error) { curMsg := 0 @@ -770,7 +715,7 @@ func (s *TransactionStreamer) countDuplicateMessages( return 0, false, nil, err } nextMessage := messages[curMsg] - wantMessage, err := rlp.EncodeToBytes(nextMessage) + wantMessage, err := rlp.EncodeToBytes(nextMessage.MessageWithMeta) if err != nil { return 0, false, nil, err } @@ -786,12 +731,12 @@ func (s *TransactionStreamer) countDuplicateMessages( return curMsg, true, nil, nil } var duplicateMessage bool - if nextMessage.Message != nil { - if dbMessageParsed.Message.BatchGasCost == nil || nextMessage.Message.BatchGasCost == nil { + if nextMessage.MessageWithMeta.Message != nil { + if dbMessageParsed.Message.BatchGasCost == nil || nextMessage.MessageWithMeta.Message.BatchGasCost == nil { // Remove both of the batch gas costs and see if the messages still differ - nextMessageCopy := nextMessage + nextMessageCopy := nextMessage.MessageWithMeta nextMessageCopy.Message = new(arbostypes.L1IncomingMessage) - *nextMessageCopy.Message = *nextMessage.Message + *nextMessageCopy.Message = *nextMessage.MessageWithMeta.Message batchGasCostBkup := dbMessageParsed.Message.BatchGasCost dbMessageParsed.Message.BatchGasCost = nil nextMessageCopy.Message.BatchGasCost = nil @@ -799,7 +744,7 @@ func (s *TransactionStreamer) countDuplicateMessages( // Actually this isn't a reorg; only the batch gas costs differed duplicateMessage = true // If possible - update the message in the database to add the gas cost cache. - if batch != nil && nextMessage.Message.BatchGasCost != nil { + if batch != nil && nextMessage.MessageWithMeta.Message.BatchGasCost != nil { if *batch == nil { *batch = s.db.NewBatch() } @@ -810,13 +755,14 @@ func (s *TransactionStreamer) countDuplicateMessages( } dbMessageParsed.Message.BatchGasCost = batchGasCostBkup } + msg := nextMessage.MessageWithMeta.Message if arbos.IsEspressoMsg(dbMessageParsed.Message) && - arbos.IsEspressoMsg(nextMessage.Message) && - !bytes.Equal(nextMessage.Message.L2msg, dbMessageParsed.Message.L2msg) { + arbos.IsEspressoMsg(msg) && + !bytes.Equal(msg.L2msg, dbMessageParsed.Message.L2msg) { // Check to see if the difference is the existence of block merkle proof in the new message, // The batcher can append this on the fly (see AddEspressoBlockMerkleProof). // If this is the case, update the database with the block merkle justification. 
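The BatchGasCost handling above boils down to: strip the cached field from both the stored and the incoming message, re-encode, and only treat a byte-for-byte match as a duplicate rather than a reorg. A compressed sketch of that comparison (illustration only, using a stand-in message type rather than arbostypes.MessageWithMetadata):

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// storedMessage stands in for the persisted message: Payload is the real
// content, GasCostCache is a derived value that may be filled in later.
type storedMessage struct {
	Payload      []byte
	GasCostCache *uint64 `rlp:"nil"`
}

// sameIgnoringCache reports whether two messages differ only in the cached gas
// cost, which is the condition under which countDuplicateMessages treats the
// incoming message as a duplicate rather than a reorg.
func sameIgnoringCache(a, b storedMessage) (bool, error) {
	// a and b are copies, so clearing the cache here does not affect the caller.
	a.GasCostCache = nil
	b.GasCostCache = nil
	encA, err := rlp.EncodeToBytes(a)
	if err != nil {
		return false, err
	}
	encB, err := rlp.EncodeToBytes(b)
	if err != nil {
		return false, err
	}
	return bytes.Equal(encA, encB), nil
}

func main() {
	cost := uint64(21000)
	onDisk := storedMessage{Payload: []byte("msg")}
	incoming := storedMessage{Payload: []byte("msg"), GasCostCache: &cost}
	dup, err := sameIgnoringCache(onDisk, incoming)
	if err != nil {
		panic(err)
	}
	fmt.Println("duplicate ignoring gas cost cache:", dup) // true
}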
- _, newJst, err := arbos.ParseEspressoMsg(nextMessage.Message) + _, newJst, err := arbos.ParseEspressoMsg(msg) if err != nil { return 0, false, nil, err @@ -872,7 +818,7 @@ func (s *TransactionStreamer) logReorg(pos arbutil.MessageIndex, dbMsg *arbostyp } -func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil.MessageIndex, messagesAreConfirmed bool, messages []arbostypes.MessageWithMetadata, batch ethdb.Batch) error { +func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil.MessageIndex, messagesAreConfirmed bool, messages []arbostypes.MessageWithMetadataAndBlockHash, batch ethdb.Batch) error { var confirmedReorg bool var oldMsg *arbostypes.MessageWithMetadata var lastDelayedRead uint64 @@ -890,7 +836,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil return err } if duplicates > 0 { - lastDelayedRead = messages[duplicates-1].DelayedMessagesRead + lastDelayedRead = messages[duplicates-1].MessageWithMeta.DelayedMessagesRead messages = messages[duplicates:] messageStartPos += arbutil.MessageIndex(duplicates) } @@ -928,13 +874,13 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil return err } if duplicates > 0 { - lastDelayedRead = messages[duplicates-1].DelayedMessagesRead + lastDelayedRead = messages[duplicates-1].MessageWithMeta.DelayedMessagesRead messages = messages[duplicates:] messageStartPos += arbutil.MessageIndex(duplicates) } } if oldMsg != nil { - s.logReorg(messageStartPos, oldMsg, &messages[0], confirmedReorg) + s.logReorg(messageStartPos, oldMsg, &messages[0].MessageWithMeta, confirmedReorg) } if feedReorg { @@ -954,12 +900,12 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil // Validate delayed message counts of remaining messages for i, msg := range messages { msgPos := messageStartPos + arbutil.MessageIndex(i) - diff := msg.DelayedMessagesRead - lastDelayedRead + diff := msg.MessageWithMeta.DelayedMessagesRead - lastDelayedRead if diff != 0 && diff != 1 { - return fmt.Errorf("attempted to insert jump from %v delayed messages read to %v delayed messages read at message index %v", lastDelayedRead, msg.DelayedMessagesRead, msgPos) + return fmt.Errorf("attempted to insert jump from %v delayed messages read to %v delayed messages read at message index %v", lastDelayedRead, msg.MessageWithMeta.DelayedMessagesRead, msgPos) } - lastDelayedRead = msg.DelayedMessagesRead - if msg.Message == nil { + lastDelayedRead = msg.MessageWithMeta.DelayedMessagesRead + if msg.MessageWithMeta.Message == nil { return fmt.Errorf("attempted to insert nil message at position %v", msgPos) } } @@ -1037,15 +983,15 @@ func (s *TransactionStreamer) WriteMessageFromSequencer( } } - if err := s.writeMessages(pos, []arbostypes.MessageWithMetadata{msgWithMeta}, nil); err != nil { - return err + msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ + MessageWithMeta: msgWithMeta, + BlockHash: &msgResult.BlockHash, } - msgWithBlockHash := broadcaster.MessageWithMetadataAndBlockHash{ - Message: msgWithMeta, - BlockHash: &msgResult.BlockHash, + if err := s.writeMessages(pos, []arbostypes.MessageWithMetadataAndBlockHash{msgWithBlockHash}, nil); err != nil { + return err } - s.broadcastMessages([]broadcaster.MessageWithMetadataAndBlockHash{msgWithBlockHash}, pos) + s.broadcastMessages([]arbostypes.MessageWithMetadataAndBlockHash{msgWithBlockHash}, pos) return nil } @@ -1066,9 +1012,23 @@ func (s *TransactionStreamer) PopulateFeedBacklog() error { return 
s.inboxReader.tracker.PopulateFeedBacklog(s.broadcastServer) } -func (s *TransactionStreamer) writeMessage(pos arbutil.MessageIndex, msg arbostypes.MessageWithMetadata, batch ethdb.Batch) error { +func (s *TransactionStreamer) writeMessage(pos arbutil.MessageIndex, msg arbostypes.MessageWithMetadataAndBlockHash, batch ethdb.Batch) error { + // write message with metadata key := dbKey(messagePrefix, uint64(pos)) - msgBytes, err := rlp.EncodeToBytes(msg) + msgBytes, err := rlp.EncodeToBytes(msg.MessageWithMeta) + if err != nil { + return err + } + if err := batch.Put(key, msgBytes); err != nil { + return err + } + + // write block hash + blockHashDBVal := blockHashDBValue{ + BlockHash: msg.BlockHash, + } + key = dbKey(blockHashInputFeedPrefix, uint64(pos)) + msgBytes, err = rlp.EncodeToBytes(blockHashDBVal) if err != nil { return err } @@ -1076,7 +1036,7 @@ func (s *TransactionStreamer) writeMessage(pos arbutil.MessageIndex, msg arbosty } func (s *TransactionStreamer) broadcastMessages( - msgs []broadcaster.MessageWithMetadataAndBlockHash, + msgs []arbostypes.MessageWithMetadataAndBlockHash, pos arbutil.MessageIndex, ) { if s.broadcastServer == nil { @@ -1089,7 +1049,7 @@ func (s *TransactionStreamer) broadcastMessages( // The mutex must be held, and pos must be the latest message count. // `batch` may be nil, which initializes a new batch. The batch is closed out in this function. -func (s *TransactionStreamer) writeMessages(pos arbutil.MessageIndex, messages []arbostypes.MessageWithMetadata, batch ethdb.Batch) error { +func (s *TransactionStreamer) writeMessages(pos arbutil.MessageIndex, messages []arbostypes.MessageWithMetadataAndBlockHash, batch ethdb.Batch) error { if batch == nil { batch = s.db.NewBatch() } @@ -1125,6 +1085,20 @@ func (s *TransactionStreamer) ResultAtCount(count arbutil.MessageIndex) (*execut return s.exec.ResultAtPos(count - 1) } +func (s *TransactionStreamer) checkResult(msgResult *execution.MessageResult, expectedBlockHash *common.Hash) { + if expectedBlockHash == nil { + return + } + if msgResult.BlockHash != *expectedBlockHash { + log.Error( + BlockHashMismatchLogMsg, + "expected", expectedBlockHash, + "actual", msgResult.BlockHash, + ) + return + } +} + // exposed for testing // return value: true if should be called again immediately func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution.ExecutionSequencer) bool { @@ -1151,7 +1125,7 @@ func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution if pos >= msgCount { return false } - msg, err := s.GetMessage(pos) + msgAndBlockHash, err := s.getMessageWithMetadataAndBlockHash(pos) if err != nil { log.Error("feedOneMsg failed to readMessage", "err", err, "pos", pos) return false @@ -1165,7 +1139,7 @@ func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution } msgForPrefetch = msg } - msgResult, err := s.exec.DigestMessage(pos, msg, msgForPrefetch) + msgResult, err := s.exec.DigestMessage(pos, &msgAndBlockHash.MessageWithMeta, msgForPrefetch) if err != nil { logger := log.Warn if prevMessageCount < msgCount { @@ -1175,11 +1149,13 @@ func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution return false } - msgWithBlockHash := broadcaster.MessageWithMetadataAndBlockHash{ - Message: *msg, - BlockHash: &msgResult.BlockHash, + s.checkResult(msgResult, msgAndBlockHash.BlockHash) + + msgWithBlockHash := arbostypes.MessageWithMetadataAndBlockHash{ + MessageWithMeta: msgAndBlockHash.MessageWithMeta, + BlockHash: &msgResult.BlockHash, 
} - s.broadcastMessages([]broadcaster.MessageWithMetadataAndBlockHash{msgWithBlockHash}, pos) + s.broadcastMessages([]arbostypes.MessageWithMetadataAndBlockHash{msgWithBlockHash}, pos) return pos+1 < msgCount } diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 0f3c019f74..9ff3dd3aa5 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -74,8 +74,8 @@ func OpenArbosState(stateDB vm.StateDB, burner burn.Burner) (*ArbosState, error) } return &ArbosState{ arbosVersion, - 20, - 30, + 31, + 31, backingStorage.OpenStorageBackedUint64(uint64(upgradeVersionOffset)), backingStorage.OpenStorageBackedUint64(uint64(upgradeTimestampOffset)), backingStorage.OpenStorageBackedAddress(uint64(networkFeeAccountOffset)), @@ -316,16 +316,14 @@ func (state *ArbosState) UpgradeArbosVersion( // these versions are left to Orbit chains for custom upgrades. case 30: - if !chainConfig.DebugMode() { - // This upgrade isn't finalized so we only want to support it for testing - return fmt.Errorf( - "the chain is upgrading to unsupported ArbOS version %v, %w", - nextArbosVersion, - ErrFatalNodeOutOfDate, - ) - } programs.Initialize(state.backingStorage.OpenSubStorage(programsSubspace)) + case 31: + params, err := state.Programs().Params() + ensure(err) + ensure(params.UpgradeToVersion(2)) + ensure(params.Save()) + default: return fmt.Errorf( "the chain is upgrading to unsupported ArbOS version %v, %w", diff --git a/arbos/arbostypes/messagewithmeta.go b/arbos/arbostypes/messagewithmeta.go index a3d4f5e3c3..79b7c4f9d2 100644 --- a/arbos/arbostypes/messagewithmeta.go +++ b/arbos/arbostypes/messagewithmeta.go @@ -18,6 +18,11 @@ type MessageWithMetadata struct { DelayedMessagesRead uint64 `json:"delayedMessagesRead"` } +type MessageWithMetadataAndBlockHash struct { + MessageWithMeta MessageWithMetadata + BlockHash *common.Hash +} + var EmptyTestMessageWithMetadata = MessageWithMetadata{ Message: &EmptyTestIncomingMessage, } diff --git a/arbos/programs/api.go b/arbos/programs/api.go index c8241a72b5..787f127ea4 100644 --- a/arbos/programs/api.go +++ b/arbos/programs/api.go @@ -266,6 +266,10 @@ func newApiClosures( } captureHostio := func(name string, args, outs []byte, startInk, endInk uint64) { tracingInfo.Tracer.CaptureStylusHostio(name, args, outs, startInk, endInk) + if name == "evm_gas_left" || name == "evm_ink_left" { + tracingInfo.Tracer.CaptureState(0, vm.GAS, 0, 0, scope, []byte{}, depth, nil) + tracingInfo.Tracer.CaptureState(0, vm.POP, 0, 0, scope, []byte{}, depth, nil) + } } return func(req RequestType, input []byte) ([]byte, []byte, uint64) { diff --git a/arbos/programs/data_pricer.go b/arbos/programs/data_pricer.go index b0184d7dc7..ed7c98556d 100644 --- a/arbos/programs/data_pricer.go +++ b/arbos/programs/data_pricer.go @@ -27,12 +27,14 @@ const ( inertiaOffset ) +const ArbitrumStartTime = 1421388000 // the day it all began + const initialDemand = 0 // no demand const InitialHourlyBytes = 1 * (1 << 40) / (365 * 24) // 1Tb total footprint const initialBytesPerSecond = InitialHourlyBytes / (60 * 60) // refill each second -const initialLastUpdateTime = 1421388000 // the day it all began -const initialMinPrice = 82928201 // 5Mb = $1 -const initialInertia = 21360419 // expensive at 1Tb +const initialLastUpdateTime = ArbitrumStartTime +const initialMinPrice = 82928201 // 5Mb = $1 +const initialInertia = 21360419 // expensive at 1Tb func initDataPricer(sto *storage.Storage) { demand := sto.OpenStorageBackedUint32(demandOffset) diff --git 
a/arbos/programs/native.go b/arbos/programs/native.go index 09989f3380..f8e2696aad 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -24,6 +24,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" @@ -53,6 +54,24 @@ func activateProgram( debug bool, burner burn.Burner, ) (*activationInfo, error) { + info, asm, module, err := activateProgramInternal(db, program, codehash, wasm, page_limit, version, debug, burner.GasLeft()) + if err != nil { + return nil, err + } + db.ActivateWasm(info.moduleHash, asm, module) + return info, nil +} + +func activateProgramInternal( + db vm.StateDB, + addressForLogging common.Address, + codehash common.Hash, + wasm []byte, + page_limit uint16, + version uint16, + debug bool, + gasLeft *uint64, +) (*activationInfo, []byte, []byte, error) { output := &rustBytes{} asmLen := usize(0) moduleHash := &bytes32{} @@ -69,18 +88,18 @@ func activateProgram( &codeHash, moduleHash, stylusData, - (*u64)(burner.GasLeft()), + (*u64)(gasLeft), )) data, msg, err := status.toResult(output.intoBytes(), debug) if err != nil { if debug { - log.Warn("activation failed", "err", err, "msg", msg, "program", program) + log.Warn("activation failed", "err", err, "msg", msg, "program", addressForLogging) } if errors.Is(err, vm.ErrExecutionReverted) { - return nil, fmt.Errorf("%w: %s", ErrProgramActivation, msg) + return nil, nil, nil, fmt.Errorf("%w: %s", ErrProgramActivation, msg) } - return nil, err + return nil, nil, nil, err } hash := moduleHash.toHash() @@ -95,26 +114,69 @@ func activateProgram( asmEstimate: uint32(stylusData.asm_estimate), footprint: uint16(stylusData.footprint), } - db.ActivateWasm(hash, asm, module) - return info, err + return info, asm, module, err +} + +func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, addressForLogging common.Address, code []byte, codeHash common.Hash, pagelimit uint16, time uint64, debugMode bool, program Program) ([]byte, error) { + localAsm, err := statedb.TryGetActivatedAsm(moduleHash) + if err == nil && len(localAsm) > 0 { + return localAsm, nil + } + + // addressForLogging may be empty or may not correspond to the code, so we need to be careful to use the code passed in separately + wasm, err := getWasmFromContractCode(code) + if err != nil { + log.Error("Failed to reactivate program: getWasm", "address", addressForLogging, "expected moduleHash", moduleHash, "err", err) + return nil, fmt.Errorf("failed to reactivate program address: %v err: %w", addressForLogging, err) + } + + unlimitedGas := uint64(0xffffffffffff) + // we know program is activated, so it must be in correct version and not use too much memory + info, asm, module, err := activateProgramInternal(statedb, addressForLogging, codeHash, wasm, pagelimit, program.version, debugMode, &unlimitedGas) + if err != nil { + log.Error("failed to reactivate program", "address", addressForLogging, "expected moduleHash", moduleHash, "err", err) + return nil, fmt.Errorf("failed to reactivate program address: %v err: %w", addressForLogging, err) + } + + if info.moduleHash != moduleHash { + log.Error("failed to reactivate program", "address", addressForLogging, "expected moduleHash", moduleHash, "got", info.moduleHash) + return nil, fmt.Errorf("failed to reactivate program. 
address: %v, expected ModuleHash: %v", addressForLogging, moduleHash) + } + + currentHoursSince := hoursSinceArbitrum(time) + if currentHoursSince > program.activatedAt { + // stylus program is active on-chain, and was activated in the past + // so we store it directly to database + batch := statedb.Database().WasmStore().NewBatch() + rawdb.WriteActivation(batch, moduleHash, asm, module) + if err := batch.Write(); err != nil { + log.Error("failed writing re-activation to state", "address", addressForLogging, "err", err) + } + } else { + // program activated recently, possibly in this eth_call + // store it to statedb. It will be stored to database if statedb is commited + statedb.ActivateWasm(info.moduleHash, asm, module) + } + return asm, nil } func callProgram( address common.Address, moduleHash common.Hash, + localAsm []byte, scope *vm.ScopeContext, interpreter *vm.EVMInterpreter, tracingInfo *util.TracingInfo, calldata []byte, - evmData *evmData, - stylusParams *goParams, + evmData *EvmData, + stylusParams *ProgParams, memoryModel *MemoryModel, + arbos_tag uint32, ) ([]byte, error) { db := interpreter.Evm().StateDB - asm := db.GetActivatedAsm(moduleHash) - debug := stylusParams.debugMode + debug := stylusParams.DebugMode - if len(asm) == 0 { + if len(localAsm) == 0 { log.Error("missing asm", "program", address, "module", moduleHash) panic("missing asm") } @@ -128,7 +190,7 @@ func callProgram( output := &rustBytes{} status := userStatus(C.stylus_call( - goSlice(asm), + goSlice(localAsm), goSlice(calldata), stylusParams.encode(), evmApi.cNative, @@ -136,6 +198,7 @@ func callProgram( cbool(debug), output, (*u64)(&scope.Contract.Gas), + u32(arbos_tag), )) depth := interpreter.Depth() @@ -159,11 +222,16 @@ func handleReqImpl(apiId usize, req_type u32, data *rustSlice, costPtr *u64, out // Caches a program in Rust. We write a record so that we can undo on revert. // For gas estimation and eth_call, we ignore permanent updates and rely on Rust's LRU. -func cacheProgram(db vm.StateDB, module common.Hash, version uint16, debug bool, runMode core.MessageRunMode) { +func cacheProgram(db vm.StateDB, module common.Hash, program Program, addressForLogging common.Address, code []byte, codeHash common.Hash, params *StylusParams, debug bool, time uint64, runMode core.MessageRunMode) { if runMode == core.MessageCommitMode { - asm := db.GetActivatedAsm(module) - state.CacheWasmRust(asm, module, version, debug) - db.RecordCacheWasm(state.CacheWasm{ModuleHash: module, Version: version, Debug: debug}) + // address is only used for logging + asm, err := getLocalAsm(db, module, addressForLogging, code, codeHash, params.PageLimit, time, debug, program) + if err != nil { + panic("unable to recreate wasm") + } + tag := db.Database().WasmCacheTag() + state.CacheWasmRust(asm, module, program.version, tag, debug) + db.RecordCacheWasm(state.CacheWasm{ModuleHash: module, Version: program.version, Tag: tag, Debug: debug}) } } @@ -171,22 +239,27 @@ func cacheProgram(db vm.StateDB, module common.Hash, version uint16, debug bool, // For gas estimation and eth_call, we ignore permanent updates and rely on Rust's LRU. 
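For orientation, the getLocalAsm hunk above re-activates a program from its on-chain wasm when the compiled artifact is missing locally, then decides where to persist the result. The following is a minimal standalone sketch of that persistence decision, not part of the patch; the function name is illustrative and the inputs are the hour-buckets used elsewhere in this file.

package main

import "fmt"

// persistDirectlyToWasmStore mirrors the rule in getLocalAsm above: a program
// activated in an earlier hour-bucket is already settled on-chain, so its
// recompiled asm/module can be written straight to the wasm database; a program
// activated in the current hour (possibly by this very call) stays in the statedb
// and only reaches the database if the statedb commits.
func persistDirectlyToWasmStore(currentHoursSince, activatedAt uint64) bool {
	return currentHoursSince > activatedAt
}

func main() {
	fmt.Println(persistDirectlyToWasmStore(10, 3))  // true: write via a WasmStore batch
	fmt.Println(persistDirectlyToWasmStore(10, 10)) // false: keep in the statedb
}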
func evictProgram(db vm.StateDB, module common.Hash, version uint16, debug bool, runMode core.MessageRunMode, forever bool) { if runMode == core.MessageCommitMode { - state.EvictWasmRust(module, version, debug) + tag := db.Database().WasmCacheTag() + state.EvictWasmRust(module, version, tag, debug) if !forever { - db.RecordEvictWasm(state.EvictWasm{ModuleHash: module, Version: version, Debug: debug}) + db.RecordEvictWasm(state.EvictWasm{ModuleHash: module, Version: version, Tag: tag, Debug: debug}) } } } func init() { - state.CacheWasmRust = func(asm []byte, moduleHash common.Hash, version uint16, debug bool) { - C.stylus_cache_module(goSlice(asm), hashToBytes32(moduleHash), u16(version), cbool(debug)) + state.CacheWasmRust = func(asm []byte, moduleHash common.Hash, version uint16, tag uint32, debug bool) { + C.stylus_cache_module(goSlice(asm), hashToBytes32(moduleHash), u16(version), u32(tag), cbool(debug)) } - state.EvictWasmRust = func(moduleHash common.Hash, version uint16, debug bool) { - C.stylus_evict_module(hashToBytes32(moduleHash), u16(version), cbool(debug)) + state.EvictWasmRust = func(moduleHash common.Hash, version uint16, tag uint32, debug bool) { + C.stylus_evict_module(hashToBytes32(moduleHash), u16(version), u32(tag), cbool(debug)) } } +func ResizeWasmLruCache(size uint32) { + C.stylus_cache_lru_resize(u32(size)) +} + func (value bytes32) toHash() common.Hash { hash := common.Hash{} for index, b := range value.bytes { @@ -236,18 +309,18 @@ func goSlice(slice []byte) C.GoSliceData { } } -func (params *goParams) encode() C.StylusConfig { +func (params *ProgParams) encode() C.StylusConfig { pricing := C.PricingParams{ - ink_price: u32(params.inkPrice.ToUint32()), + ink_price: u32(params.InkPrice.ToUint32()), } return C.StylusConfig{ - version: u16(params.version), - max_depth: u32(params.maxDepth), + version: u16(params.Version), + max_depth: u32(params.MaxDepth), pricing: pricing, } } -func (data *evmData) encode() C.EvmData { +func (data *EvmData) encode() C.EvmData { return C.EvmData{ block_basefee: hashToBytes32(data.blockBasefee), chainid: u64(data.chainId), diff --git a/arbos/programs/params.go b/arbos/programs/params.go index 6138e36033..a0b8acd95c 100644 --- a/arbos/programs/params.go +++ b/arbos/programs/params.go @@ -5,6 +5,7 @@ package programs import ( "errors" + "fmt" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -29,6 +30,8 @@ const initialExpiryDays = 365 // deactivate after 1 year. const initialKeepaliveDays = 31 // wait a month before allowing reactivation. const initialRecentCacheSize = 32 // cache the 32 most recent programs. +const v2MinInitGas = 69 // charge 69 * 128 = 8832 gas (minCachedGas will also be charged in v2). 
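As a quick sanity check on the constant above, here is the arithmetic behind the v2 minimum init charge as a runnable sketch (illustrative only, not part of the patch; the constant names simply mirror the ones in this file).

package main

import "fmt"

func main() {
	// MinInitGas is stored in units of 128 gas (MinInitGasUnits below), so the
	// v2 floor set above works out to:
	const minInitGasUnits = 128
	const v2MinInitGas = 69
	fmt.Println(v2MinInitGas * minInitGasUnits) // 8832 gas
	// Per the CallProgram change later in this diff, v2 also charges the cached
	// cost on every call and adds the init cost only for uncached programs, so a
	// cold call pays both components.
}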
+ const MinCachedGasUnits = 32 /// 32 gas for each unit const MinInitGasUnits = 128 // 128 gas for each unit const CostScalarPercent = 2 // 2% for each unit @@ -137,6 +140,18 @@ func (p *StylusParams) Save() error { return nil } +func (p *StylusParams) UpgradeToVersion(version uint16) error { + if version != 2 { + return fmt.Errorf("dest version not supported for upgrade") + } + if p.Version != 1 { + return fmt.Errorf("existing version not supported for upgrade") + } + p.Version = 2 + p.MinInitGas = v2MinInitGas + return nil +} + func initStylusParams(sto *storage.Storage) { params := &StylusParams{ backingStorage: sto, diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index 779f2d6c67..12102bac84 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" + gethParams "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbcompress" "github.com/offchainlabs/nitro/arbos/addressSet" "github.com/offchainlabs/nitro/arbos/storage" @@ -120,14 +121,13 @@ func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, runMode c return 0, codeHash, common.Hash{}, nil, true, err } - // replace the cached asm + // remove prev asm if cached { oldModuleHash, err := p.moduleHashes.Get(codeHash) if err != nil { return 0, codeHash, common.Hash{}, nil, true, err } evictProgram(statedb, oldModuleHash, currentVersion, debugMode, runMode, expired) - cacheProgram(statedb, info.moduleHash, stylusVersion, debugMode, runMode) } if err := p.moduleHashes.Set(codeHash, info.moduleHash); err != nil { return 0, codeHash, common.Hash{}, nil, true, err @@ -152,20 +152,29 @@ func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, runMode c activatedAt: hoursSinceArbitrum(time), cached: cached, } + // replace the cached asm + if cached { + code := statedb.GetCode(address) + cacheProgram(statedb, info.moduleHash, programData, address, code, codeHash, params, debugMode, time, runMode) + } + return stylusVersion, codeHash, info.moduleHash, dataFee, false, p.setProgram(codeHash, programData) } func (p Programs) CallProgram( scope *vm.ScopeContext, statedb vm.StateDB, + arbosVersion uint64, interpreter *vm.EVMInterpreter, tracingInfo *util.TracingInfo, calldata []byte, reentrant bool, + runmode core.MessageRunMode, ) ([]byte, error) { evm := interpreter.Evm() contract := scope.Contract codeHash := contract.CodeHash + startingGas := contract.Gas debugMode := evm.ChainConfig().DebugMode() params, err := p.Params() @@ -181,7 +190,7 @@ func (p Programs) CallProgram( if err != nil { return nil, err } - goParams := p.goParams(program.version, debugMode, params) + goParams := p.progParams(program.version, debugMode, params) l1BlockNumber, err := evm.ProcessingHook.L1BlockNumber(evm.Context) if err != nil { return nil, err @@ -194,9 +203,10 @@ func (p Programs) CallProgram( // pay for program init cached := program.cached || statedb.GetRecentWasms().Insert(codeHash, params.BlockCacheSize) - if cached { + if cached || program.version > 1 { // in version 1 cached cost is part of init cost callCost = am.SaturatingUAdd(callCost, program.cachedGas(params)) - } else { + } + if !cached { callCost = am.SaturatingUAdd(callCost, program.initGas(params)) } if err := contract.BurnGas(callCost); err != nil { @@ -205,7 +215,13 @@ func (p Programs) CallProgram( statedb.AddStylusPages(program.footprint) defer 
statedb.SetStylusPagesOpen(open) - evmData := &evmData{ + localAsm, err := getLocalAsm(statedb, moduleHash, contract.Address(), contract.Code, contract.CodeHash, params.PageLimit, evm.Context.Time, debugMode, program) + if err != nil { + log.Crit("failed to get local wasm for activated program", "program", contract.Address()) + return nil, err + } + + evmData := &EvmData{ blockBasefee: common.BigToHash(evm.Context.BaseFee), chainId: evm.ChainConfig().ChainID.Uint64(), blockCoinbase: evm.Context.Coinbase, @@ -227,11 +243,38 @@ func (p Programs) CallProgram( if contract.CodeAddr != nil { address = *contract.CodeAddr } - return callProgram(address, moduleHash, scope, interpreter, tracingInfo, calldata, evmData, goParams, model) + var arbos_tag uint32 + if runmode == core.MessageCommitMode { + arbos_tag = statedb.Database().WasmCacheTag() + } + ret, err := callProgram(address, moduleHash, localAsm, scope, interpreter, tracingInfo, calldata, evmData, goParams, model, arbos_tag) + if len(ret) > 0 && arbosVersion >= gethParams.ArbosVersion_StylusFixes { + // Ensure that return data costs as least as much as it would in the EVM. + evmCost := evmMemoryCost(uint64(len(ret))) + if startingGas < evmCost { + contract.Gas = 0 + return nil, vm.ErrOutOfGas + } + maxGasToReturn := startingGas - evmCost + contract.Gas = am.MinInt(contract.Gas, maxGasToReturn) + } + return ret, err +} + +func evmMemoryCost(size uint64) uint64 { + // It would take 100GB to overflow this calculation, so no need to worry about that + words := (size + 31) / 32 + linearCost := words * gethParams.MemoryGas + squareCost := (words * words) / gethParams.QuadCoeffDiv + return linearCost + squareCost } func getWasm(statedb vm.StateDB, program common.Address) ([]byte, error) { prefixedWasm := statedb.GetCode(program) + return getWasmFromContractCode(prefixedWasm) +} + +func getWasmFromContractCode(prefixedWasm []byte) ([]byte, error) { if prefixedWasm == nil { return nil, ProgramNotWasmError() } @@ -342,10 +385,13 @@ func (p Programs) ProgramCached(codeHash common.Hash) (bool, error) { } // Sets whether a program is cached. Errors if trying to cache an expired program. +// `address` must be present if setting cache to true as of ArbOS 31, +// and if `address` is present it must have the specified codeHash. func (p Programs) SetProgramCached( emitEvent func() error, db vm.StateDB, codeHash common.Hash, + address common.Address, cache bool, time uint64, params *StylusParams, @@ -358,8 +404,8 @@ func (p Programs) SetProgramCached( } expired := program.ageSeconds > am.DaysToSeconds(params.ExpiryDays) - if program.version == 0 && cache { - return ProgramNeedsUpgradeError(0, params.Version) + if program.version != params.Version && cache { + return ProgramNeedsUpgradeError(program.version, params.Version) } if expired && cache { return ProgramExpiredError(program.ageSeconds) @@ -380,7 +426,12 @@ func (p Programs) SetProgramCached( return err } if cache { - cacheProgram(db, moduleHash, program.version, debug, runMode) + // Not passing in an address is supported pre-Verkle, as in Blockchain's ContractCodeWithPrefix method. 
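To make the return-data pricing added earlier in this file's diff concrete, here is a worked example of the evmMemoryCost formula as a standalone sketch (not part of the patch); the geth constants it inlines are MemoryGas = 3 and QuadCoeffDiv = 512.

package main

import "fmt"

// evmReturnDataCost mirrors evmMemoryCost above: the EVM memory expansion price
// for size bytes, used so Stylus return data costs at least as much as holding
// the same bytes in EVM memory would.
func evmReturnDataCost(size uint64) uint64 {
	words := (size + 31) / 32
	return words*3 + (words*words)/512
}

func main() {
	fmt.Println(evmReturnDataCost(1024))    // 32 words: 96 linear + 2 quadratic = 98 gas
	fmt.Println(evmReturnDataCost(1 << 20)) // 1 MiB: 98304 linear + 2097152 quadratic gas
}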
+ code, err := db.Database().ContractCode(common.Address{}, codeHash) + if err != nil { + return err + } + cacheProgram(db, moduleHash, program, address, code, codeHash, params, debug, time, runMode) } else { evictProgram(db, moduleHash, program.version, debug, runMode, expired) } @@ -412,7 +463,12 @@ func (p Programs) ProgramTimeLeft(codeHash common.Hash, time uint64, params *Sty func (p Programs) ProgramInitGas(codeHash common.Hash, time uint64, params *StylusParams) (uint64, uint64, error) { program, err := p.getActiveProgram(codeHash, time, params) - return program.initGas(params), program.cachedGas(params), err + cachedGas := program.cachedGas(params) + initGas := program.initGas(params) + if params.Version > 1 { + initGas += cachedGas + } + return initGas, cachedGas, err } func (p Programs) ProgramMemoryFootprint(codeHash common.Hash, time uint64, params *StylusParams) (uint16, error) { @@ -444,23 +500,23 @@ func (p Program) cachedGas(params *StylusParams) uint64 { return am.SaturatingUAdd(base, am.DivCeil(dyno, 100)) } -type goParams struct { - version uint16 - maxDepth uint32 - inkPrice uint24 - debugMode bool +type ProgParams struct { + Version uint16 + MaxDepth uint32 + InkPrice uint24 + DebugMode bool } -func (p Programs) goParams(version uint16, debug bool, params *StylusParams) *goParams { - return &goParams{ - version: version, - maxDepth: params.MaxStackDepth, - inkPrice: params.InkPrice, - debugMode: debug, +func (p Programs) progParams(version uint16, debug bool, params *StylusParams) *ProgParams { + return &ProgParams{ + Version: version, + MaxDepth: params.MaxStackDepth, + InkPrice: params.InkPrice, + DebugMode: debug, } } -type evmData struct { +type EvmData struct { blockBasefee common.Hash chainId uint64 blockCoinbase common.Address @@ -517,12 +573,12 @@ func (status userStatus) toResult(data []byte, debug bool) ([]byte, string, erro // Hours since Arbitrum began, rounded down. func hoursSinceArbitrum(time uint64) uint24 { - return uint24((time - lastUpdateTimeOffset) / 3600) + return am.SaturatingUUCast[uint24]((am.SaturatingUSub(time, ArbitrumStartTime)) / 3600) } // Computes program age in seconds from the hours passed since Arbitrum began. 
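A quick worked example of this hour-bucket arithmetic, as a standalone sketch that is not part of the patch: hoursSince and age below mirror hoursSinceArbitrum above and hoursToAge below, with the saturating helpers written out inline.

package main

import "fmt"

const arbitrumStartTime = 1421388000 // mirrors ArbitrumStartTime above (mid-January 2015)

// hoursSince mirrors the fixed hoursSinceArbitrum: it saturates at zero for
// timestamps before the start time instead of underflowing, then buckets into hours.
func hoursSince(time uint64) uint64 {
	if time < arbitrumStartTime {
		return 0
	}
	return (time - arbitrumStartTime) / 3600
}

// age mirrors hoursToAge: seconds elapsed since the given activation hour-bucket.
func age(time uint64, hours uint64) uint64 {
	activatedAt := arbitrumStartTime + hours*3600
	if time < activatedAt {
		return 0
	}
	return time - activatedAt
}

func main() {
	t := uint64(arbitrumStartTime + 2*3600 + 59) // two hours and 59 seconds after the start time
	fmt.Println(hoursSince(t)) // 2
	fmt.Println(age(t, 1))     // 3659: activated in hour bucket 1, so one hour and 59 seconds old
}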
func hoursToAge(time uint64, hours uint24) uint64 { seconds := am.SaturatingUMul(uint64(hours), 3600) - activatedAt := am.SaturatingUAdd(lastUpdateTimeOffset, seconds) + activatedAt := am.SaturatingUAdd(ArbitrumStartTime, seconds) return am.SaturatingUSub(time, activatedAt) } diff --git a/arbos/programs/wasm.go b/arbos/programs/wasm.go index 77eb7e0f2f..f7191dca8f 100644 --- a/arbos/programs/wasm.go +++ b/arbos/programs/wasm.go @@ -95,7 +95,7 @@ func activateProgram( } // stub any non-consensus, Rust-side caching updates -func cacheProgram(db vm.StateDB, module common.Hash, version uint16, debug bool, mode core.MessageRunMode) { +func cacheProgram(db vm.StateDB, module common.Hash, program Program, addressForLogging common.Address, code []byte, codeHash common.Hash, params *StylusParams, debug bool, time uint64, runMode core.MessageRunMode) { } func evictProgram(db vm.StateDB, module common.Hash, version uint16, debug bool, mode core.MessageRunMode, forever bool) { } @@ -128,21 +128,39 @@ func startProgram(module uint32) uint32 //go:wasmimport programs send_response func sendResponse(req_id uint32) uint32 +func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, addressForLogging common.Address, code []byte, codeHash common.Hash, pagelimit uint16, time uint64, debugMode bool, program Program) ([]byte, error) { + return nil, nil +} + func callProgram( address common.Address, moduleHash common.Hash, + _localAsm []byte, scope *vm.ScopeContext, interpreter *vm.EVMInterpreter, tracingInfo *util.TracingInfo, calldata []byte, - evmData *evmData, - params *goParams, + evmData *EvmData, + params *ProgParams, memoryModel *MemoryModel, + _arbos_tag uint32, ) ([]byte, error) { reqHandler := newApiClosures(interpreter, tracingInfo, scope, memoryModel) + gasLeft, retData, err := CallProgramLoop(moduleHash, calldata, scope.Contract.Gas, evmData, params, reqHandler) + scope.Contract.Gas = gasLeft + return retData, err +} + +func CallProgramLoop( + moduleHash common.Hash, + calldata []byte, + gas uint64, + evmData *EvmData, + params *ProgParams, + reqHandler RequestHandler) (uint64, []byte, error) { configHandler := params.createHandler() dataHandler := evmData.createHandler() - debug := params.debugMode + debug := params.DebugMode module := newProgram( unsafe.Pointer(&moduleHash[0]), @@ -150,7 +168,7 @@ func callProgram( uint32(len(calldata)), configHandler, dataHandler, - scope.Contract.Gas, + gas, ) reqId := startProgram(module) for { @@ -162,12 +180,11 @@ func callProgram( popProgram() status := userStatus(reqTypeId) gasLeft := arbmath.BytesToUint(reqData[:8]) - scope.Contract.Gas = gasLeft data, msg, err := status.toResult(reqData[8:], debug) if status == userFailure && debug { - log.Warn("program failure", "err", err, "msg", msg, "program", address) + log.Warn("program failure", "err", err, "msg", msg, "moduleHash", moduleHash) } - return data, err + return gasLeft, data, err } reqType := RequestType(reqTypeId - EvmApiMethodReqOffset) diff --git a/arbos/programs/wasm_api.go b/arbos/programs/wasm_api.go index fb0f731402..d7bac056c0 100644 --- a/arbos/programs/wasm_api.go +++ b/arbos/programs/wasm_api.go @@ -38,12 +38,12 @@ func createEvmData( reentrant uint32, ) evmDataHandler -func (params *goParams) createHandler() stylusConfigHandler { - debug := arbmath.BoolToUint32(params.debugMode) - return createStylusConfig(uint32(params.version), params.maxDepth, params.inkPrice.ToUint32(), debug) +func (params *ProgParams) createHandler() stylusConfigHandler { + debug := 
arbmath.BoolToUint32(params.DebugMode) + return createStylusConfig(uint32(params.Version), params.MaxDepth, params.InkPrice.ToUint32(), debug) } -func (data *evmData) createHandler() evmDataHandler { +func (data *EvmData) createHandler() evmDataHandler { return createEvmData( arbutil.SliceToUnsafePointer(data.blockBasefee[:]), data.chainId, diff --git a/arbos/programs/wasmstorehelper.go b/arbos/programs/wasmstorehelper.go new file mode 100644 index 0000000000..9e69178694 --- /dev/null +++ b/arbos/programs/wasmstorehelper.go @@ -0,0 +1,80 @@ +// Copyright 2022-2024, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +//go:build !wasm +// +build !wasm + +package programs + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/log" +) + +// SaveActiveProgramToWasmStore is used to save active stylus programs to wasm store during rebuilding +func (p Programs) SaveActiveProgramToWasmStore(statedb *state.StateDB, codeHash common.Hash, code []byte, time uint64, debugMode bool, rebuildingStartBlockTime uint64) error { + params, err := p.Params() + if err != nil { + return err + } + + program, err := p.getActiveProgram(codeHash, time, params) + if err != nil { + // The program is not active so return early + log.Info("program is not active, getActiveProgram returned error, hence do not include in rebuilding", "err", err) + return nil + } + + // It might happen that node crashed some time after rebuilding commenced and before it completed, hence when rebuilding + // resumes after node is restarted the latest diskdb derived from statedb might now have codehashes that were activated + // during the last rebuilding session. 
In such cases we don't need to fetch module hashes but instead return early + since they would already be added to the wasm store + currentHoursSince := hoursSinceArbitrum(rebuildingStartBlockTime) + if currentHoursSince < program.activatedAt { + return nil + } + + moduleHash, err := p.moduleHashes.Get(codeHash) + if err != nil { + return err + } + + // If already in wasm store then return early + localAsm, err := statedb.TryGetActivatedAsm(moduleHash) + if err == nil && len(localAsm) > 0 { + return nil + } + + wasm, err := getWasmFromContractCode(code) + if err != nil { + log.Error("Failed to reactivate program while rebuilding wasm store: getWasmFromContractCode", "expected moduleHash", moduleHash, "err", err) + return fmt.Errorf("failed to reactivate program while rebuilding wasm store: %w", err) + } + + unlimitedGas := uint64(0xffffffffffff) + // We know program is activated, so it must be in correct version and not use too much memory + // Empty program address is supplied because we don't have access to this during rebuilding of wasm store + info, asm, module, err := activateProgramInternal(statedb, common.Address{}, codeHash, wasm, params.PageLimit, program.version, debugMode, &unlimitedGas) + if err != nil { + log.Error("failed to reactivate program while rebuilding wasm store", "expected moduleHash", moduleHash, "err", err) + return fmt.Errorf("failed to reactivate program while rebuilding wasm store: %w", err) + } + + if info.moduleHash != moduleHash { + log.Error("failed to reactivate program while rebuilding wasm store", "expected moduleHash", moduleHash, "got", info.moduleHash) + return fmt.Errorf("failed to reactivate program while rebuilding wasm store, expected ModuleHash: %v", moduleHash) + } + + batch := statedb.Database().WasmStore().NewBatch() + rawdb.WriteActivation(batch, moduleHash, asm, module) + if err := batch.Write(); err != nil { + log.Error("failed writing re-activation to state while rebuilding wasm store", "err", err) + return err + } + + return nil +} diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index b5fb64f695..b08c7c5d30 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -123,10 +123,12 @@ func (p *TxProcessor) ExecuteWASM(scope *vm.ScopeContext, input []byte, interpre return p.state.Programs().CallProgram( scope, p.evm.StateDB, + p.state.ArbOSVersion(), interpreter, tracingInfo, input, reentrant, + p.RunMode(), ) } @@ -282,15 +284,10 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r } balance := statedb.GetBalance(tx.From) + // evm.Context.BaseFee is already lowered to 0 when vm runs with NoBaseFee flag and 0 gas price effectiveBaseFee := evm.Context.BaseFee usergas := p.msg.GasLimit - if p.msg.TxRunMode != core.MessageCommitMode && p.msg.GasFeeCap.BitLen() == 0 { - // In gas estimation or eth_call mode, we permit a zero gas fee cap. - // This matches behavior with normal tx gas estimation and eth_call.
- effectiveBaseFee = common.Big0 - } - maxGasCost := arbmath.BigMulByUint(tx.GasFeeCap, usergas) maxFeePerGasTooLow := arbmath.BigLessThan(tx.GasFeeCap, effectiveBaseFee) if arbmath.BigLessThan(balance.ToBig(), maxGasCost) || usergas < params.TxGas || maxFeePerGasTooLow { @@ -432,16 +429,21 @@ func (p *TxProcessor) GasChargingHook(gasRemaining *uint64) (common.Address, err var gasNeededToStartEVM uint64 tipReceipient, _ := p.state.NetworkFeeAccount() - basefee := p.evm.Context.BaseFee + var basefee *big.Int + if p.evm.Context.BaseFeeInBlock != nil { + basefee = p.evm.Context.BaseFeeInBlock + } else { + basefee = p.evm.Context.BaseFee + } var poster common.Address - if p.msg.TxRunMode != core.MessageCommitMode { + if !p.msg.TxRunMode.ExecutedOnChain() { poster = l1pricing.BatchPosterAddress } else { poster = p.evm.Context.Coinbase } - if p.msg.TxRunMode == core.MessageCommitMode { + if p.msg.TxRunMode.ExecutedOnChain() { p.msg.SkipL1Charging = false } if basefee.Sign() > 0 && !p.msg.SkipL1Charging { @@ -508,7 +510,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { if underlyingTx != nil && underlyingTx.Type() == types.ArbitrumRetryTxType { inner, _ := underlyingTx.GetInner().(*types.ArbitrumRetryTx) effectiveBaseFee := inner.GasFeeCap - if p.msg.TxRunMode == core.MessageCommitMode && !arbmath.BigEquals(effectiveBaseFee, p.evm.Context.BaseFee) { + if p.msg.TxRunMode.ExecutedOnChain() && !arbmath.BigEquals(effectiveBaseFee, p.evm.Context.BaseFee) { log.Error( "ArbitrumRetryTx GasFeeCap doesn't match basefee in commit mode", "txHash", underlyingTx.Hash(), @@ -593,7 +595,12 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { return } - basefee := p.evm.Context.BaseFee + var basefee *big.Int + if p.evm.Context.BaseFeeInBlock != nil { + basefee = p.evm.Context.BaseFeeInBlock + } else { + basefee = p.evm.Context.BaseFee + } totalCost := arbmath.BigMul(basefee, arbmath.UintToBig(gasUsed)) // total cost = price of gas * gas burnt computeCost := arbmath.BigSub(totalCost, p.PosterFee) // total cost = network's compute + poster's L1 costs if computeCost.Sign() < 0 { @@ -655,15 +662,10 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { func (p *TxProcessor) ScheduledTxes() types.Transactions { scheduled := types.Transactions{} time := p.evm.Context.Time + // p.evm.Context.BaseFee is already lowered to 0 when vm runs with NoBaseFee flag and 0 gas price effectiveBaseFee := p.evm.Context.BaseFee chainID := p.evm.ChainConfig().ChainID - if p.msg.TxRunMode != core.MessageCommitMode && p.msg.GasFeeCap.BitLen() == 0 { - // In gas estimation or eth_call mode, we permit a zero gas fee cap. - // This matches behavior with normal tx gas estimation and eth_call. 
- effectiveBaseFee = common.Big0 - } - logs := p.evm.StateDB.GetCurrentTxLogs() for _, log := range logs { if log.Address != ArbRetryableTxAddress || log.Topics[0] != RedeemScheduledEventID { @@ -737,10 +739,8 @@ func (p *TxProcessor) GetPaidGasPrice() *big.Int { gasPrice := p.evm.GasPrice version := p.state.ArbOSVersion() if version != 9 { + // p.evm.Context.BaseFee is already lowered to 0 when vm runs with NoBaseFee flag and 0 gas price gasPrice = p.evm.Context.BaseFee - if p.msg.TxRunMode != core.MessageCommitMode && p.msg.GasFeeCap.Sign() == 0 { - gasPrice = common.Big0 - } } return gasPrice } diff --git a/arbstate/daprovider/reader.go b/arbstate/daprovider/reader.go index 560af3af1d..488b156454 100644 --- a/arbstate/daprovider/reader.go +++ b/arbstate/daprovider/reader.go @@ -30,12 +30,16 @@ type Reader interface { // NewReaderForDAS is generally meant to be only used by nitro. // DA Providers should implement methods in the Reader interface independently -func NewReaderForDAS(dasReader DASReader) *readerForDAS { - return &readerForDAS{dasReader: dasReader} +func NewReaderForDAS(dasReader DASReader, keysetFetcher DASKeysetFetcher) *readerForDAS { + return &readerForDAS{ + dasReader: dasReader, + keysetFetcher: keysetFetcher, + } } type readerForDAS struct { - dasReader DASReader + dasReader DASReader + keysetFetcher DASKeysetFetcher } func (d *readerForDAS) IsValidHeaderByte(headerByte byte) bool { @@ -50,7 +54,7 @@ func (d *readerForDAS) RecoverPayloadFromBatch( preimageRecorder PreimageRecorder, validateSeqMsg bool, ) ([]byte, error) { - return RecoverPayloadFromDasBatch(ctx, batchNum, sequencerMsg, d.dasReader, preimageRecorder, validateSeqMsg) + return RecoverPayloadFromDasBatch(ctx, batchNum, sequencerMsg, d.dasReader, d.keysetFetcher, preimageRecorder, validateSeqMsg) } // NewReaderForBlobReader is generally meant to be only used by nitro. diff --git a/arbstate/daprovider/util.go b/arbstate/daprovider/util.go index 054bde5503..8f880b9228 100644 --- a/arbstate/daprovider/util.go +++ b/arbstate/daprovider/util.go @@ -30,10 +30,14 @@ type DASReader interface { type DASWriter interface { // Store requests that the message be stored until timeout (UTC time in unix epoch seconds). 
- Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*DataAvailabilityCertificate, error) + Store(ctx context.Context, message []byte, timeout uint64) (*DataAvailabilityCertificate, error) fmt.Stringer } +type DASKeysetFetcher interface { + GetKeysetByHash(context.Context, common.Hash) ([]byte, error) +} + type BlobReader interface { GetBlobs( ctx context.Context, @@ -138,6 +142,7 @@ func RecoverPayloadFromDasBatch( batchNum uint64, sequencerMsg []byte, dasReader DASReader, + keysetFetcher DASKeysetFetcher, preimageRecorder PreimageRecorder, validateSeqMsg bool, ) ([]byte, error) { @@ -181,7 +186,7 @@ func RecoverPayloadFromDasBatch( return preimage, nil } - keysetPreimage, err := getByHash(ctx, cert.KeysetHash) + keysetPreimage, err := keysetFetcher.GetKeysetByHash(ctx, cert.KeysetHash) if err != nil { log.Error("Couldn't get keyset", "err", err) return nil, err diff --git a/arbstate/daprovider/writer.go b/arbstate/daprovider/writer.go index 75b356c4b8..a26e53c94d 100644 --- a/arbstate/daprovider/writer.go +++ b/arbstate/daprovider/writer.go @@ -17,7 +17,6 @@ type Writer interface { ctx context.Context, message []byte, timeout uint64, - sig []byte, disableFallbackStoreDataOnChain bool, ) ([]byte, error) } @@ -32,8 +31,8 @@ type writerForDAS struct { dasWriter DASWriter } -func (d *writerForDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte, disableFallbackStoreDataOnChain bool) ([]byte, error) { - cert, err := d.dasWriter.Store(ctx, message, timeout, []byte{}) +func (d *writerForDAS) Store(ctx context.Context, message []byte, timeout uint64, disableFallbackStoreDataOnChain bool) ([]byte, error) { + cert, err := d.dasWriter.Store(ctx, message, timeout) if errors.Is(err, ErrBatchToDasFailed) { if disableFallbackStoreDataOnChain { return nil, errors.New("unable to batch to DAS and fallback storing data on chain is disabled") diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go index bb6de00cad..1e4a06fe90 100644 --- a/blocks_reexecutor/blocks_reexecutor.go +++ b/blocks_reexecutor/blocks_reexecutor.go @@ -35,17 +35,16 @@ func (c *Config) Validate() error { if c.EndBlock < c.StartBlock { return errors.New("invalid block range for blocks re-execution") } - if c.Room == 0 { - return errors.New("room for blocks re-execution cannot be zero") + if c.Room <= 0 { + return errors.New("room for blocks re-execution should be greater than 0") } return nil } var DefaultConfig = Config{ - Enable: false, - Mode: "random", - Room: runtime.NumCPU(), - BlocksPerThread: 10000, + Enable: false, + Mode: "random", + Room: runtime.NumCPU(), } var TestConfig = Config{ @@ -66,13 +65,14 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) { type BlocksReExecutor struct { stopwaiter.StopWaiter - config *Config - blockchain *core.BlockChain - stateFor arbitrum.StateForHeaderFunction - done chan struct{} - fatalErrChan chan error - startBlock uint64 - currentBlock uint64 + config *Config + blockchain *core.BlockChain + stateFor arbitrum.StateForHeaderFunction + done chan struct{} + fatalErrChan chan error + startBlock uint64 + currentBlock uint64 + blocksPerThread uint64 } func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *BlocksReExecutor { @@ -84,32 +84,47 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block start = chainStart end = chainEnd } - if start < chainStart { - log.Warn("state reexecutor's start block number is lower than genesis, resetting to genesis") + if start < 
chainStart || start > chainEnd { + log.Warn("invalid state reexecutor's start block number, resetting to genesis", "start", start, "genesis", chainStart) start = chainStart } - if end > chainEnd { - log.Warn("state reexecutor's end block number is greater than latest, resetting to latest") + if end > chainEnd || end < chainStart { + log.Warn("invalid state reexecutor's end block number, resetting to latest", "end", end, "latest", chainEnd) end = chainEnd } + blocksPerThread := uint64(10000) + if c.BlocksPerThread != 0 { + blocksPerThread = c.BlocksPerThread + } if c.Mode == "random" && end != start { - if c.BlocksPerThread > end-start { - c.BlocksPerThread = end - start + // Reexecute a range of 10000 or (non-zero) c.BlocksPerThread number of blocks between start to end picked randomly + rng := blocksPerThread + if rng > end-start { + rng = end - start } - start += uint64(rand.Intn(int(end - start - c.BlocksPerThread + 1))) - end = start + c.BlocksPerThread + start += uint64(rand.Intn(int(end - start - rng + 1))) + end = start + rng } - // inclusive of block reexecution [start, end] - if start > 0 { + // Inclusive of block reexecution [start, end] + // Do not reexecute genesis block i,e chainStart + if start > 0 && start != chainStart { start-- } + // Divide work equally among available threads when BlocksPerThread is zero + if c.BlocksPerThread == 0 { + work := (end - start) / uint64(c.Room) + if work > 0 { + blocksPerThread = work + } + } return &BlocksReExecutor{ - config: c, - blockchain: blockchain, - currentBlock: end, - startBlock: start, - done: make(chan struct{}, c.Room), - fatalErrChan: fatalErrChan, + config: c, + blockchain: blockchain, + currentBlock: end, + startBlock: start, + blocksPerThread: blocksPerThread, + done: make(chan struct{}, c.Room), + fatalErrChan: fatalErrChan, stateFor: func(header *types.Header) (*state.StateDB, arbitrum.StateReleaseFunc, error) { state, err := blockchain.StateAt(header.Root) return state, arbitrum.NoopStateRelease, err @@ -119,17 +134,17 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block // LaunchBlocksReExecution launches the thread to apply blocks of range [currentBlock-s.config.BlocksPerThread, currentBlock] to the last available valid state func (s *BlocksReExecutor) LaunchBlocksReExecution(ctx context.Context, currentBlock uint64) uint64 { - start := arbmath.SaturatingUSub(currentBlock, s.config.BlocksPerThread) + start := arbmath.SaturatingUSub(currentBlock, s.blocksPerThread) if start < s.startBlock { start = s.startBlock } - // we don't use state release pattern here - // TODO do we want to use release pattern here? 
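The New() changes above derive a per-thread block count instead of relying on a non-zero BlocksPerThread default. A standalone sketch of that derivation (illustrative only, not part of the patch; it ignores the random-mode range shrinking shown above):

package main

import "fmt"

// blocksPerThread mirrors the new logic in New(): an explicit BlocksPerThread wins;
// otherwise the [start, end] range is split evenly across Room threads, falling back
// to the 10000-block default when the range is too small to split.
func blocksPerThread(configured, start, end, room uint64) uint64 {
	per := uint64(10000)
	if configured != 0 {
		per = configured
	} else if work := (end - start) / room; work > 0 {
		per = work
	}
	return per
}

func main() {
	fmt.Println(blocksPerThread(0, 0, 1_000_000, 8))   // 125000: range split across 8 threads
	fmt.Println(blocksPerThread(0, 0, 4, 8))           // 10000: range smaller than room, use default
	fmt.Println(blocksPerThread(500, 0, 1_000_000, 8)) // 500: explicit setting wins
}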
- startState, startHeader, _, err := arbitrum.FindLastAvailableState(ctx, s.blockchain, s.stateFor, s.blockchain.GetHeaderByNumber(start), nil, -1) + startState, startHeader, release, err := arbitrum.FindLastAvailableState(ctx, s.blockchain, s.stateFor, s.blockchain.GetHeaderByNumber(start), nil, -1) if err != nil { s.fatalErrChan <- fmt.Errorf("blocksReExecutor failed to get last available state while searching for state at %d, err: %w", start, err) return s.startBlock } + // NoOp + defer release() start = startHeader.Number.Uint64() s.LaunchThread(func(ctx context.Context) { _, err := arbitrum.AdvanceStateUpToBlock(ctx, s.blockchain, startState, s.blockchain.GetHeaderByNumber(currentBlock), startHeader, nil) @@ -169,9 +184,14 @@ func (s *BlocksReExecutor) Impl(ctx context.Context) { log.Info("BlocksReExecutor successfully completed re-execution of blocks against historic state", "stateAt", s.startBlock, "startBlock", s.startBlock+1, "endBlock", end) } -func (s *BlocksReExecutor) Start(ctx context.Context) { +func (s *BlocksReExecutor) Start(ctx context.Context, done chan struct{}) { s.StopWaiter.Start(ctx, s) - s.LaunchThread(s.Impl) + s.LaunchThread(func(ctx context.Context) { + s.Impl(ctx) + if done != nil { + close(done) + } + }) } func (s *BlocksReExecutor) StopAndWait() { diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index ac5c6c39da..ba95f2d8af 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -22,11 +22,6 @@ import ( "github.com/offchainlabs/nitro/wsbroadcastserver" ) -type MessageWithMetadataAndBlockHash struct { - Message arbostypes.MessageWithMetadata - BlockHash *common.Hash -} - type Broadcaster struct { server *wsbroadcastserver.WSBroadcastServer backlog backlog.Backlog @@ -98,7 +93,7 @@ func (b *Broadcaster) BroadcastSingleFeedMessage(bfm *m.BroadcastFeedMessage) { } func (b *Broadcaster) BroadcastMessages( - messagesWithBlockHash []MessageWithMetadataAndBlockHash, + messagesWithBlockHash []arbostypes.MessageWithMetadataAndBlockHash, seq arbutil.MessageIndex, ) (err error) { defer func() { @@ -109,7 +104,7 @@ func (b *Broadcaster) BroadcastMessages( }() var feedMessages []*m.BroadcastFeedMessage for i, msg := range messagesWithBlockHash { - bfm, err := b.NewBroadcastFeedMessage(msg.Message, seq+arbutil.MessageIndex(i), msg.BlockHash) + bfm, err := b.NewBroadcastFeedMessage(msg.MessageWithMeta, seq+arbutil.MessageIndex(i), msg.BlockHash) if err != nil { return err } diff --git a/cmd/conf/chain.go b/cmd/conf/chain.go index 531945b4d6..b85f7727b1 100644 --- a/cmd/conf/chain.go +++ b/cmd/conf/chain.go @@ -15,22 +15,21 @@ import ( type ParentChainConfig struct { ID uint64 `koanf:"id"` Connection rpcclient.ClientConfig `koanf:"connection" reload:"hot"` - Wallet genericconf.WalletConfig `koanf:"wallet"` BlobClient headerreader.BlobClientConfig `koanf:"blob-client"` } var L1ConnectionConfigDefault = rpcclient.ClientConfig{ - URL: "", - Retries: 2, - Timeout: time.Minute, - ConnectionWait: time.Minute, - ArgLogLimit: 2048, + URL: "", + Retries: 2, + Timeout: time.Minute, + ConnectionWait: time.Minute, + ArgLogLimit: 2048, + WebsocketMessageSizeLimit: 256 * 1024 * 1024, } var L1ConfigDefault = ParentChainConfig{ ID: 0, Connection: L1ConnectionConfigDefault, - Wallet: DefaultL1WalletConfig, BlobClient: headerreader.DefaultBlobClientConfig, } @@ -45,14 +44,9 @@ var DefaultL1WalletConfig = genericconf.WalletConfig{ func L1ConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".id", L1ConfigDefault.ID, "if set other than 0, 
will be used to validate database and L1 connection") rpcclient.RPCClientAddOptions(prefix+".connection", f, &L1ConfigDefault.Connection) - genericconf.WalletConfigAddOptions(prefix+".wallet", f, L1ConfigDefault.Wallet.Pathname) headerreader.BlobClientAddOptions(prefix+".blob-client", f) } -func (c *ParentChainConfig) ResolveDirectoryNames(chain string) { - c.Wallet.ResolveDirectoryNames(chain) -} - func (c *ParentChainConfig) Validate() error { return c.Connection.Validate() } diff --git a/cmd/conf/database.go b/cmd/conf/database.go index b049375d66..a75cca77d5 100644 --- a/cmd/conf/database.go +++ b/cmd/conf/database.go @@ -5,20 +5,25 @@ package conf import ( "fmt" + "math" "os" "path" "path/filepath" + "runtime" + "time" + "github.com/ethereum/go-ethereum/ethdb/pebble" flag "github.com/spf13/pflag" ) type PersistentConfig struct { - GlobalConfig string `koanf:"global-config"` - Chain string `koanf:"chain"` - LogDir string `koanf:"log-dir"` - Handles int `koanf:"handles"` - Ancient string `koanf:"ancient"` - DBEngine string `koanf:"db-engine"` + GlobalConfig string `koanf:"global-config"` + Chain string `koanf:"chain"` + LogDir string `koanf:"log-dir"` + Handles int `koanf:"handles"` + Ancient string `koanf:"ancient"` + DBEngine string `koanf:"db-engine"` + Pebble PebbleConfig `koanf:"pebble"` } var PersistentConfigDefault = PersistentConfig{ @@ -27,7 +32,8 @@ var PersistentConfigDefault = PersistentConfig{ LogDir: "", Handles: 512, Ancient: "", - DBEngine: "leveldb", + DBEngine: "", // auto-detect database type based on the db dir contents + Pebble: PebbleConfigDefault, } func PersistentConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -36,7 +42,8 @@ func PersistentConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".log-dir", PersistentConfigDefault.LogDir, "directory to store log file") f.Int(prefix+".handles", PersistentConfigDefault.Handles, "number of file descriptor handles to use for the database") f.String(prefix+".ancient", PersistentConfigDefault.Ancient, "directory of ancient where the chain freezer can be opened") - f.String(prefix+".db-engine", PersistentConfigDefault.DBEngine, "backing database implementation to use ('leveldb' or 'pebble')") + f.String(prefix+".db-engine", PersistentConfigDefault.DBEngine, "backing database implementation to use. 
If set to empty string the database type will be autodetected and if no pre-existing database is found it will default to creating new pebble database ('leveldb', 'pebble' or '' = auto-detect)") + PebbleConfigAddOptions(prefix+".pebble", f) } func (c *PersistentConfig) ResolveDirectoryNames() error { @@ -90,9 +97,179 @@ func DatabaseInDirectory(path string) bool { } func (c *PersistentConfig) Validate() error { - // we are validating .db-engine here to avoid unintended behaviour as empty string value also has meaning in geth's node.Config.DBEngine - if c.DBEngine != "leveldb" && c.DBEngine != "pebble" { - return fmt.Errorf(`invalid .db-engine choice: %q, allowed "leveldb" or "pebble"`, c.DBEngine) + if c.DBEngine != "leveldb" && c.DBEngine != "pebble" && c.DBEngine != "" { + return fmt.Errorf(`invalid .db-engine choice: %q, allowed "leveldb", "pebble" or ""`, c.DBEngine) + } + // if DBEngine == "" then we may end up opening pebble database, so we want to validate the Pebble config + // if pre-existing database is leveldb backed, then user shouldn't change the Pebble config defaults => this check should also succeed + if c.DBEngine == "pebble" || c.DBEngine == "" { + if err := c.Pebble.Validate(); err != nil { + return err + } } return nil } + +type PebbleConfig struct { + MaxConcurrentCompactions int `koanf:"max-concurrent-compactions"` + Experimental PebbleExperimentalConfig `koanf:"experimental"` +} + +var PebbleConfigDefault = PebbleConfig{ + MaxConcurrentCompactions: runtime.NumCPU(), + Experimental: PebbleExperimentalConfigDefault, +} + +func PebbleConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Int(prefix+".max-concurrent-compactions", PebbleConfigDefault.MaxConcurrentCompactions, "maximum number of concurrent compactions") + PebbleExperimentalConfigAddOptions(prefix+".experimental", f) +} + +func (c *PebbleConfig) Validate() error { + if c.MaxConcurrentCompactions < 1 { + return fmt.Errorf("invalid .max-concurrent-compactions value: %d, has to be greater then 0", c.MaxConcurrentCompactions) + } + if err := c.Experimental.Validate(); err != nil { + return err + } + return nil +} + +type PebbleExperimentalConfig struct { + BytesPerSync int `koanf:"bytes-per-sync"` + L0CompactionFileThreshold int `koanf:"l0-compaction-file-threshold"` + L0CompactionThreshold int `koanf:"l0-compaction-threshold"` + L0StopWritesThreshold int `koanf:"l0-stop-writes-threshold"` + LBaseMaxBytes int64 `koanf:"l-base-max-bytes"` + MemTableStopWritesThreshold int `koanf:"mem-table-stop-writes-threshold"` + DisableAutomaticCompactions bool `koanf:"disable-automatic-compactions"` + WALBytesPerSync int `koanf:"wal-bytes-per-sync"` + WALDir string `koanf:"wal-dir"` + WALMinSyncInterval int `koanf:"wal-min-sync-interval"` + TargetByteDeletionRate int `koanf:"target-byte-deletion-rate"` + + // level specific + BlockSize int `koanf:"block-size"` + IndexBlockSize int `koanf:"index-block-size"` + TargetFileSize int64 `koanf:"target-file-size"` + TargetFileSizeEqualLevels bool `koanf:"target-file-size-equal-levels"` + + // pebble experimental + L0CompactionConcurrency int `koanf:"l0-compaction-concurrency"` + CompactionDebtConcurrency uint64 `koanf:"compaction-debt-concurrency"` + ReadCompactionRate int64 `koanf:"read-compaction-rate"` + ReadSamplingMultiplier int64 `koanf:"read-sampling-multiplier"` + MaxWriterConcurrency int `koanf:"max-writer-concurrency"` + ForceWriterParallelism bool `koanf:"force-writer-parallelism"` +} + +var PebbleExperimentalConfigDefault = PebbleExperimentalConfig{ + BytesPerSync: 512 
<< 10, // 512 KB + L0CompactionFileThreshold: 500, + L0CompactionThreshold: 4, + L0StopWritesThreshold: 12, + LBaseMaxBytes: 64 << 20, // 64 MB + MemTableStopWritesThreshold: 2, + DisableAutomaticCompactions: false, + WALBytesPerSync: 0, // no background syncing + WALDir: "", // use same dir as for sstables + WALMinSyncInterval: 0, // no artificial delay + TargetByteDeletionRate: 0, // deletion pacing disabled + + BlockSize: 4 << 10, // 4 KB + IndexBlockSize: 4 << 10, // 4 KB + TargetFileSize: 2 << 20, // 2 MB + TargetFileSizeEqualLevels: true, + + L0CompactionConcurrency: 10, + CompactionDebtConcurrency: 1 << 30, // 1GB + ReadCompactionRate: 16000, // see ReadSamplingMultiplier comment + ReadSamplingMultiplier: -1, // geth default, disables read sampling and disables read triggered compaction + MaxWriterConcurrency: 0, + ForceWriterParallelism: false, +} + +func PebbleExperimentalConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Int(prefix+".bytes-per-sync", PebbleExperimentalConfigDefault.BytesPerSync, "number of bytes to write to a SSTable before calling Sync on it in the background") + f.Int(prefix+".l0-compaction-file-threshold", PebbleExperimentalConfigDefault.L0CompactionFileThreshold, "count of L0 files necessary to trigger an L0 compaction") + f.Int(prefix+".l0-compaction-threshold", PebbleExperimentalConfigDefault.L0CompactionThreshold, "amount of L0 read-amplification necessary to trigger an L0 compaction") + f.Int(prefix+".l0-stop-writes-threshold", PebbleExperimentalConfigDefault.L0StopWritesThreshold, "hard limit on L0 read-amplification, computed as the number of L0 sublevels. Writes are stopped when this threshold is reached") + f.Int64(prefix+".l-base-max-bytes", PebbleExperimentalConfigDefault.LBaseMaxBytes, "The maximum number of bytes for LBase. The base level is the level which L0 is compacted into. The base level is determined dynamically based on the existing data in the LSM. The maximum number of bytes for other levels is computed dynamically based on the base level's maximum size. When the maximum number of bytes for a level is exceeded, compaction is requested.") + f.Int(prefix+".mem-table-stop-writes-threshold", PebbleExperimentalConfigDefault.MemTableStopWritesThreshold, "hard limit on the number of queued of MemTables") + f.Bool(prefix+".disable-automatic-compactions", PebbleExperimentalConfigDefault.DisableAutomaticCompactions, "disables automatic compactions") + f.Int(prefix+".wal-bytes-per-sync", PebbleExperimentalConfigDefault.WALBytesPerSync, "number of bytes to write to a write-ahead log (WAL) before calling Sync on it in the background") + f.String(prefix+".wal-dir", PebbleExperimentalConfigDefault.WALDir, "absolute path of directory to store write-ahead logs (WALs) in. If empty, WALs will be stored in the same directory as sstables") + f.Int(prefix+".wal-min-sync-interval", PebbleExperimentalConfigDefault.WALMinSyncInterval, "minimum duration in microseconds between syncs of the WAL. 
If WAL syncs are requested faster than this interval, they will be artificially delayed.") + f.Int(prefix+".target-byte-deletion-rate", PebbleExperimentalConfigDefault.TargetByteDeletionRate, "rate (in bytes per second) at which sstable file deletions are limited to (under normal circumstances).") + f.Int(prefix+".block-size", PebbleExperimentalConfigDefault.BlockSize, "target uncompressed size in bytes of each table block") + f.Int(prefix+".index-block-size", PebbleExperimentalConfigDefault.IndexBlockSize, fmt.Sprintf("target uncompressed size in bytes of each index block. When the index block size is larger than this target, two-level indexes are automatically enabled. Setting this option to a large value (such as %d) disables the automatic creation of two-level indexes.", math.MaxInt32)) + f.Int64(prefix+".target-file-size", PebbleExperimentalConfigDefault.TargetFileSize, "target file size for level 0") + f.Bool(prefix+".target-file-size-equal-levels", PebbleExperimentalConfigDefault.TargetFileSizeEqualLevels, "if true the same target-file-size will be used for all levels, otherwise target size for layer n = 2 * target size for layer n - 1") + + f.Int(prefix+".l0-compaction-concurrency", PebbleExperimentalConfigDefault.L0CompactionConcurrency, "threshold of L0 read-amplification at which compaction concurrency is enabled (if compaction-debt-concurrency was not already exceeded). Every multiple of this value enables another concurrent compaction up to max-concurrent-compactions.") + f.Uint64(prefix+".compaction-debt-concurrency", PebbleExperimentalConfigDefault.CompactionDebtConcurrency, "controls the threshold of compaction debt at which additional compaction concurrency slots are added. For every multiple of this value in compaction debt bytes, an additional concurrent compaction is added. This works \"on top\" of l0-compaction-concurrency, so the higher of the count of compaction concurrency slots as determined by the two options is chosen.") + f.Int64(prefix+".read-compaction-rate", PebbleExperimentalConfigDefault.ReadCompactionRate, "controls the frequency of read triggered compactions by adjusting `AllowedSeeks` in manifest.FileMetadata: AllowedSeeks = FileSize / ReadCompactionRate") + f.Int64(prefix+".read-sampling-multiplier", PebbleExperimentalConfigDefault.ReadSamplingMultiplier, "a multiplier for the readSamplingPeriod in iterator.maybeSampleRead() to control the frequency of read sampling to trigger a read triggered compaction. A value of -1 prevents sampling and disables read triggered compactions. Geth default is -1. The pebble default is 1 << 4, which gets multiplied with a constant of 1 << 16 to yield 1 << 20 (1MB).") + f.Int(prefix+".max-writer-concurrency", PebbleExperimentalConfigDefault.MaxWriterConcurrency, "maximum number of compression workers the compression queue is allowed to use. If max-writer-concurrency > 0, then the Writer will use parallelism to compress and write blocks to disk. Otherwise, the writer will compress and write blocks to disk synchronously.") + f.Bool(prefix+".force-writer-parallelism", PebbleExperimentalConfigDefault.ForceWriterParallelism, "force parallelism in the sstable Writer for the metamorphic tests.
Even with the MaxWriterConcurrency option set, pebble only enables parallelism in the sstable Writer if there is enough CPU available, and this option bypasses that.") +} + +func (c *PebbleExperimentalConfig) Validate() error { + if c.WALDir != "" && !filepath.IsAbs(c.WALDir) { + return fmt.Errorf("invalid .wal-dir directory (%s) - has to be an absolute path", c.WALDir) + } + // TODO + return nil +} + +func (c *PebbleConfig) ExtraOptions(namespace string) *pebble.ExtraOptions { + var maxConcurrentCompactions func() int + if c.MaxConcurrentCompactions > 0 { + maxConcurrentCompactions = func() int { return c.MaxConcurrentCompactions } + } + var walMinSyncInterval func() time.Duration + if c.Experimental.WALMinSyncInterval > 0 { + walMinSyncInterval = func() time.Duration { + return time.Microsecond * time.Duration(c.Experimental.WALMinSyncInterval) + } + } + var levels []pebble.ExtraLevelOptions + for i := 0; i < 7; i++ { + targetFileSize := c.Experimental.TargetFileSize + if !c.Experimental.TargetFileSizeEqualLevels { + targetFileSize = targetFileSize << i + } + levels = append(levels, pebble.ExtraLevelOptions{ + BlockSize: c.Experimental.BlockSize, + IndexBlockSize: c.Experimental.IndexBlockSize, + TargetFileSize: targetFileSize, + }) + } + walDir := c.Experimental.WALDir + if walDir != "" { + walDir = path.Join(walDir, namespace) + } + return &pebble.ExtraOptions{ + BytesPerSync: c.Experimental.BytesPerSync, + L0CompactionFileThreshold: c.Experimental.L0CompactionFileThreshold, + L0CompactionThreshold: c.Experimental.L0CompactionThreshold, + L0StopWritesThreshold: c.Experimental.L0StopWritesThreshold, + LBaseMaxBytes: c.Experimental.LBaseMaxBytes, + MemTableStopWritesThreshold: c.Experimental.MemTableStopWritesThreshold, + MaxConcurrentCompactions: maxConcurrentCompactions, + DisableAutomaticCompactions: c.Experimental.DisableAutomaticCompactions, + WALBytesPerSync: c.Experimental.WALBytesPerSync, + WALDir: walDir, + WALMinSyncInterval: walMinSyncInterval, + TargetByteDeletionRate: c.Experimental.TargetByteDeletionRate, + Experimental: pebble.ExtraOptionsExperimental{ + L0CompactionConcurrency: c.Experimental.L0CompactionConcurrency, + CompactionDebtConcurrency: c.Experimental.CompactionDebtConcurrency, + ReadCompactionRate: c.Experimental.ReadCompactionRate, + ReadSamplingMultiplier: c.Experimental.ReadSamplingMultiplier, + MaxWriterConcurrency: c.Experimental.MaxWriterConcurrency, + ForceWriterParallelism: c.Experimental.ForceWriterParallelism, + }, + Levels: levels, + } +} diff --git a/cmd/conf/init.go b/cmd/conf/init.go index 8a6c5096fb..4bea00f9f2 100644 --- a/cmd/conf/init.go +++ b/cmd/conf/init.go @@ -1,15 +1,22 @@ package conf import ( + "fmt" + "runtime" + "strings" "time" "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/spf13/pflag" ) type InitConfig struct { Force bool `koanf:"force"` Url string `koanf:"url"` + Latest string `koanf:"latest"` + LatestBase string `koanf:"latest-base"` + ValidateChecksum bool `koanf:"validate-checksum"` DownloadPath string `koanf:"download-path"` DownloadPoll time.Duration `koanf:"download-poll"` DevInit bool `koanf:"dev-init"` @@ -21,13 +28,19 @@ type InitConfig struct { ThenQuit bool `koanf:"then-quit"` Prune string `koanf:"prune"` PruneBloomSize uint64 `koanf:"prune-bloom-size"` + PruneThreads int `koanf:"prune-threads"` + PruneTrieCleanCache int `koanf:"prune-trie-clean-cache"` ResetToMessage int64 `koanf:"reset-to-message"` RecreateMissingStateFrom uint64 
`koanf:"recreate-missing-state-from"` + RebuildLocalWasm bool `koanf:"rebuild-local-wasm"` } var InitConfigDefault = InitConfig{ Force: false, Url: "", + Latest: "", + LatestBase: "https://snapshot.arbitrum.foundation/", + ValidateChecksum: true, DownloadPath: "/tmp/", DownloadPoll: time.Minute, DevInit: false, @@ -39,13 +52,19 @@ var InitConfigDefault = InitConfig{ ThenQuit: false, Prune: "", PruneBloomSize: 2048, + PruneThreads: runtime.NumCPU(), + PruneTrieCleanCache: gethexec.DefaultCachingConfig.TrieCleanCache, ResetToMessage: -1, RecreateMissingStateFrom: 0, // 0 = disabled + RebuildLocalWasm: true, } func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Bool(prefix+".force", InitConfigDefault.Force, "if true: in case database exists init code will be reexecuted and genesis block compared to database") - f.String(prefix+".url", InitConfigDefault.Url, "url to download initializtion data - will poll if download fails") + f.String(prefix+".url", InitConfigDefault.Url, "url to download initialization data - will poll if download fails") + f.String(prefix+".latest", InitConfigDefault.Latest, "if set, searches for the latest snapshot of the given kind "+acceptedSnapshotKindsStr) + f.String(prefix+".latest-base", InitConfigDefault.LatestBase, "base url used when searching for the latest") + f.Bool(prefix+".validate-checksum", InitConfigDefault.ValidateChecksum, "if true: validate the checksum after downloading the snapshot") f.String(prefix+".download-path", InitConfigDefault.DownloadPath, "path to save temp downloaded file") f.Duration(prefix+".download-poll", InitConfigDefault.DownloadPoll, "how long to wait between polling attempts") f.Bool(prefix+".dev-init", InitConfigDefault.DevInit, "init with dev data (1 account with balance) instead of file import") @@ -57,13 +76,39 @@ func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Uint(prefix+".accounts-per-sync", InitConfigDefault.AccountsPerSync, "during init - sync database every X accounts. Lower value for low-memory systems. 0 disables.") f.String(prefix+".prune", InitConfigDefault.Prune, "pruning for a given use: \"full\" for full nodes serving RPC requests, or \"validator\" for validators") f.Uint64(prefix+".prune-bloom-size", InitConfigDefault.PruneBloomSize, "the amount of memory in megabytes to use for the pruning bloom filter (higher values prune better)") + f.Int(prefix+".prune-threads", InitConfigDefault.PruneThreads, "the number of threads to use when pruning") + f.Int(prefix+".prune-trie-clean-cache", InitConfigDefault.PruneTrieCleanCache, "amount of memory in megabytes to cache unchanged state trie nodes with when traversing state database during pruning") f.Int64(prefix+".reset-to-message", InitConfigDefault.ResetToMessage, "forces a reset to an old message height. 
Also set max-reorg-resequence-depth=0 to force re-reading messages") + f.Uint64(prefix+".recreate-missing-state-from", InitConfigDefault.RecreateMissingStateFrom, "block number to start recreating missing states from (0 = disabled)") + f.Bool(prefix+".rebuild-local-wasm", InitConfigDefault.RebuildLocalWasm, "rebuild local wasm database on boot if needed (otherwise it will be done lazily)") } func (c *InitConfig) Validate() error { if c.Force && c.RecreateMissingStateFrom > 0 { log.Warn("force init enabled, recreate-missing-state-from will have no effect") } + if c.Latest != "" && !isAcceptedSnapshotKind(c.Latest) { + return fmt.Errorf("invalid value for latest option: \"%s\" %s", c.Latest, acceptedSnapshotKindsStr) + } + if c.Prune != "" && c.PruneThreads <= 0 { + return fmt.Errorf("invalid number of pruning threads: %d, has to be greater than 0", c.PruneThreads) + } + if c.PruneTrieCleanCache < 0 { + return fmt.Errorf("invalid trie clean cache size: %d, has to be greater than or equal to 0", c.PruneTrieCleanCache) + } return nil } + +var ( + acceptedSnapshotKinds = []string{"archive", "pruned", "genesis"} + acceptedSnapshotKindsStr = "(accepted values: \"" + strings.Join(acceptedSnapshotKinds, "\" | \"") + "\")" +) + +func isAcceptedSnapshotKind(kind string) bool { + for _, valid := range acceptedSnapshotKinds { + if kind == valid { + return true + } + } + return false +} diff --git a/cmd/daserver/daserver.go b/cmd/daserver/daserver.go index 8036487d26..3c164066d3 100644 --- a/cmd/daserver/daserver.go +++ b/cmd/daserver/daserver.go @@ -31,10 +31,11 @@ import ( ) type DAServerConfig struct { - EnableRPC bool `koanf:"enable-rpc"` - RPCAddr string `koanf:"rpc-addr"` - RPCPort uint64 `koanf:"rpc-port"` - RPCServerTimeouts genericconf.HTTPServerTimeoutConfig `koanf:"rpc-server-timeouts"` + EnableRPC bool `koanf:"enable-rpc"` + RPCAddr string `koanf:"rpc-addr"` + RPCPort uint64 `koanf:"rpc-port"` + RPCServerTimeouts genericconf.HTTPServerTimeoutConfig `koanf:"rpc-server-timeouts"` + RPCServerBodyLimit int `koanf:"rpc-server-body-limit"` EnableREST bool `koanf:"enable-rest"` RESTAddr string `koanf:"rest-addr"` @@ -58,6 +59,7 @@ var DefaultDAServerConfig = DAServerConfig{ RPCAddr: "localhost", RPCPort: 9876, RPCServerTimeouts: genericconf.HTTPServerTimeoutConfigDefault, + RPCServerBodyLimit: genericconf.HTTPServerBodyLimitDefault, EnableREST: false, RESTAddr: "localhost", RESTPort: 9877, @@ -88,6 +90,7 @@ func parseDAServer(args []string) (*DAServerConfig, error) { f.Bool("enable-rpc", DefaultDAServerConfig.EnableRPC, "enable the HTTP-RPC server listening on rpc-addr and rpc-port") f.String("rpc-addr", DefaultDAServerConfig.RPCAddr, "HTTP-RPC server listening interface") f.Uint64("rpc-port", DefaultDAServerConfig.RPCPort, "HTTP-RPC server listening port") + f.Int("rpc-server-body-limit", DefaultDAServerConfig.RPCServerBodyLimit, "HTTP-RPC server maximum request body size in bytes; the default (0) uses geth's 5MB limit") genericconf.HTTPServerTimeoutConfigAddOptions("rpc-server-timeouts", f) f.Bool("enable-rest", DefaultDAServerConfig.EnableREST, "enable the REST server listening on rest-addr and rest-port") @@ -235,7 +238,7 @@ func startup() error { return errors.New("sequencer-inbox-address must be set to a valid L1 URL and contract address, or 'none'") } - daReader, daWriter, daHealthChecker, dasLifecycleManager, err := das.CreateDAComponentsForDaserver(ctx, &serverConfig.DataAvailability, l1Reader, seqInboxAddress) + daReader, daWriter, signatureVerifier, daHealthChecker, dasLifecycleManager, err :=
das.CreateDAComponentsForDaserver(ctx, &serverConfig.DataAvailability, l1Reader, seqInboxAddress) if err != nil { return err } @@ -250,7 +253,7 @@ func startup() error { if serverConfig.EnableRPC { log.Info("Starting HTTP-RPC server", "addr", serverConfig.RPCAddr, "port", serverConfig.RPCPort, "revision", vcsRevision, "vcs.time", vcsTime) - rpcServer, err = das.StartDASRPCServer(ctx, serverConfig.RPCAddr, serverConfig.RPCPort, serverConfig.RPCServerTimeouts, daReader, daWriter, daHealthChecker) + rpcServer, err = das.StartDASRPCServer(ctx, serverConfig.RPCAddr, serverConfig.RPCPort, serverConfig.RPCServerTimeouts, serverConfig.RPCServerBodyLimit, daReader, daWriter, daHealthChecker, signatureVerifier) if err != nil { return err } diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go index 3f64a990ca..ba60cbbd4d 100644 --- a/cmd/datool/datool.go +++ b/cmd/datool/datool.go @@ -91,6 +91,7 @@ type ClientStoreConfig struct { SigningKey string `koanf:"signing-key"` SigningWallet string `koanf:"signing-wallet"` SigningWalletPassword string `koanf:"signing-wallet-password"` + MaxStoreChunkBodySize int `koanf:"max-store-chunk-body-size"` } func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) { @@ -102,6 +103,7 @@ func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) { f.String("signing-wallet", "", "wallet containing ecdsa key to sign the message with") f.String("signing-wallet-password", genericconf.PASSWORD_NOT_SET, "password to unlock the wallet, if not specified the user is prompted for the password") f.Duration("das-retention-period", 24*time.Hour, "The period which DASes are requested to retain the stored batches.") + f.Int("max-store-chunk-body-size", 512*1024, "The maximum HTTP POST body size for a chunked store request") k, err := confighelpers.BeginCommonParse(f, args) if err != nil { @@ -121,12 +123,7 @@ func startClientStore(args []string) error { return err } - client, err := das.NewDASRPCClient(config.URL) - if err != nil { - return err - } - - var dasClient das.DataAvailabilityServiceWriter = client + var signer signature.DataSignerFunc if config.SigningKey != "" { var privateKey *ecdsa.PrivateKey if config.SigningKey[:2] == "0x" { @@ -140,12 +137,7 @@ func startClientStore(args []string) error { return err } } - signer := signature.DataSignerFromPrivateKey(privateKey) - - dasClient, err = das.NewStoreSigningDAS(dasClient, signer) - if err != nil { - return err - } + signer = signature.DataSignerFromPrivateKey(privateKey) } else if config.SigningWallet != "" { walletConf := &genericconf.WalletConfig{ Pathname: config.SigningWallet, @@ -154,16 +146,17 @@ func startClientStore(args []string) error { Account: "", OnlyCreateKey: false, } - _, signer, err := util.OpenWallet("datool", walletConf, nil) - if err != nil { - return err - } - dasClient, err = das.NewStoreSigningDAS(dasClient, signer) + _, signer, err = util.OpenWallet("datool", walletConf, nil) if err != nil { return err } } + client, err := das.NewDASRPCClient(config.URL, signer, config.MaxStoreChunkBodySize) + if err != nil { + return err + } + ctx := context.Background() var cert *daprovider.DataAvailabilityCertificate @@ -173,9 +166,9 @@ func startClientStore(args []string) error { if err != nil { return err } - cert, err = dasClient.Store(ctx, message, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), []byte{}) + cert, err = client.Store(ctx, message, uint64(time.Now().Add(config.DASRetentionPeriod).Unix())) } else if len(config.Message) > 0 { - cert, err = dasClient.Store(ctx, 
[]byte(config.Message), uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), []byte{}) + cert, err = client.Store(ctx, []byte(config.Message), uint64(time.Now().Add(config.DASRetentionPeriod).Unix())) } else { return errors.New("--message or --random-message-size must be specified") } @@ -323,6 +316,10 @@ func parseDumpKeyset(args []string) (*DumpKeysetConfig, error) { return nil, err } + if err = das.FixKeysetCLIParsing("keyset.backends", k); err != nil { + return nil, err + } + var config DumpKeysetConfig if err := confighelpers.EndCommonParse(k, &config); err != nil { return nil, err @@ -341,7 +338,7 @@ func parseDumpKeyset(args []string) (*DumpKeysetConfig, error) { if config.Keyset.AssumedHonest == 0 { return nil, errors.New("--keyset.assumed-honest must be set") } - if config.Keyset.Backends == "" { + if config.Keyset.Backends == nil { return nil, errors.New("--keyset.backends must be set") } @@ -361,7 +358,7 @@ func dumpKeyset(args []string) error { return err } - services, err := das.ParseServices(config.Keyset) + services, err := das.ParseServices(config.Keyset, nil) if err != nil { return err } diff --git a/cmd/genericconf/server.go b/cmd/genericconf/server.go index 7550791d6d..9b8acd5f71 100644 --- a/cmd/genericconf/server.go +++ b/cmd/genericconf/server.go @@ -48,6 +48,8 @@ var HTTPServerTimeoutConfigDefault = HTTPServerTimeoutConfig{ IdleTimeout: 120 * time.Second, } +const HTTPServerBodyLimitDefault = 0 // Use default from go-ethereum + func (c HTTPConfig) Apply(stackConf *node.Config) { stackConf.HTTPHost = c.Addr stackConf.HTTPPort = c.Port diff --git a/cmd/nitro/config_test.go b/cmd/nitro/config_test.go index ea04d4eb1f..f94f941e0b 100644 --- a/cmd/nitro/config_test.go +++ b/cmd/nitro/config_test.go @@ -16,6 +16,7 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/cmd/util/confighelpers" + "github.com/offchainlabs/nitro/das" "github.com/offchainlabs/nitro/util/colors" "github.com/offchainlabs/nitro/util/testhelpers" @@ -28,6 +29,8 @@ func TestEmptyCliConfig(t *testing.T) { NodeConfigAddOptions(f) k, err := confighelpers.BeginCommonParse(f, []string{}) Require(t, err) + err = das.FixKeysetCLIParsing("node.data-availability.rpc-aggregator.backends", k) + Require(t, err) var emptyCliNodeConfig NodeConfig err = confighelpers.EndCommonParse(k, &emptyCliNodeConfig) Require(t, err) @@ -39,26 +42,26 @@ func TestEmptyCliConfig(t *testing.T) { } func TestSeqConfig(t *testing.T) { - args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer --execution.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642", " ") - _, _, _, err := ParseNode(context.Background(), args) + args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --node.batch-poster.parent-chain-wallet.pathname /l1keystore --node.batch-poster.parent-chain-wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer --execution.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642", " ") + _, _, err := ParseNode(context.Background(), args) Require(t, err) } func TestUnsafeStakerConfig(t *testing.T) { - args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false 
--parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.staker.enable --node.staker.strategy MakeNodes --node.staker.staker-interval 10s --execution.forwarding-target null --node.staker.dangerous.without-block-validator", " ") - _, _, _, err := ParseNode(context.Background(), args) + args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --node.staker.parent-chain-wallet.pathname /l1keystore --node.staker.parent-chain-wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.staker.enable --node.staker.strategy MakeNodes --node.staker.staker-interval 10s --execution.forwarding-target null --node.staker.dangerous.without-block-validator", " ") + _, _, err := ParseNode(context.Background(), args) Require(t, err) } func TestValidatorConfig(t *testing.T) { - args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.staker.enable --node.staker.strategy MakeNodes --node.staker.staker-interval 10s --execution.forwarding-target null", " ") - _, _, _, err := ParseNode(context.Background(), args) + args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --node.staker.parent-chain-wallet.pathname /l1keystore --node.staker.parent-chain-wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.staker.enable --node.staker.strategy MakeNodes --node.staker.staker-interval 10s --execution.forwarding-target null", " ") + _, _, err := ParseNode(context.Background(), args) Require(t, err) } func TestAggregatorConfig(t *testing.T) { - args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer --execution.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642 --node.data-availability.enable --node.data-availability.rpc-aggregator.backends {[\"url\":\"http://localhost:8547\",\"pubkey\":\"abc==\",\"signerMask\":0x1]}", " ") - _, _, _, err := ParseNode(context.Background(), args) + args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --node.batch-poster.parent-chain-wallet.pathname /l1keystore --node.batch-poster.parent-chain-wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer --execution.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642 --node.data-availability.enable --node.data-availability.rpc-aggregator.backends [{\"url\":\"http://localhost:8547\",\"pubkey\":\"abc==\"}]", " ") + _, _, err := ParseNode(context.Background(), args) Require(t, err) } @@ -120,13 +123,13 @@ func TestLiveNodeConfig(t *testing.T) { jsonConfig := "{\"chain\":{\"id\":421613}}" Require(t, WriteToConfigFile(configFile, jsonConfig)) - args := strings.Split("--file-logging.enable=false --persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --parent-chain.wallet.pathname 
/l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer --execution.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642", " ") + args := strings.Split("--file-logging.enable=false --persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --node.batch-poster.parent-chain-wallet.pathname /l1keystore --node.batch-poster.parent-chain-wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer --execution.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642", " ") args = append(args, []string{"--conf.file", configFile}...) - config, _, _, err := ParseNode(context.Background(), args) + config, _, err := ParseNode(context.Background(), args) Require(t, err) liveConfig := genericconf.NewLiveConfig[*NodeConfig](args, config, func(ctx context.Context, args []string) (*NodeConfig, error) { - nodeConfig, _, _, err := ParseNode(ctx, args) + nodeConfig, _, err := ParseNode(ctx, args) return nodeConfig, err }) @@ -201,13 +204,13 @@ func TestPeriodicReloadOfLiveNodeConfig(t *testing.T) { jsonConfig := "{\"conf\":{\"reload-interval\":\"20ms\"}}" Require(t, WriteToConfigFile(configFile, jsonConfig)) - args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer --execution.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642", " ") + args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --node.batch-poster.parent-chain-wallet.pathname /l1keystore --node.batch-poster.parent-chain-wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer --execution.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642", " ") args = append(args, []string{"--conf.file", configFile}...) 
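// Illustrative sketch (not part of this diff): TestAggregatorConfig above now passes
// --node.data-availability.rpc-aggregator.backends as a JSON array of objects with "url" and
// "pubkey" fields instead of the old brace syntax with an explicit signermask. The snippet below
// shows one way such a flag value could be produced; the backendEntry type is a hypothetical
// stand-in used only for this example, not the das package's actual config struct.
package main

import (
	"encoding/json"
	"fmt"
)

type backendEntry struct {
	URL    string `json:"url"`
	Pubkey string `json:"pubkey"`
}

func main() {
	backends := []backendEntry{{URL: "http://localhost:8547", Pubkey: "abc=="}}
	blob, err := json.Marshal(backends)
	if err != nil {
		panic(err)
	}
	// Prints: --node.data-availability.rpc-aggregator.backends [{"url":"http://localhost:8547","pubkey":"abc=="}]
	fmt.Println("--node.data-availability.rpc-aggregator.backends", string(blob))
}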
- config, _, _, err := ParseNode(context.Background(), args) + config, _, err := ParseNode(context.Background(), args) Require(t, err) liveConfig := genericconf.NewLiveConfig[*NodeConfig](args, config, func(ctx context.Context, args []string) (*NodeConfig, error) { - nodeConfig, _, _, err := ParseNode(ctx, args) + nodeConfig, _, err := ParseNode(ctx, args) return nodeConfig, err }) liveConfig.Start(ctx) diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index a45ec054a1..97678a7d23 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -5,11 +5,19 @@ package main import ( "context" + "crypto/sha256" + "encoding/hex" "encoding/json" "errors" "fmt" + "io" "math/big" + "net/http" + "net/url" "os" + "path" + "path/filepath" + "regexp" "runtime" "strings" "sync" @@ -40,6 +48,8 @@ import ( "github.com/offchainlabs/nitro/util/arbmath" ) +var notFoundError = errors.New("file not found") + func downloadInit(ctx context.Context, initConfig *conf.InitConfig) (string, error) { if initConfig.Url == "" { return "", nil @@ -66,18 +76,43 @@ func downloadInit(ctx context.Context, initConfig *conf.InitConfig) (string, err } return initFile, nil } - grabclient := grab.NewClient() log.Info("Downloading initial database", "url", initConfig.Url) - fmt.Println() + if !initConfig.ValidateChecksum { + file, err := downloadFile(ctx, initConfig, initConfig.Url, nil) + if err != nil && errors.Is(err, notFoundError) { + return downloadInitInParts(ctx, initConfig) + } + return file, err + } + checksum, err := fetchChecksum(ctx, initConfig.Url+".sha256") + if err != nil { + if errors.Is(err, notFoundError) { + return downloadInitInParts(ctx, initConfig) + } + return "", fmt.Errorf("error fetching checksum: %w", err) + } + file, err := downloadFile(ctx, initConfig, initConfig.Url, checksum) + if err != nil && errors.Is(err, notFoundError) { + return "", fmt.Errorf("file not found but checksum exists") + } + return file, err +} + +func downloadFile(ctx context.Context, initConfig *conf.InitConfig, url string, checksum []byte) (string, error) { + grabclient := grab.NewClient() printTicker := time.NewTicker(time.Second) defer printTicker.Stop() attempt := 0 for { attempt++ - req, err := grab.NewRequest(initConfig.DownloadPath, initConfig.Url) + req, err := grab.NewRequest(initConfig.DownloadPath, url) if err != nil { panic(err) } + if checksum != nil { + const deleteOnError = true + req.SetChecksum(sha256.New(), checksum, deleteOnError) + } resp := grabclient.Do(req.WithContext(ctx)) firstPrintTime := time.Now().Add(time.Second * 2) updateLoop: @@ -102,6 +137,9 @@ func downloadInit(ctx context.Context, initConfig *conf.InitConfig) (string, err } case <-resp.Done: if err := resp.Err(); err != nil { + if resp.HTTPResponse.StatusCode == http.StatusNotFound { + return "", notFoundError + } fmt.Printf("\n attempt %d failed: %v\n", attempt, err) break updateLoop } @@ -121,6 +159,162 @@ func downloadInit(ctx context.Context, initConfig *conf.InitConfig) (string, err } } +// httpGet performs a GET request to the specified URL +func httpGet(ctx context.Context, url string) ([]byte, error) { + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return nil, fmt.Errorf("error creating request: %w", err) + } + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("error making GET request: %w", err) + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotFound { + return nil, notFoundError + } else if resp.StatusCode != http.StatusOK { + return nil, 
fmt.Errorf("unexpected status code: %v", resp.Status) + } + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error reading response body: %w", err) + } + return body, nil +} + +// fetchChecksum performs a GET request to the specified URL and returns the checksum +func fetchChecksum(ctx context.Context, url string) ([]byte, error) { + body, err := httpGet(ctx, url) + if err != nil { + return nil, err + } + checksumStr := strings.TrimSpace(string(body)) + checksum, err := hex.DecodeString(checksumStr) + if err != nil { + return nil, fmt.Errorf("error decoding checksum: %w", err) + } + if len(checksum) != sha256.Size { + return nil, fmt.Errorf("invalid checksum length") + } + return checksum, nil +} + +func downloadInitInParts(ctx context.Context, initConfig *conf.InitConfig) (string, error) { + log.Info("File not found; trying to download database in parts") + fileInfo, err := os.Stat(initConfig.DownloadPath) + if err != nil || !fileInfo.IsDir() { + return "", fmt.Errorf("download path must be a directory: %v", initConfig.DownloadPath) + } + archiveUrl, err := url.Parse(initConfig.Url) + if err != nil { + return "", fmt.Errorf("failed to parse init url \"%s\": %w", initConfig.Url, err) + } + + // Get parts from manifest file + manifest, err := httpGet(ctx, archiveUrl.String()+".manifest.txt") + if err != nil { + return "", fmt.Errorf("failed to get manifest file: %w", err) + } + partNames := []string{} + checksums := [][]byte{} + lines := strings.Split(strings.TrimSpace(string(manifest)), "\n") + for _, line := range lines { + fields := strings.Fields(line) + if len(fields) != 2 { + return "", fmt.Errorf("manifest file in wrong format") + } + checksum, err := hex.DecodeString(fields[0]) + if err != nil { + return "", fmt.Errorf("failed decoding checksum in manifest file: %w", err) + } + checksums = append(checksums, checksum) + partNames = append(partNames, fields[1]) + } + + partFiles := []string{} + defer func() { + // remove all temporary files. + for _, part := range partFiles { + err := os.Remove(part) + if err != nil { + log.Warn("Failed to remove temporary file", "file", part) + } + } + }() + + // Download parts + for i, partName := range partNames { + log.Info("Downloading database part", "part", partName) + partUrl := archiveUrl.JoinPath("..", partName).String() + var checksum []byte + if initConfig.ValidateChecksum { + checksum = checksums[i] + } + partFile, err := downloadFile(ctx, initConfig, partUrl, checksum) + if err != nil { + return "", fmt.Errorf("error downloading part \"%s\": %w", partName, err) + } + partFiles = append(partFiles, partFile) + } + archivePath := path.Join(initConfig.DownloadPath, path.Base(archiveUrl.Path)) + return joinArchive(partFiles, archivePath) +} + +// joinArchive joins the archive parts into a single file and return its path. 
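// Illustrative sketch (not part of this diff): downloadInitInParts above expects the manifest at
// "<archive-url>.manifest.txt" to contain one line per part in the form "<hex sha256> <part name>",
// and it concatenates the downloaded parts in manifest order. The stand-alone example below checks
// a single manifest line against a local part file; the file name and checksum literal are made up
// for illustration.
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"os"
	"strings"
)

func main() {
	line := "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08 archive.tar.gz.part0"
	fields := strings.Fields(line)
	if len(fields) != 2 {
		panic("manifest line must be '<checksum> <part name>'")
	}
	want, err := hex.DecodeString(fields[0])
	if err != nil {
		panic(err)
	}
	data, err := os.ReadFile(fields[1]) // hypothetical local copy of the downloaded part
	if err != nil {
		panic(err)
	}
	got := sha256.Sum256(data)
	fmt.Println("checksum matches:", bytes.Equal(got[:], want))
}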
+func joinArchive(parts []string, archivePath string) (string, error) { + if len(parts) == 0 { + return "", fmt.Errorf("no database parts found") + } + archive, err := os.Create(archivePath) + if err != nil { + return "", fmt.Errorf("failed to create archive: %w", err) + } + defer archive.Close() + for _, part := range parts { + partFile, err := os.Open(part) + if err != nil { + return "", fmt.Errorf("failed to open part file %s: %w", part, err) + } + defer partFile.Close() + _, err = io.Copy(archive, partFile) + if err != nil { + return "", fmt.Errorf("failed to copy part file %s: %w", part, err) + } + log.Info("Joined database part into archive", "part", part) + } + log.Info("Successfully joined parts into archive", "archive", archivePath) + return archivePath, nil +} + +// setLatestSnapshotUrl sets the Url in initConfig to the latest one available on the mirror. +func setLatestSnapshotUrl(ctx context.Context, initConfig *conf.InitConfig, chain string) error { + if initConfig.Latest == "" { + return nil + } + if initConfig.Url != "" { + return fmt.Errorf("cannot set latest url if url is already set") + } + baseUrl, err := url.Parse(initConfig.LatestBase) + if err != nil { + return fmt.Errorf("failed to parse latest mirror \"%s\": %w", initConfig.LatestBase, err) + } + latestFileUrl := baseUrl.JoinPath(chain, "latest-"+initConfig.Latest+".txt").String() + latestFileBytes, err := httpGet(ctx, latestFileUrl) + if err != nil { + return fmt.Errorf("failed to get latest file at \"%s\": %w", latestFileUrl, err) + } + latestFile := strings.TrimSpace(string(latestFileBytes)) + containsScheme := regexp.MustCompile("https?://") + if containsScheme.MatchString(latestFile) { + initConfig.Url = latestFile + } else { + initConfig.Url = baseUrl.JoinPath(latestFile).String() + } + log.Info("Set latest snapshot url", "url", initConfig.Url) + return nil +} + func validateBlockChain(blockChain *core.BlockChain, chainConfig *params.ChainConfig) error { statedb, err := blockChain.State() if err != nil { @@ -170,19 +364,66 @@ func validateBlockChain(blockChain *core.BlockChain, chainConfig *params.ChainCo return nil } -func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeConfig, chainId *big.Int, cacheConfig *core.CacheConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses) (ethdb.Database, *core.BlockChain, error) { +func dirExists(path string) bool { + info, err := os.Stat(path) + if os.IsNotExist(err) { + return false + } + return info.IsDir() +} + +func checkEmptyDatabaseDir(dir string, force bool) error { + entries, err := os.ReadDir(dir) + if err != nil { + return fmt.Errorf("failed to open database dir %s: %w", dir, err) + } + unexpectedFiles := []string{} + allowedFiles := map[string]bool{ + "LOCK": true, "classic-msg": true, "l2chaindata": true, + } + for _, entry := range entries { + if !allowedFiles[entry.Name()] { + unexpectedFiles = append(unexpectedFiles, entry.Name()) + } + } + if len(unexpectedFiles) > 0 { + if force { + return fmt.Errorf("trying to overwrite old database directory '%s' (delete the database directory and try again)", dir) + } + firstThreeFilenames := strings.Join(unexpectedFiles[:min(len(unexpectedFiles), 3)], ", ") + return fmt.Errorf("found %d unexpected files in database directory, including: %s", len(unexpectedFiles), firstThreeFilenames) + } + return nil +} + +var pebbleNotExistErrorRegex = regexp.MustCompile("pebble: database .* does not exist") + +func isPebbleNotExistError(err error) bool { + return 
pebbleNotExistErrorRegex.MatchString(err.Error()) +} + +func isLeveldbNotExistError(err error) bool { + return os.IsNotExist(err) +} + +func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeConfig, chainId *big.Int, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses) (ethdb.Database, *core.BlockChain, error) { if !config.Init.Force { - if readOnlyDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", 0, 0, "", "l2chaindata/", true); err == nil { + if readOnlyDb, err := stack.OpenDatabaseWithFreezerWithExtraOptions("l2chaindata", 0, 0, "", "l2chaindata/", true, persistentConfig.Pebble.ExtraOptions("l2chaindata")); err == nil { if chainConfig := gethexec.TryReadStoredChainConfig(readOnlyDb); chainConfig != nil { readOnlyDb.Close() if !arbmath.BigEquals(chainConfig.ChainID, chainId) { return nil, nil, fmt.Errorf("database has chain ID %v but config has chain ID %v (are you sure this database is for the right chain?)", chainConfig.ChainID, chainId) } - chainDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false) + chainData, err := stack.OpenDatabaseWithFreezerWithExtraOptions("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false, persistentConfig.Pebble.ExtraOptions("l2chaindata")) if err != nil { - return chainDb, nil, err + return nil, nil, err + } + wasmDb, err := stack.OpenDatabaseWithExtraOptions("wasm", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, "wasm/", false, persistentConfig.Pebble.ExtraOptions("wasm")) + if err != nil { + return nil, nil, err } - err = pruning.PruneChainDb(ctx, chainDb, stack, &config.Init, cacheConfig, l1Client, rollupAddrs, config.Node.ValidatorRequired()) + chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb, 1) + err = pruning.PruneChainDb(ctx, chainDb, stack, &config.Init, cacheConfig, persistentConfig, l1Client, rollupAddrs, config.Node.ValidatorRequired()) if err != nil { return chainDb, nil, fmt.Errorf("error pruning: %w", err) } @@ -200,13 +441,66 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo return chainDb, l2BlockChain, fmt.Errorf("failed to recreate missing states: %w", err) } } - + latestBlock := l2BlockChain.CurrentBlock() + if latestBlock == nil || latestBlock.Number.Uint64() <= chainConfig.ArbitrumChainParams.GenesisBlockNum || + types.DeserializeHeaderExtraInformation(latestBlock).ArbOSFormatVersion < params.ArbosVersion_Stylus { + // If there is only genesis block or no blocks in the blockchain, set Rebuilding of wasm store to Done + // If Stylus upgrade hasn't yet happened, skipping rebuilding of wasm store + log.Info("Setting rebuilding of wasm store to done") + if err = gethexec.WriteToKeyValueStore(wasmDb, gethexec.RebuildingPositionKey, gethexec.RebuildingDone); err != nil { + return nil, nil, fmt.Errorf("unable to set rebuilding status of wasm store to done: %w", err) + } + } else if config.Init.RebuildLocalWasm { + position, err := gethexec.ReadFromKeyValueStore[common.Hash](wasmDb, gethexec.RebuildingPositionKey) + if err != nil { + log.Info("Unable to get codehash position in rebuilding of wasm store, its possible it isnt initialized yet, so initializing it and starting rebuilding", "err", err) + if err := gethexec.WriteToKeyValueStore(wasmDb, 
gethexec.RebuildingPositionKey, common.Hash{}); err != nil { + return nil, nil, fmt.Errorf("unable to initialize codehash position in rebuilding of wasm store to beginning: %w", err) + } + } + if position != gethexec.RebuildingDone { + startBlockHash, err := gethexec.ReadFromKeyValueStore[common.Hash](wasmDb, gethexec.RebuildingStartBlockHashKey) + if err != nil { + log.Info("Unable to get start block hash in rebuilding of wasm store, its possible it isnt initialized yet, so initializing it to latest block hash", "err", err) + if err := gethexec.WriteToKeyValueStore(wasmDb, gethexec.RebuildingStartBlockHashKey, latestBlock.Hash()); err != nil { + return nil, nil, fmt.Errorf("unable to initialize start block hash in rebuilding of wasm store to latest block hash: %w", err) + } + startBlockHash = latestBlock.Hash() + } + log.Info("Starting or continuing rebuilding of wasm store", "codeHash", position, "startBlockHash", startBlockHash) + if err := gethexec.RebuildWasmStore(ctx, wasmDb, chainDb, config.Execution.RPC.MaxRecreateStateDepth, l2BlockChain, position, startBlockHash); err != nil { + return nil, nil, fmt.Errorf("error rebuilding of wasm store: %w", err) + } + } + } return chainDb, l2BlockChain, nil } readOnlyDb.Close() + } else if !isLeveldbNotExistError(err) && !isPebbleNotExistError(err) { + // we only want to continue if the error is pebble or leveldb not exist error + return nil, nil, fmt.Errorf("Failed to open database: %w", err) } } + // Check if database was misplaced in parent dir + const errorFmt = "database was not found in %s, but it was found in %s (have you placed the database in the wrong directory?)" + parentDir := filepath.Dir(stack.InstanceDir()) + if dirExists(path.Join(parentDir, "l2chaindata")) { + return nil, nil, fmt.Errorf(errorFmt, stack.InstanceDir(), parentDir) + } + grandParentDir := filepath.Dir(parentDir) + if dirExists(path.Join(grandParentDir, "l2chaindata")) { + return nil, nil, fmt.Errorf(errorFmt, stack.InstanceDir(), grandParentDir) + } + + if err := checkEmptyDatabaseDir(stack.InstanceDir(), config.Init.Force); err != nil { + return nil, nil, err + } + + if err := setLatestSnapshotUrl(ctx, &config.Init, config.Chain.Name); err != nil { + return nil, nil, err + } + initFile, err := downloadInit(ctx, &config.Init) if err != nil { return nil, nil, err @@ -230,9 +524,21 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo var initDataReader statetransfer.InitDataReader = nil - chainDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false) + chainData, err := stack.OpenDatabaseWithFreezerWithExtraOptions("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "l2chaindata/", false, persistentConfig.Pebble.ExtraOptions("l2chaindata")) + if err != nil { + return nil, nil, err + } + wasmDb, err := stack.OpenDatabaseWithExtraOptions("wasm", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, "wasm/", false, persistentConfig.Pebble.ExtraOptions("wasm")) + if err != nil { + return nil, nil, err + } + chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb, 1) + + // Rebuilding wasm store is not required when just starting out + err = gethexec.WriteToKeyValueStore(wasmDb, gethexec.RebuildingPositionKey, gethexec.RebuildingDone) + log.Info("Setting codehash position in rebuilding of wasm store to done") if err != nil { - return chainDb, nil, err + 
return nil, nil, fmt.Errorf("unable to set codehash position in rebuilding of wasm store to done: %w", err) } if config.Init.ImportFile != "" { @@ -378,7 +684,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo return chainDb, l2BlockChain, err } - err = pruning.PruneChainDb(ctx, chainDb, stack, &config.Init, cacheConfig, l1Client, rollupAddrs, config.Node.ValidatorRequired()) + err = pruning.PruneChainDb(ctx, chainDb, stack, &config.Init, cacheConfig, persistentConfig, l1Client, rollupAddrs, config.Node.ValidatorRequired()) if err != nil { return chainDb, nil, fmt.Errorf("error pruning: %w", err) } diff --git a/cmd/nitro/init_test.go b/cmd/nitro/init_test.go new file mode 100644 index 0000000000..6c363972e9 --- /dev/null +++ b/cmd/nitro/init_test.go @@ -0,0 +1,363 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package main + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "net" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/node" + "github.com/offchainlabs/nitro/cmd/conf" + "github.com/offchainlabs/nitro/util/testhelpers" +) + +const ( + archiveName = "random_data.tar.gz" + numParts = 3 + partSize = 1024 * 1024 + dataSize = numParts * partSize + filePerm = 0600 + dirPerm = 0700 +) + +func TestDownloadInitWithoutChecksum(t *testing.T) { + // Create archive with random data + serverDir := t.TempDir() + data := testhelpers.RandomSlice(dataSize) + + // Write archive file + archiveFile := fmt.Sprintf("%s/%s", serverDir, archiveName) + err := os.WriteFile(archiveFile, data, filePerm) + Require(t, err, "failed to write archive") + + // Start HTTP server + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + addr := startFileServer(t, ctx, serverDir) + + // Download file + initConfig := conf.InitConfigDefault + initConfig.Url = fmt.Sprintf("http://%s/%s", addr, archiveName) + initConfig.DownloadPath = t.TempDir() + initConfig.ValidateChecksum = false + receivedArchive, err := downloadInit(ctx, &initConfig) + Require(t, err, "failed to download") + + // Check archive contents + receivedData, err := os.ReadFile(receivedArchive) + Require(t, err, "failed to read received archive") + if !bytes.Equal(receivedData, data) { + t.Error("downloaded archive is different from generated one") + } +} + +func TestDownloadInitWithChecksum(t *testing.T) { + // Create archive with random data + serverDir := t.TempDir() + data := testhelpers.RandomSlice(dataSize) + checksumBytes := sha256.Sum256(data) + checksum := hex.EncodeToString(checksumBytes[:]) + + // Write archive file + archiveFile := fmt.Sprintf("%s/%s", serverDir, archiveName) + err := os.WriteFile(archiveFile, data, filePerm) + Require(t, err, "failed to write archive") + + // Write checksum file + checksumFile := archiveFile + ".sha256" + err = os.WriteFile(checksumFile, []byte(checksum), filePerm) + Require(t, err, "failed to write checksum") + + // Start HTTP server + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + addr := startFileServer(t, ctx, serverDir) + + // Download file + initConfig := conf.InitConfigDefault + initConfig.Url = fmt.Sprintf("http://%s/%s", addr, archiveName) + initConfig.DownloadPath = t.TempDir() + receivedArchive, err := downloadInit(ctx, &initConfig) + Require(t, err, "failed to download") + + // Check archive contents + receivedData, err := 
os.ReadFile(receivedArchive) + Require(t, err, "failed to read received archive") + if !bytes.Equal(receivedData, data) { + t.Error("downloaded archive is different from generated one") + } +} + +func TestDownloadInitInPartsWithoutChecksum(t *testing.T) { + // Create parts with random data + serverDir := t.TempDir() + data := testhelpers.RandomSlice(dataSize) + manifest := bytes.NewBuffer(nil) + for i := 0; i < numParts; i++ { + partData := data[partSize*i : partSize*(i+1)] + partName := fmt.Sprintf("%s.part%d", archiveName, i) + fmt.Fprintf(manifest, "%s %s\n", strings.Repeat("0", 64), partName) + err := os.WriteFile(path.Join(serverDir, partName), partData, filePerm) + Require(t, err, "failed to write part") + } + manifestFile := fmt.Sprintf("%s/%s.manifest.txt", serverDir, archiveName) + err := os.WriteFile(manifestFile, manifest.Bytes(), filePerm) + Require(t, err, "failed to write manifest file") + + // Start HTTP server + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + addr := startFileServer(t, ctx, serverDir) + + // Download file + initConfig := conf.InitConfigDefault + initConfig.Url = fmt.Sprintf("http://%s/%s", addr, archiveName) + initConfig.DownloadPath = t.TempDir() + initConfig.ValidateChecksum = false + receivedArchive, err := downloadInit(ctx, &initConfig) + Require(t, err, "failed to download") + + // check database contents + receivedData, err := os.ReadFile(receivedArchive) + Require(t, err, "failed to read received archive") + if !bytes.Equal(receivedData, data) { + t.Error("downloaded archive is different from generated one") + } + + // Check if the function deleted the temporary files + entries, err := os.ReadDir(initConfig.DownloadPath) + Require(t, err, "failed to read temp dir") + if len(entries) != 1 { + t.Error("download function did not delete temp files") + } +} + +func TestDownloadInitInPartsWithChecksum(t *testing.T) { + // Create parts with random data + serverDir := t.TempDir() + data := testhelpers.RandomSlice(dataSize) + manifest := bytes.NewBuffer(nil) + for i := 0; i < numParts; i++ { + // Create part and checksum + partData := data[partSize*i : partSize*(i+1)] + partName := fmt.Sprintf("%s.part%d", archiveName, i) + checksumBytes := sha256.Sum256(partData) + checksum := hex.EncodeToString(checksumBytes[:]) + fmt.Fprintf(manifest, "%s %s\n", checksum, partName) + // Write part file + err := os.WriteFile(path.Join(serverDir, partName), partData, filePerm) + Require(t, err, "failed to write part") + } + manifestFile := fmt.Sprintf("%s/%s.manifest.txt", serverDir, archiveName) + err := os.WriteFile(manifestFile, manifest.Bytes(), filePerm) + Require(t, err, "failed to write manifest file") + + // Start HTTP server + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + addr := startFileServer(t, ctx, serverDir) + + // Download file + initConfig := conf.InitConfigDefault + initConfig.Url = fmt.Sprintf("http://%s/%s", addr, archiveName) + initConfig.DownloadPath = t.TempDir() + receivedArchive, err := downloadInit(ctx, &initConfig) + Require(t, err, "failed to download") + + // check database contents + receivedData, err := os.ReadFile(receivedArchive) + Require(t, err, "failed to read received archive") + if !bytes.Equal(receivedData, data) { + t.Error("downloaded archive is different from generated one") + } + + // Check if the function deleted the temporary files + entries, err := os.ReadDir(initConfig.DownloadPath) + Require(t, err, "failed to read temp dir") + if len(entries) != 1 { + t.Error("download 
function did not delete temp files") + } +} + +func TestSetLatestSnapshotUrl(t *testing.T) { + const ( + chain = "arb1" + snapshotKind = "archive" + latestFile = "latest-" + snapshotKind + ".txt" + ) + + testCases := []struct { + name string + latestContents string + wantUrl func(string) string + }{ + { + name: "latest file with path", + latestContents: "/arb1/2024/21/archive.tar.gz", + wantUrl: func(serverAddr string) string { return serverAddr + "/arb1/2024/21/archive.tar.gz" }, + }, + { + name: "latest file with rootless path", + latestContents: "arb1/2024/21/archive.tar.gz", + wantUrl: func(serverAddr string) string { return serverAddr + "/arb1/2024/21/archive.tar.gz" }, + }, + { + name: "latest file with http url", + latestContents: "http://some.domain.com/arb1/2024/21/archive.tar.gz", + wantUrl: func(serverAddr string) string { return "http://some.domain.com/arb1/2024/21/archive.tar.gz" }, + }, + { + name: "latest file with https url", + latestContents: "https://some.domain.com/arb1/2024/21/archive.tar.gz", + wantUrl: func(serverAddr string) string { return "https://some.domain.com/arb1/2024/21/archive.tar.gz" }, + }, + } + + for _, testCase := range testCases { + t.Log("running test case", testCase.name) + + // Create latest file + serverDir := t.TempDir() + err := os.Mkdir(filepath.Join(serverDir, chain), dirPerm) + Require(t, err) + err = os.WriteFile(filepath.Join(serverDir, chain, latestFile), []byte(testCase.latestContents), filePerm) + Require(t, err) + + // Start HTTP server + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + addr := "http://" + startFileServer(t, ctx, serverDir) + + // Set latest snapshot URL + initConfig := conf.InitConfigDefault + initConfig.Latest = snapshotKind + initConfig.LatestBase = addr + err = setLatestSnapshotUrl(ctx, &initConfig, chain) + Require(t, err) + + // Check url + want := testCase.wantUrl(addr) + if initConfig.Url != want { + t.Fatalf("initConfig.Url = %s; want: %s", initConfig.Url, want) + } + } +} + +func startFileServer(t *testing.T, ctx context.Context, dir string) string { + t.Helper() + ln, err := net.Listen("tcp", "127.0.0.1:0") + Require(t, err, "failed to listen") + addr := ln.Addr().String() + server := &http.Server{ + Addr: addr, + Handler: http.FileServer(http.Dir(dir)), + ReadHeaderTimeout: time.Second, + } + go func() { + err := server.Serve(ln) + if err != nil && !errors.Is(err, http.ErrServerClosed) { + t.Error("failed to shutdown server") + } + }() + go func() { + <-ctx.Done() + err := server.Shutdown(ctx) + Require(t, err, "failed to shutdown server") + }() + return addr +} + +func testIsNotExistError(t *testing.T, dbEngine string, isNotExist func(error) bool) { + stackConf := node.DefaultConfig + stackConf.DataDir = t.TempDir() + stackConf.DBEngine = dbEngine + stack, err := node.New(&stackConf) + if err != nil { + t.Fatalf("Failed to created test stack: %v", err) + } + defer stack.Close() + readonly := true + _, err = stack.OpenDatabaseWithExtraOptions("test", 16, 16, "", readonly, nil) + if err == nil { + t.Fatal("Opening non-existent database did not fail") + } + if !isNotExist(err) { + t.Fatalf("Failed to classify error as not exist error - internal implementation of OpenDatabaseWithExtraOptions might have changed, err: %v", err) + } + err = errors.New("some other error") + if isNotExist(err) { + t.Fatalf("Classified other error as not exist, err: %v", err) + } +} + +func TestIsNotExistError(t *testing.T) { + t.Run("TestIsPebbleNotExistError", func(t *testing.T) { + testIsNotExistError(t, 
"pebble", isPebbleNotExistError) + }) + t.Run("TestIsLeveldbNotExistError", func(t *testing.T) { + testIsNotExistError(t, "leveldb", isLeveldbNotExistError) + }) +} + +func TestEmptyDatabaseDir(t *testing.T) { + testCases := []struct { + name string + files []string + force bool + wantErr string + }{ + { + name: "succeed with empty dir", + }, + { + name: "succeed with expected files", + files: []string{"LOCK", "classic-msg", "l2chaindata"}, + }, + { + name: "fail with unexpected files", + files: []string{"LOCK", "a", "b", "c", "d"}, + wantErr: "found 4 unexpected files in database directory, including: a, b, c", + }, + { + name: "fail with unexpected files when forcing", + files: []string{"LOCK", "a", "b", "c", "d"}, + force: true, + wantErr: "trying to overwrite old database directory", + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + dir := t.TempDir() + for _, file := range tc.files { + const filePerm = 0600 + err := os.WriteFile(path.Join(dir, file), []byte{1, 2, 3}, filePerm) + Require(t, err) + } + err := checkEmptyDatabaseDir(dir, tc.force) + if tc.wantErr == "" { + if err != nil { + t.Errorf("expected nil error, got %q", err) + } + } else { + if err == nil { + t.Error("expected error, got nil") + } else if !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("expected %q, got %q", tc.wantErr, err) + } + } + }) + } +} diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 9280c3af02..1c4ad80186 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -6,6 +6,7 @@ package main import ( "context" "crypto/ecdsa" + "encoding/hex" "errors" "fmt" "io" @@ -50,6 +51,7 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/cmd/util" "github.com/offchainlabs/nitro/cmd/util/confighelpers" + "github.com/offchainlabs/nitro/das" "github.com/offchainlabs/nitro/execution/gethexec" _ "github.com/offchainlabs/nitro/execution/nodeInterface" "github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -59,6 +61,7 @@ import ( "github.com/offchainlabs/nitro/staker/validatorwallet" "github.com/offchainlabs/nitro/util/colors" "github.com/offchainlabs/nitro/util/headerreader" + "github.com/offchainlabs/nitro/util/iostat" "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/validator/server_common" @@ -164,7 +167,7 @@ func mainImpl() int { defer cancelFunc() args := os.Args[1:] - nodeConfig, l1Wallet, l2DevWallet, err := ParseNode(ctx, args) + nodeConfig, l2DevWallet, err := ParseNode(ctx, args) if err != nil { confighelpers.PrintErrorAndExit(err, printSampleUsage) } @@ -231,7 +234,6 @@ func mainImpl() int { log.Error("consensus and execution must agree if sequencing is enabled or not", "Execution.Sequencer.Enable", nodeConfig.Execution.Sequencer.Enable, "Node.Sequencer", nodeConfig.Node.Sequencer) } - var l1TransactionOpts *bind.TransactOpts var dataSigner signature.DataSignerFunc var l1TransactionOptsValidator *bind.TransactOpts var l1TransactionOptsBatchPoster *bind.TransactOpts @@ -242,7 +244,6 @@ func mainImpl() int { validatorNeedsKey := nodeConfig.Node.Staker.OnlyCreateWalletContract || (nodeConfig.Node.Staker.Enable && !strings.EqualFold(nodeConfig.Node.Staker.Strategy, "watchtower") && nodeConfig.Node.Staker.DataPoster.ExternalSigner.URL == "") - l1Wallet.ResolveDirectoryNames(nodeConfig.Persistent.Chain) defaultL1WalletConfig := conf.DefaultL1WalletConfig defaultL1WalletConfig.ResolveDirectoryNames(nodeConfig.Persistent.Chain) @@ -254,42 +255,24 @@ func 
mainImpl() int { defaultBatchPosterL1WalletConfig := arbnode.DefaultBatchPosterL1WalletConfig defaultBatchPosterL1WalletConfig.ResolveDirectoryNames(nodeConfig.Persistent.Chain) - if nodeConfig.Node.Staker.ParentChainWallet == defaultValidatorL1WalletConfig && nodeConfig.Node.BatchPoster.ParentChainWallet == defaultBatchPosterL1WalletConfig { - if sequencerNeedsKey || validatorNeedsKey || l1Wallet.OnlyCreateKey { - l1TransactionOpts, dataSigner, err = util.OpenWallet("l1", l1Wallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ID)) - if err != nil { - flag.Usage() - log.Crit("error opening parent chain wallet", "path", l1Wallet.Pathname, "account", l1Wallet.Account, "err", err) - } - if l1Wallet.OnlyCreateKey { - return 0 - } - l1TransactionOptsBatchPoster = l1TransactionOpts - l1TransactionOptsValidator = l1TransactionOpts + if sequencerNeedsKey || nodeConfig.Node.BatchPoster.ParentChainWallet.OnlyCreateKey { + l1TransactionOptsBatchPoster, dataSigner, err = util.OpenWallet("l1-batch-poster", &nodeConfig.Node.BatchPoster.ParentChainWallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ID)) + if err != nil { + flag.Usage() + log.Crit("error opening Batch poster parent chain wallet", "path", nodeConfig.Node.BatchPoster.ParentChainWallet.Pathname, "account", nodeConfig.Node.BatchPoster.ParentChainWallet.Account, "err", err) } - } else { - if *l1Wallet != defaultL1WalletConfig { - log.Crit("--parent-chain.wallet cannot be set if either --node.staker.l1-wallet or --node.batch-poster.l1-wallet are set") + if nodeConfig.Node.BatchPoster.ParentChainWallet.OnlyCreateKey { + return 0 } - if sequencerNeedsKey || nodeConfig.Node.BatchPoster.ParentChainWallet.OnlyCreateKey { - l1TransactionOptsBatchPoster, dataSigner, err = util.OpenWallet("l1-batch-poster", &nodeConfig.Node.BatchPoster.ParentChainWallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ID)) - if err != nil { - flag.Usage() - log.Crit("error opening Batch poster parent chain wallet", "path", nodeConfig.Node.BatchPoster.ParentChainWallet.Pathname, "account", nodeConfig.Node.BatchPoster.ParentChainWallet.Account, "err", err) - } - if nodeConfig.Node.BatchPoster.ParentChainWallet.OnlyCreateKey { - return 0 - } + } + if validatorNeedsKey || nodeConfig.Node.Staker.ParentChainWallet.OnlyCreateKey { + l1TransactionOptsValidator, _, err = util.OpenWallet("l1-validator", &nodeConfig.Node.Staker.ParentChainWallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ID)) + if err != nil { + flag.Usage() + log.Crit("error opening Validator parent chain wallet", "path", nodeConfig.Node.Staker.ParentChainWallet.Pathname, "account", nodeConfig.Node.Staker.ParentChainWallet.Account, "err", err) } - if validatorNeedsKey || nodeConfig.Node.Staker.ParentChainWallet.OnlyCreateKey { - l1TransactionOptsValidator, _, err = util.OpenWallet("l1-validator", &nodeConfig.Node.Staker.ParentChainWallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ID)) - if err != nil { - flag.Usage() - log.Crit("error opening Validator parent chain wallet", "path", nodeConfig.Node.Staker.ParentChainWallet.Pathname, "account", nodeConfig.Node.Staker.ParentChainWallet.Account, "err", err) - } - if nodeConfig.Node.Staker.ParentChainWallet.OnlyCreateKey { - return 0 - } + if nodeConfig.Node.Staker.ParentChainWallet.OnlyCreateKey { + return 0 } } @@ -317,7 +300,7 @@ func mainImpl() int { } } liveNodeConfig := genericconf.NewLiveConfig[*NodeConfig](args, nodeConfig, func(ctx context.Context, args []string) (*NodeConfig, error) { - nodeConfig, _, _, err := ParseNode(ctx, args) + nodeConfig, _, 
err := ParseNode(ctx, args) return nodeConfig, err }) @@ -423,6 +406,10 @@ func mainImpl() int { return 1 } + if nodeConfig.Metrics { + go iostat.RegisterAndPopulateMetrics(ctx, 1, 5) + } + var deferFuncs []func() defer func() { for i := range deferFuncs { @@ -452,7 +439,21 @@ func mainImpl() int { if len(allowedWasmModuleRoots) > 0 { moduleRootMatched := false for _, root := range allowedWasmModuleRoots { - if common.HexToHash(root) == moduleRoot { + bytes, err := hex.DecodeString(strings.TrimPrefix(root, "0x")) + if err == nil { + if common.HexToHash(root) == common.BytesToHash(bytes) { + moduleRootMatched = true + break + } + continue + } + locator, locatorErr := server_common.NewMachineLocator(root) + if locatorErr != nil { + log.Warn("allowed-wasm-module-roots: value not a hex nor valid path:", "value", root, "locatorErr", locatorErr, "decodeErr", err) + continue + } + path := locator.GetMachinePath(moduleRoot) + if _, err := os.Stat(path); err == nil { moduleRootMatched = true break } @@ -476,7 +477,7 @@ func mainImpl() int { } } - chainDb, l2BlockChain, err := openInitializeChainDb(ctx, stack, nodeConfig, new(big.Int).SetUint64(nodeConfig.Chain.ID), gethexec.DefaultCacheConfigFor(stack, &nodeConfig.Execution.Caching), l1Client, rollupAddrs) + chainDb, l2BlockChain, err := openInitializeChainDb(ctx, stack, nodeConfig, new(big.Int).SetUint64(nodeConfig.Chain.ID), gethexec.DefaultCacheConfigFor(stack, &nodeConfig.Execution.Caching), &nodeConfig.Persistent, l1Client, rollupAddrs) if l2BlockChain != nil { deferFuncs = append(deferFuncs, func() { l2BlockChain.Stop() }) } @@ -487,13 +488,33 @@ func mainImpl() int { return 1 } - arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false) + arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, nodeConfig.Persistent.Pebble.ExtraOptions("arbitrumdata")) deferFuncs = append(deferFuncs, func() { closeDb(arbDb, "arbDb") }) if err != nil { log.Error("failed to open database", "err", err) + log.Error("database is corrupt; delete it and try again", "database-directory", stack.InstanceDir()) return 1 } + fatalErrChan := make(chan error, 10) + + var blocksReExecutor *blocksreexecutor.BlocksReExecutor + if nodeConfig.BlocksReExecutor.Enable && l2BlockChain != nil { + blocksReExecutor = blocksreexecutor.New(&nodeConfig.BlocksReExecutor, l2BlockChain, fatalErrChan) + if nodeConfig.Init.ThenQuit { + success := make(chan struct{}) + blocksReExecutor.Start(ctx, success) + deferFuncs = append(deferFuncs, func() { blocksReExecutor.StopAndWait() }) + select { + case err := <-fatalErrChan: + log.Error("shutting down due to fatal error", "err", err) + defer log.Error("shut down due to fatal error", "err", err) + return 1 + case <-success: + } + } + } + if nodeConfig.Init.ThenQuit && nodeConfig.Init.ResetToMessage < 0 { return 0 } @@ -514,8 +535,6 @@ func mainImpl() int { return 1 } - fatalErrChan := make(chan error, 10) - var valNode *valnode.ValidationNode if sameProcessValidationNodeEnabled { valNode, err = valnode.CreateValidationNode( @@ -644,9 +663,8 @@ func mainImpl() int { // remove previous deferFuncs, StopAndWait closes database and blockchain. 
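
Illustrative sketch (not part of this diff): the allowed-wasm-module-roots hunk above accepts each configured entry either as a hex module-root hash or as a machine directory resolved through server_common.MachineLocator. Below is a minimal reading of that rule, with a made-up helper name and package; the hex branch here compares the decoded entry against the currently active module root.

package sketch

import (
	"encoding/hex"
	"os"
	"strings"

	"github.com/ethereum/go-ethereum/common"

	"github.com/offchainlabs/nitro/validator/server_common"
)

// moduleRootAllowed reports whether a single allow-list entry covers moduleRoot.
// Sketch only; the real check lives in mainImpl above.
func moduleRootAllowed(entry string, moduleRoot common.Hash) bool {
	// Hex entries are compared as hashes and are never treated as paths.
	if b, err := hex.DecodeString(strings.TrimPrefix(entry, "0x")); err == nil {
		return common.BytesToHash(b) == moduleRoot
	}
	// Non-hex entries are treated as machine locator paths: the entry is
	// allowed if a machine directory for moduleRoot exists under it.
	locator, err := server_common.NewMachineLocator(entry)
	if err != nil {
		return false
	}
	_, err = os.Stat(locator.GetMachinePath(moduleRoot))
	return err == nil
}
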
deferFuncs = []func(){func() { currentNode.StopAndWait() }} } - if nodeConfig.BlocksReExecutor.Enable && l2BlockChain != nil { - blocksReExecutor := blocksreexecutor.New(&nodeConfig.BlocksReExecutor, l2BlockChain, fatalErrChan) - blocksReExecutor.Start(ctx) + if blocksReExecutor != nil && !nodeConfig.Init.ThenQuit { + blocksReExecutor.Start(ctx, nil) deferFuncs = append(deferFuncs, func() { blocksReExecutor.StopAndWait() }) } @@ -767,7 +785,6 @@ func (c *NodeConfig) ResolveDirectoryNames() error { if err != nil { return err } - c.ParentChain.ResolveDirectoryNames(c.Persistent.Chain) c.Chain.ResolveDirectoryNames(c.Persistent.Chain) return nil @@ -837,14 +854,14 @@ func (c *NodeConfig) GetReloadInterval() time.Duration { return c.Conf.ReloadInterval } -func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.WalletConfig, *genericconf.WalletConfig, error) { +func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.WalletConfig, error) { f := flag.NewFlagSet("", flag.ContinueOnError) NodeConfigAddOptions(f) k, err := confighelpers.BeginCommonParse(f, args) if err != nil { - return nil, nil, nil, err + return nil, nil, err } l2ChainId := k.Int64("chain.id") @@ -855,17 +872,21 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa l2ChainInfoJson := k.String("chain.info-json") err = applyChainParameters(ctx, k, uint64(l2ChainId), l2ChainName, l2ChainInfoFiles, l2ChainInfoJson, l2ChainInfoIpfsUrl, l2ChainInfoIpfsDownloadPath) if err != nil { - return nil, nil, nil, err + return nil, nil, err } err = confighelpers.ApplyOverrides(f, k) if err != nil { - return nil, nil, nil, err + return nil, nil, err + } + + if err = das.FixKeysetCLIParsing("node.data-availability.rpc-aggregator.backends", k); err != nil { + return nil, nil, err } var nodeConfig NodeConfig if err := confighelpers.EndCommonParse(k, &nodeConfig); err != nil { - return nil, nil, nil, err + return nil, nil, err } // Don't print wallet passwords @@ -877,23 +898,21 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa "chain.dev-wallet.private-key": "", }) if err != nil { - return nil, nil, nil, err + return nil, nil, err } } if nodeConfig.Persistent.Chain == "" { - return nil, nil, nil, errors.New("--persistent.chain not specified") + return nil, nil, errors.New("--persistent.chain not specified") } err = nodeConfig.ResolveDirectoryNames() if err != nil { - return nil, nil, nil, err + return nil, nil, err } // Don't pass around wallet contents with normal configuration - l1Wallet := nodeConfig.ParentChain.Wallet l2DevWallet := nodeConfig.Chain.DevWallet - nodeConfig.ParentChain.Wallet = genericconf.WalletConfigDefault nodeConfig.Chain.DevWallet = genericconf.WalletConfigDefault if nodeConfig.Execution.Caching.Archive { @@ -901,9 +920,9 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa } err = nodeConfig.Validate() if err != nil { - return nil, nil, nil, err + return nil, nil, err } - return &nodeConfig, &l1Wallet, &l2DevWallet, nil + return &nodeConfig, &l2DevWallet, nil } func aggregateL2ChainInfoFiles(ctx context.Context, l2ChainInfoFiles []string, l2ChainInfoIpfsUrl string, l2ChainInfoIpfsDownloadPath string) []string { @@ -935,6 +954,7 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c } chainDefaults := map[string]interface{}{ "persistent.chain": chainInfo.ChainName, + "chain.name": chainInfo.ChainName, "chain.id": chainInfo.ChainConfig.ChainID.Uint64(), 
"parent-chain.id": chainInfo.ParentChainId, } diff --git a/cmd/pruning/pruning.go b/cmd/pruning/pruning.go index 3ef888e897..ab6ec80942 100644 --- a/cmd/pruning/pruning.go +++ b/cmd/pruning/pruning.go @@ -80,12 +80,12 @@ func (r *importantRoots) addHeader(header *types.Header, overwrite bool) error { var hashListRegex = regexp.MustCompile("^(0x)?[0-9a-fA-F]{64}(,(0x)?[0-9a-fA-F]{64})*$") // Finds important roots to retain while proving -func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) ([]common.Hash, error) { +func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) ([]common.Hash, error) { chainConfig := gethexec.TryReadStoredChainConfig(chainDb) if chainConfig == nil { return nil, errors.New("database doesn't have a chain config (was this node initialized?)") } - arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", true) + arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", true, persistentConfig.Pebble.ExtraOptions("arbitrumdata")) if err != nil { return nil, err } @@ -189,7 +189,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node return nil, fmt.Errorf("failed to get finalized block: %w", err) } l1BlockNum := l1Block.NumberU64() - tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil) + tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil, arbnode.DefaultSnapSyncConfig) if err != nil { return nil, err } @@ -232,16 +232,16 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node return roots.roots, nil } -func PruneChainDb(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) error { +func PruneChainDb(ctx context.Context, chainDb ethdb.Database, stack *node.Node, initConfig *conf.InitConfig, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses, validatorRequired bool) error { if initConfig.Prune == "" { - return pruner.RecoverPruning(stack.InstanceDir(), chainDb) + return pruner.RecoverPruning(stack.InstanceDir(), chainDb, initConfig.PruneThreads) } - root, err := findImportantRoots(ctx, chainDb, stack, initConfig, cacheConfig, l1Client, rollupAddrs, validatorRequired) + root, err := findImportantRoots(ctx, chainDb, stack, initConfig, cacheConfig, persistentConfig, l1Client, rollupAddrs, validatorRequired) if err != nil { return fmt.Errorf("failed to find root to retain for pruning: %w", err) } - pruner, err := pruner.NewPruner(chainDb, pruner.Config{Datadir: stack.InstanceDir(), BloomSize: initConfig.PruneBloomSize}) + pruner, err := pruner.NewPruner(chainDb, pruner.Config{Datadir: stack.InstanceDir(), BloomSize: initConfig.PruneBloomSize, Threads: initConfig.PruneThreads, CleanCacheSize: initConfig.PruneTrieCleanCache}) if err != nil { return err } diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 63baac072a..57596a74fa 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -115,6 +115,10 @@ func 
(dasReader *PreimageDASReader) GetByHash(ctx context.Context, hash common.H return dastree.Content(hash, oracle) } +func (dasReader *PreimageDASReader) GetKeysetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { + return dasReader.GetByHash(ctx, hash) +} + func (dasReader *PreimageDASReader) HealthCheck(ctx context.Context) error { return nil } @@ -207,8 +211,11 @@ func main() { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } var dasReader daprovider.DASReader + var dasKeysetFetcher daprovider.DASKeysetFetcher if dasEnabled { + // DAS batch and keysets are all together in the same preimage binary. dasReader = &PreimageDASReader{} + dasKeysetFetcher = &PreimageDASReader{} } backend := WavmInbox{} var keysetValidationMode = daprovider.KeysetPanicIfInvalid @@ -217,7 +224,7 @@ func main() { } var dapReaders []daprovider.Reader if dasReader != nil { - dapReaders = append(dapReaders, daprovider.NewReaderForDAS(dasReader)) + dapReaders = append(dapReaders, daprovider.NewReaderForDAS(dasReader, dasKeysetFetcher)) } dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(&BlobPreimageReader{})) inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dapReaders, keysetValidationMode) diff --git a/cmd/staterecovery/staterecovery.go b/cmd/staterecovery/staterecovery.go index 6390826a91..58ad06ad14 100644 --- a/cmd/staterecovery/staterecovery.go +++ b/cmd/staterecovery/staterecovery.go @@ -31,7 +31,7 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon return fmt.Errorf("start block parent is missing, parent block number: %d", current-1) } hashConfig := *hashdb.Defaults - hashConfig.CleanCacheSize = cacheConfig.TrieCleanLimit + hashConfig.CleanCacheSize = cacheConfig.TrieCleanLimit * 1024 * 1024 trieConfig := &trie.Config{ Preimages: false, HashDB: &hashConfig, diff --git a/cmd/util/confighelpers/configuration.go b/cmd/util/confighelpers/configuration.go index 3ff27d65ce..ff33da6732 100644 --- a/cmd/util/confighelpers/configuration.go +++ b/cmd/util/confighelpers/configuration.go @@ -92,14 +92,49 @@ func applyOverrideOverrides(f *flag.FlagSet, k *koanf.Koanf) error { return nil } +var envvarsToSplitOnComma map[string]any = map[string]any{ + "auth.api": struct{}{}, + "auth.origins": struct{}{}, + "chain.info-files": struct{}{}, + "conf.file": struct{}{}, + "execution.secondary-forwarding-target": struct{}{}, + "graphql.corsdomain": struct{}{}, + "graphql.vhosts": struct{}{}, + "http.api": struct{}{}, + "http.corsdomain": struct{}{}, + "http.vhosts": struct{}{}, + "node.data-availability.rest-aggregator.urls": struct{}{}, + "node.feed.input.secondary-url": struct{}{}, + "node.feed.input.url": struct{}{}, + "node.feed.input.verify.allowed-addresses": struct{}{}, + "node.seq-coordinator.signer.ecdsa.allowed-addresses": struct{}{}, + "p2p.bootnodes": struct{}{}, + "p2p.bootnodes-v5": struct{}{}, + "validation.api-auth": struct{}{}, + "validation.arbitrator.redis-validation-server-config.module-roots": struct{}{}, + "validation.wasm.allowed-wasm-module-roots": struct{}{}, + "ws.api": struct{}{}, + "ws.origins": struct{}{}, +} + func loadEnvironmentVariables(k *koanf.Koanf) error { envPrefix := k.String("conf.env-prefix") if len(envPrefix) != 0 { - return k.Load(env.Provider(envPrefix+"_", ".", func(s string) string { + return k.Load(env.ProviderWithValue(envPrefix+"_", ".", func(key string, v string) (string, interface{}) { // FOO__BAR -> foo-bar to handle dash in config names - s = strings.ReplaceAll(strings.ToLower( - 
strings.TrimPrefix(s, envPrefix+"_")), "__", "-") - return strings.ReplaceAll(s, "_", ".") + key = strings.ReplaceAll(strings.ToLower( + strings.TrimPrefix(key, envPrefix+"_")), "__", "-") + key = strings.ReplaceAll(key, "_", ".") + + if _, found := envvarsToSplitOnComma[key]; found { + // If there are commas in the value, split the value into a slice. + if strings.Contains(v, ",") { + return key, strings.Split(v, ",") + + } + } + + return key, v }), nil) } diff --git a/das/aggregator.go b/das/aggregator.go index d3edd58437..9aa558b92c 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "math/bits" + "sync/atomic" "time" flag "github.com/spf13/pflag" @@ -22,25 +23,39 @@ import ( "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/solgen/go/bridgegen" - "github.com/offchainlabs/nitro/util/contracts" "github.com/offchainlabs/nitro/util/pretty" ) +const metricBase string = "arb/das/rpc/aggregator/store" + +var ( + // This metric shows 1 if there was any error posting to the backends, until + // there was a Store that had no backend failures. + anyErrorGauge = metrics.GetOrRegisterGauge(metricBase+"/error/gauge", nil) + +// Other aggregator metrics are generated dynamically in the Store function. +) + type AggregatorConfig struct { - Enable bool `koanf:"enable"` - AssumedHonest int `koanf:"assumed-honest"` - Backends string `koanf:"backends"` + Enable bool `koanf:"enable"` + AssumedHonest int `koanf:"assumed-honest"` + Backends BackendConfigList `koanf:"backends"` + MaxStoreChunkBodySize int `koanf:"max-store-chunk-body-size"` } var DefaultAggregatorConfig = AggregatorConfig{ - AssumedHonest: 0, - Backends: "", + AssumedHonest: 0, + Backends: nil, + MaxStoreChunkBodySize: 512 * 1024, } +var parsedBackendsConf BackendConfigList + func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Bool(prefix+".enable", DefaultAggregatorConfig.Enable, "enable storage/retrieval of sequencer batch data from a list of RPC endpoints; this should only be used by the batch poster and not in combination with other DAS storage types") + f.Bool(prefix+".enable", DefaultAggregatorConfig.Enable, "enable storage of sequencer batch data from a list of RPC endpoints; this should only be used by the batch poster and not in combination with other DAS storage types") f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.") - f.String(prefix+".backends", DefaultAggregatorConfig.Backends, "JSON RPC backend configuration") + f.Var(&parsedBackendsConf, prefix+".backends", "JSON RPC backend configuration. 
This can be specified on the command line as a JSON array, eg: [{\"url\": \"...\", \"pubkey\": \"...\"},...], or as a JSON array in the config file.") + f.Int(prefix+".max-store-chunk-body-size", DefaultAggregatorConfig.MaxStoreChunkBodySize, "maximum HTTP POST body size to use for individual batch chunks, including JSON RPC overhead and an estimated overhead of 512B of headers") } type Aggregator struct { @@ -53,7 +68,6 @@ type Aggregator struct { maxAllowedServiceStoreFailures int keysetHash [32]byte keysetBytes []byte - addrVerifier *contracts.AddressVerifier } type ServiceDetails struct { @@ -121,11 +135,6 @@ func NewAggregatorWithSeqInboxCaller( return nil, err } - var addrVerifier *contracts.AddressVerifier - if seqInboxCaller != nil { - addrVerifier = contracts.NewAddressVerifier(seqInboxCaller) - } - return &Aggregator{ config: config.RPCAggregator, services: services, @@ -134,7 +143,6 @@ func NewAggregatorWithSeqInboxCaller( maxAllowedServiceStoreFailures: config.RPCAggregator.AssumedHonest - 1, keysetHash: keysetHash, keysetBytes: keysetBytes, - addrVerifier: addrVerifier, }, nil } @@ -157,26 +165,17 @@ type storeResponse struct { // // If Store gets not enough successful responses by the time its context is canceled // (eg via TimeoutWrapper) then it also returns an error. -// -// If Sequencer Inbox contract details are provided when a das.Aggregator is -// constructed, calls to Store(...) will try to verify the passed-in data's signature -// is from the batch poster. If the contract details are not provided, then the -// signature is not checked, which is useful for testing. -func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { - log.Trace("das.Aggregator.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig)) - if a.addrVerifier != nil { - actualSigner, err := DasRecoverSigner(message, timeout, sig) - if err != nil { - return nil, err +func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) { + log.Trace("das.Aggregator.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0)) + + allBackendsSucceeded := false + defer func() { + if allBackendsSucceeded { + anyErrorGauge.Update(0) + } else { + anyErrorGauge.Update(1) } - isBatchPosterOrSequencer, err := a.addrVerifier.IsBatchPosterOrSequencer(ctx, actualSigner) - if err != nil { - return nil, err - } - if !isBatchPosterOrSequencer { - return nil, errors.New("store request not properly signed") - } - } + }() responses := make(chan storeResponse, len(a.services)) @@ -184,7 +183,6 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, for _, d := range a.services { go func(ctx context.Context, d ServiceDetails) { storeCtx, cancel := context.WithTimeout(ctx, a.requestTimeout) - const metricBase string = "arb/das/rpc/aggregator/store" var metricWithServiceName = metricBase + "/" + d.metricName defer cancel() incFailureMetric := func() { @@ -192,7 +190,7 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, metrics.GetOrRegisterCounter(metricBase+"/error/all/total", nil).Inc(1) } - cert, err := d.service.Store(storeCtx, message, timeout, sig) + cert, err := d.service.Store(storeCtx, message, timeout) if err != nil { incFailureMetric() if errors.Is(err, context.DeadlineExceeded) { @@ -250,22 +248,22 @@ func (a 
*Aggregator) Store(ctx context.Context, message []byte, timeout uint64, err error } + var storeFailures atomic.Int64 // Collect responses from backends. certDetailsChan := make(chan certDetails) go func() { var pubKeys []blsSignatures.PublicKey var sigs []blsSignatures.Signature var aggSignersMask uint64 - var storeFailures, successfullyStoredCount int + var successfullyStoredCount int var returned bool for i := 0; i < len(a.services); i++ { - select { case <-ctx.Done(): break case r := <-responses: if r.err != nil { - storeFailures++ + _ = storeFailures.Add(1) log.Warn("das.Aggregator: Error from backend", "backend", r.details.service, "signerMask", r.details.signersMask, "err", r.err) } else { pubKeys = append(pubKeys, r.details.pubKey) @@ -289,10 +287,10 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, certDetailsChan <- cd returned = true if a.maxAllowedServiceStoreFailures > 0 && // Ignore the case where AssumedHonest = 1, probably a testnet - storeFailures+1 > a.maxAllowedServiceStoreFailures { + int(storeFailures.Load())+1 > a.maxAllowedServiceStoreFailures { log.Error("das.Aggregator: storing the batch data succeeded to enough DAS commitee members to generate the Data Availability Cert, but if one more had failed then the cert would not have been able to be generated. Look for preceding logs with \"Error from backend\"") } - } else if storeFailures > a.maxAllowedServiceStoreFailures { + } else if int(storeFailures.Load()) > a.maxAllowedServiceStoreFailures { cd := certDetails{} cd.err = fmt.Errorf("aggregator failed to store message to at least %d out of %d DASes (assuming %d are honest). %w", a.requiredServicesForStore, len(a.services), a.config.AssumedHonest, daprovider.ErrBatchToDasFailed) certDetailsChan <- cd @@ -326,6 +324,11 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, if !verified { return nil, fmt.Errorf("failed aggregate signature check. 
%w", daprovider.ErrBatchToDasFailed) } + + if storeFailures.Load() == 0 { + allBackendsSucceeded = true + } + return &aggCert, nil } diff --git a/das/aggregator_test.go b/das/aggregator_test.go index 728db6cf50..4bc209513e 100644 --- a/das/aggregator_test.go +++ b/das/aggregator_test.go @@ -54,7 +54,7 @@ func TestDAS_BasicAggregationLocal(t *testing.T) { Require(t, err) rawMsg := []byte("It's time for you to see the fnords.") - cert, err := aggregator.Store(ctx, rawMsg, 0, []byte{}) + cert, err := aggregator.Store(ctx, rawMsg, 0) Require(t, err, "Error storing message") for _, storageService := range storageServices { @@ -123,17 +123,17 @@ type WrapStore struct { DataAvailabilityServiceWriter } -func (w *WrapStore) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { +func (w *WrapStore) Store(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) { switch w.injector.shouldFail() { case success: - return w.DataAvailabilityServiceWriter.Store(ctx, message, timeout, sig) + return w.DataAvailabilityServiceWriter.Store(ctx, message, timeout) case immediateError: return nil, errors.New("expected Store failure") case tooSlow: <-ctx.Done() return nil, ctx.Err() case dataCorruption: - cert, err := w.DataAvailabilityServiceWriter.Store(ctx, message, timeout, sig) + cert, err := w.DataAvailabilityServiceWriter.Store(ctx, message, timeout) if err != nil { return nil, err } @@ -214,7 +214,7 @@ func testConfigurableStorageFailures(t *testing.T, shouldFailAggregation bool) { Require(t, err) rawMsg := []byte("It's time for you to see the fnords.") - cert, err := aggregator.Store(ctx, rawMsg, 0, []byte{}) + cert, err := aggregator.Store(ctx, rawMsg, 0) if !shouldFailAggregation { Require(t, err, "Error storing message") } else { diff --git a/das/chain_fetch_das.go b/das/chain_fetch_das.go index 99311decaa..465b54f400 100644 --- a/das/chain_fetch_das.go +++ b/das/chain_fetch_das.go @@ -8,7 +8,6 @@ import ( "errors" "sync" - "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/util/pretty" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -37,59 +36,41 @@ func (c *syncedKeysetCache) put(key [32]byte, value []byte) { c.cache[key] = value } -type ChainFetchReader struct { - daprovider.DASReader +type KeysetFetcher struct { seqInboxCaller *bridgegen.SequencerInboxCaller seqInboxFilterer *bridgegen.SequencerInboxFilterer keysetCache syncedKeysetCache } -func NewChainFetchReader(inner daprovider.DASReader, l1client arbutil.L1Interface, seqInboxAddr common.Address) (*ChainFetchReader, error) { +func NewKeysetFetcher(l1client arbutil.L1Interface, seqInboxAddr common.Address) (*KeysetFetcher, error) { seqInbox, err := bridgegen.NewSequencerInbox(seqInboxAddr, l1client) if err != nil { return nil, err } - return NewChainFetchReaderWithSeqInbox(inner, seqInbox) + return NewKeysetFetcherWithSeqInbox(seqInbox) } -func NewChainFetchReaderWithSeqInbox(inner daprovider.DASReader, seqInbox *bridgegen.SequencerInbox) (*ChainFetchReader, error) { - return &ChainFetchReader{ - DASReader: inner, +func NewKeysetFetcherWithSeqInbox(seqInbox *bridgegen.SequencerInbox) (*KeysetFetcher, error) { + return &KeysetFetcher{ seqInboxCaller: &seqInbox.SequencerInboxCaller, seqInboxFilterer: &seqInbox.SequencerInboxFilterer, keysetCache: syncedKeysetCache{cache: make(map[[32]byte][]byte)}, }, nil } -func (c *ChainFetchReader) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) 
{ - log.Trace("das.ChainFetchReader.GetByHash", "hash", pretty.PrettyHash(hash)) - return chainFetchGetByHash(ctx, c.DASReader, &c.keysetCache, c.seqInboxCaller, c.seqInboxFilterer, hash) -} -func (c *ChainFetchReader) String() string { - return "ChainFetchReader" -} +func (c *KeysetFetcher) GetKeysetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { + log.Trace("das.KeysetFetcher.GetKeysetByHash", "hash", pretty.PrettyHash(hash)) + cache := &c.keysetCache + seqInboxCaller := c.seqInboxCaller + seqInboxFilterer := c.seqInboxFilterer -func chainFetchGetByHash( - ctx context.Context, - daReader daprovider.DASReader, - cache *syncedKeysetCache, - seqInboxCaller *bridgegen.SequencerInboxCaller, - seqInboxFilterer *bridgegen.SequencerInboxFilterer, - hash common.Hash, -) ([]byte, error) { // try to fetch from the cache res, ok := cache.get(hash) if ok { return res, nil } - // try to fetch from the inner DAS - innerRes, err := daReader.GetByHash(ctx, hash) - if err == nil && dastree.ValidHash(hash, innerRes) { - return innerRes, nil - } - // try to fetch from the L1 chain blockNumBig, err := seqInboxCaller.GetKeysetCreationBlock(&bind.CallOpts{Context: ctx}, hash) if err != nil { diff --git a/das/das.go b/das/das.go index b0708e3b33..5528323a9c 100644 --- a/das/das.go +++ b/das/das.go @@ -20,7 +20,7 @@ import ( type DataAvailabilityServiceWriter interface { // Store requests that the message be stored until timeout (UTC time in unix epoch seconds). - Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) + Store(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) fmt.Stringer } @@ -41,11 +41,9 @@ type DataAvailabilityConfig struct { LocalCache CacheConfig `koanf:"local-cache"` RedisCache RedisConfig `koanf:"redis-cache"` - LocalDBStorage LocalDBStorageConfig `koanf:"local-db-storage"` - LocalFileStorage LocalFileStorageConfig `koanf:"local-file-storage"` - S3Storage S3StorageServiceConfig `koanf:"s3-storage"` - IpfsStorage IpfsStorageServiceConfig `koanf:"ipfs-storage"` - RegularSyncStorage RegularSyncStorageConfig `koanf:"regular-sync-storage"` + LocalDBStorage LocalDBStorageConfig `koanf:"local-db-storage"` + LocalFileStorage LocalFileStorageConfig `koanf:"local-file-storage"` + S3Storage S3StorageServiceConfig `koanf:"s3-storage"` Key KeyConfig `koanf:"key"` @@ -65,9 +63,9 @@ var DefaultDataAvailabilityConfig = DataAvailabilityConfig{ RequestTimeout: 5 * time.Second, Enable: false, RestAggregator: DefaultRestfulClientAggregatorConfig, + RPCAggregator: DefaultAggregatorConfig, ParentChainConnectionAttempts: 15, PanicOnError: false, - IpfsStorage: DefaultIpfsStorageServiceConfig, } func OptionalAddressFromString(s string) (*common.Address, error) { @@ -114,7 +112,6 @@ func dataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet, r role) { LocalDBStorageConfigAddOptions(prefix+".local-db-storage", f) LocalFileStorageConfigAddOptions(prefix+".local-file-storage", f) S3ConfigAddOptions(prefix+".s3-storage", f) - RegularSyncStorageConfigAddOptions(prefix+".regular-sync-storage", f) // Key config for storage KeyConfigAddOptions(prefix+".key", f) @@ -128,7 +125,6 @@ func dataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet, r role) { } // Both the Nitro node and daserver can use these options. 
- IpfsStorageServiceConfigAddOptions(prefix+".ipfs-storage", f) RestfulClientAggregatorConfigAddOptions(prefix+".rest-aggregator", f) f.String(prefix+".parent-chain-node-url", DefaultDataAvailabilityConfig.ParentChainNodeURL, "URL for parent chain node, only used in standalone daserver; when running as part of a node that node's L1 configuration is used") diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go index 5fca1e449f..ca2ee8e7d4 100644 --- a/das/dasRpcClient.go +++ b/das/dasRpcClient.go @@ -6,36 +6,145 @@ package das import ( "context" "fmt" + "strings" "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" + "golang.org/x/sync/errgroup" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/util/pretty" + "github.com/offchainlabs/nitro/util/signature" ) type DASRPCClient struct { // implements DataAvailabilityService - clnt *rpc.Client - url string + clnt *rpc.Client + url string + signer signature.DataSignerFunc + chunkSize uint64 } -func NewDASRPCClient(target string) (*DASRPCClient, error) { +func nilSigner(_ []byte) ([]byte, error) { + return []byte{}, nil +} + +const sendChunkJSONBoilerplate = "{\"jsonrpc\":\"2.0\",\"id\":4294967295,\"method\":\"das_sendChunked\",\"params\":[\"\"]}" + +func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChunkBodySize int) (*DASRPCClient, error) { clnt, err := rpc.Dial(target) if err != nil { return nil, err } + if signer == nil { + signer = nilSigner + } + + // Byte arrays are encoded in base64 + chunkSize := (maxStoreChunkBodySize - len(sendChunkJSONBoilerplate) - 512 /* headers */) / 2 + if chunkSize <= 0 { + return nil, fmt.Errorf("max-store-chunk-body-size %d doesn't leave enough room for chunk payload", maxStoreChunkBodySize) + } + return &DASRPCClient{ - clnt: clnt, - url: target, + clnt: clnt, + url: target, + signer: signer, + chunkSize: uint64(chunkSize), + }, nil +} + +func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) { + timestamp := uint64(time.Now().Unix()) + nChunks := uint64(len(message)) / c.chunkSize + lastChunkSize := uint64(len(message)) % c.chunkSize + if lastChunkSize > 0 { + nChunks++ + } else { + lastChunkSize = c.chunkSize + } + totalSize := uint64(len(message)) + + startReqSig, err := applyDasSigner(c.signer, []byte{}, timestamp, nChunks, c.chunkSize, totalSize, timeout) + if err != nil { + return nil, err + } + + var startChunkedStoreResult StartChunkedStoreResult + if err := c.clnt.CallContext(ctx, &startChunkedStoreResult, "das_startChunkedStore", hexutil.Uint64(timestamp), hexutil.Uint64(nChunks), hexutil.Uint64(c.chunkSize), hexutil.Uint64(totalSize), hexutil.Uint64(timeout), hexutil.Bytes(startReqSig)); err != nil { + if strings.Contains(err.Error(), "the method das_startChunkedStore does not exist") { + return c.legacyStore(ctx, message, timeout) + } + return nil, err + } + batchId := uint64(startChunkedStoreResult.BatchId) + + g := new(errgroup.Group) + for i := uint64(0); i < nChunks; i++ { + var chunk []byte + if i == nChunks-1 { + chunk = message[i*c.chunkSize : i*c.chunkSize+lastChunkSize] + } else { + chunk = message[i*c.chunkSize : (i+1)*c.chunkSize] + } + + inner := func(_i uint64, _chunk []byte) func() error { + return func() error { return c.sendChunk(ctx, batchId, _i, _chunk) } + } + g.Go(inner(i, chunk)) + 
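
Illustrative sketch (not part of this diff): NewDASRPCClient above derives its chunk size from --node.data-availability.rpc-aggregator.max-store-chunk-body-size by subtracting the das_sendChunked JSON-RPC envelope and an assumed 512 bytes of headers, then halving the remainder to budget for the encoding of the byte payload. The helper below reproduces that arithmetic on example numbers only; it is not the client code itself.

package sketch

import "fmt"

// chunkPlan mirrors the budget used by NewDASRPCClient/Store above: how large
// each chunk may be for a given body-size limit, and how many chunks a message
// of messageLen bytes splits into.
func chunkPlan(messageLen, maxStoreChunkBodySize int) (chunkSize, nChunks, lastChunkSize int, err error) {
	const envelope = len(`{"jsonrpc":"2.0","id":4294967295,"method":"das_sendChunked","params":[""]}`)
	chunkSize = (maxStoreChunkBodySize - envelope - 512 /* assumed header overhead */) / 2
	if chunkSize <= 0 {
		return 0, 0, 0, fmt.Errorf("max-store-chunk-body-size %d doesn't leave room for a chunk payload", maxStoreChunkBodySize)
	}
	nChunks = messageLen / chunkSize
	lastChunkSize = messageLen % chunkSize
	if lastChunkSize > 0 {
		nChunks++
	} else {
		lastChunkSize = chunkSize
	}
	return chunkSize, nChunks, lastChunkSize, nil
}

With the default 512KiB limit this yields chunks of just under 256KiB, so a 1MiB batch goes out as four full chunks plus a short final one.
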
} + if err := g.Wait(); err != nil { + return nil, err + } + + finalReqSig, err := applyDasSigner(c.signer, []byte{}, uint64(startChunkedStoreResult.BatchId)) + if err != nil { + return nil, err + } + + var storeResult StoreResult + if err := c.clnt.CallContext(ctx, &storeResult, "das_commitChunkedStore", startChunkedStoreResult.BatchId, hexutil.Bytes(finalReqSig)); err != nil { + return nil, err + } + + respSig, err := blsSignatures.SignatureFromBytes(storeResult.Sig) + if err != nil { + return nil, err + } + + return &daprovider.DataAvailabilityCertificate{ + DataHash: common.BytesToHash(storeResult.DataHash), + Timeout: uint64(storeResult.Timeout), + SignersMask: uint64(storeResult.SignersMask), + Sig: respSig, + KeysetHash: common.BytesToHash(storeResult.KeysetHash), + Version: byte(storeResult.Version), }, nil } -func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64, reqSig []byte) (*daprovider.DataAvailabilityCertificate, error) { - log.Trace("das.DASRPCClient.Store(...)", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(reqSig), "this", *c) +func (c *DASRPCClient) sendChunk(ctx context.Context, batchId, i uint64, chunk []byte) error { + chunkReqSig, err := applyDasSigner(c.signer, chunk, batchId, i) + if err != nil { + return err + } + + if err := c.clnt.CallContext(ctx, nil, "das_sendChunk", hexutil.Uint64(batchId), hexutil.Uint64(i), hexutil.Bytes(chunk), hexutil.Bytes(chunkReqSig)); err != nil { + return err + } + return nil +} + +func (c *DASRPCClient) legacyStore(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) { + log.Trace("das.DASRPCClient.Store(...)", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "this", *c) + + reqSig, err := applyDasSigner(c.signer, message, timeout) + if err != nil { + return nil, err + } + var ret StoreResult if err := c.clnt.CallContext(ctx, &ret, "das_store", hexutil.Bytes(message), hexutil.Uint64(timeout), hexutil.Bytes(reqSig)); err != nil { return nil, err diff --git a/das/dasRpcServer.go b/das/dasRpcServer.go index 2f1fc1fd42..9e6228ca5d 100644 --- a/das/dasRpcServer.go +++ b/das/dasRpcServer.go @@ -7,8 +7,11 @@ import ( "context" "errors" "fmt" + "math/rand" "net" "net/http" + "sync" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/common/hexutil" @@ -28,31 +31,47 @@ var ( rpcStoreFailureGauge = metrics.NewRegisteredGauge("arb/das/rpc/store/failure", nil) rpcStoreStoredBytesGauge = metrics.NewRegisteredGauge("arb/das/rpc/store/bytes", nil) rpcStoreDurationHistogram = metrics.NewRegisteredHistogram("arb/das/rpc/store/duration", nil, metrics.NewBoundedHistogramSample()) + + rpcSendChunkSuccessGauge = metrics.NewRegisteredGauge("arb/das/rpc/sendchunk/success", nil) + rpcSendChunkFailureGauge = metrics.NewRegisteredGauge("arb/das/rpc/sendchunk/failure", nil) ) type DASRPCServer struct { daReader DataAvailabilityServiceReader daWriter DataAvailabilityServiceWriter daHealthChecker DataAvailabilityServiceHealthChecker + + signatureVerifier *SignatureVerifier + + batches *batchBuilder } -func StartDASRPCServer(ctx context.Context, addr string, portNum uint64, rpcServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader DataAvailabilityServiceReader, daWriter DataAvailabilityServiceWriter, daHealthChecker DataAvailabilityServiceHealthChecker) (*http.Server, error) { +func StartDASRPCServer(ctx context.Context, addr string, portNum uint64, rpcServerTimeouts 
genericconf.HTTPServerTimeoutConfig, rpcServerBodyLimit int, daReader DataAvailabilityServiceReader, daWriter DataAvailabilityServiceWriter, daHealthChecker DataAvailabilityServiceHealthChecker, signatureVerifier *SignatureVerifier) (*http.Server, error) { listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", addr, portNum)) if err != nil { return nil, err } - return StartDASRPCServerOnListener(ctx, listener, rpcServerTimeouts, daReader, daWriter, daHealthChecker) + return StartDASRPCServerOnListener(ctx, listener, rpcServerTimeouts, rpcServerBodyLimit, daReader, daWriter, daHealthChecker, signatureVerifier) } -func StartDASRPCServerOnListener(ctx context.Context, listener net.Listener, rpcServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader DataAvailabilityServiceReader, daWriter DataAvailabilityServiceWriter, daHealthChecker DataAvailabilityServiceHealthChecker) (*http.Server, error) { +func StartDASRPCServerOnListener(ctx context.Context, listener net.Listener, rpcServerTimeouts genericconf.HTTPServerTimeoutConfig, rpcServerBodyLimit int, daReader DataAvailabilityServiceReader, daWriter DataAvailabilityServiceWriter, daHealthChecker DataAvailabilityServiceHealthChecker, signatureVerifier *SignatureVerifier) (*http.Server, error) { if daWriter == nil { return nil, errors.New("No writer backend was configured for DAS RPC server. Has the BLS signing key been set up (--data-availability.key.key-dir or --data-availability.key.priv-key options)?") } rpcServer := rpc.NewServer() + if legacyDASStoreAPIOnly { + rpcServer.ApplyAPIFilter(map[string]bool{"das_store": true}) + } + if rpcServerBodyLimit > 0 { + rpcServer.SetHTTPBodyLimit(rpcServerBodyLimit) + } + err := rpcServer.RegisterName("das", &DASRPCServer{ - daReader: daReader, - daWriter: daWriter, - daHealthChecker: daHealthChecker, + daReader: daReader, + daWriter: daWriter, + daHealthChecker: daHealthChecker, + signatureVerifier: signatureVerifier, + batches: newBatchBuilder(), }) if err != nil { return nil, err @@ -88,8 +107,8 @@ type StoreResult struct { Version hexutil.Uint64 `json:"version,omitempty"` } -func (serv *DASRPCServer) Store(ctx context.Context, message hexutil.Bytes, timeout hexutil.Uint64, sig hexutil.Bytes) (*StoreResult, error) { - log.Trace("dasRpc.DASRPCServer.Store", "message", pretty.FirstFewBytes(message), "message length", len(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", serv) +func (s *DASRPCServer) Store(ctx context.Context, message hexutil.Bytes, timeout hexutil.Uint64, sig hexutil.Bytes) (*StoreResult, error) { + log.Trace("dasRpc.DASRPCServer.Store", "message", pretty.FirstFewBytes(message), "message length", len(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", s) rpcStoreRequestGauge.Inc(1) start := time.Now() success := false @@ -102,7 +121,220 @@ func (serv *DASRPCServer) Store(ctx context.Context, message hexutil.Bytes, time rpcStoreDurationHistogram.Update(time.Since(start).Nanoseconds()) }() - cert, err := serv.daWriter.Store(ctx, message, uint64(timeout), sig) + if err := s.signatureVerifier.verify(ctx, message, sig, uint64(timeout)); err != nil { + return nil, err + } + + cert, err := s.daWriter.Store(ctx, message, uint64(timeout)) + if err != nil { + return nil, err + } + rpcStoreStoredBytesGauge.Inc(int64(len(message))) + success = true + return &StoreResult{ + KeysetHash: cert.KeysetHash[:], + DataHash: cert.DataHash[:], + Timeout: hexutil.Uint64(cert.Timeout), + SignersMask: 
hexutil.Uint64(cert.SignersMask), + Sig: blsSignatures.SignatureToBytes(cert.Sig), + Version: hexutil.Uint64(cert.Version), + }, nil +} + +type StartChunkedStoreResult struct { + BatchId hexutil.Uint64 `json:"batchId,omitempty"` +} + +type SendChunkResult struct { + Ok hexutil.Uint64 `json:"sendChunkResult,omitempty"` +} + +type batch struct { + chunks [][]byte + expectedChunks uint64 + seenChunks atomic.Int64 + expectedChunkSize, expectedSize uint64 + timeout uint64 + startTime time.Time +} + +const ( + maxPendingBatches = 10 + batchBuildingExpiry = 1 * time.Minute +) + +// exposed global for test control +var ( + legacyDASStoreAPIOnly = false +) + +type batchBuilder struct { + mutex sync.Mutex + batches map[uint64]*batch +} + +func newBatchBuilder() *batchBuilder { + return &batchBuilder{ + batches: make(map[uint64]*batch), + } +} + +func (b *batchBuilder) assign(nChunks, timeout, chunkSize, totalSize uint64) (uint64, error) { + b.mutex.Lock() + defer b.mutex.Unlock() + if len(b.batches) >= maxPendingBatches { + return 0, fmt.Errorf("can't start new batch, already %d pending", len(b.batches)) + } + + id := rand.Uint64() + _, ok := b.batches[id] + if ok { + return 0, fmt.Errorf("can't start new batch, try again") + } + + b.batches[id] = &batch{ + chunks: make([][]byte, nChunks), + expectedChunks: nChunks, + expectedChunkSize: chunkSize, + expectedSize: totalSize, + timeout: timeout, + startTime: time.Now(), + } + go func(id uint64) { + <-time.After(batchBuildingExpiry) + b.mutex.Lock() + // Batch will only exist if expiry was reached without it being complete. + if _, exists := b.batches[id]; exists { + rpcStoreFailureGauge.Inc(1) + delete(b.batches, id) + } + b.mutex.Unlock() + }(id) + return id, nil +} + +func (b *batchBuilder) add(id, idx uint64, data []byte) error { + b.mutex.Lock() + batch, ok := b.batches[id] + b.mutex.Unlock() + if !ok { + return fmt.Errorf("unknown batch(%d)", id) + } + + if idx >= uint64(len(batch.chunks)) { + return fmt.Errorf("batch(%d): chunk(%d) out of range", id, idx) + } + + if batch.chunks[idx] != nil { + return fmt.Errorf("batch(%d): chunk(%d) already added", id, idx) + } + + if batch.expectedChunkSize < uint64(len(data)) { + return fmt.Errorf("batch(%d): chunk(%d) greater than expected size %d, was %d", id, idx, batch.expectedChunkSize, len(data)) + } + + batch.chunks[idx] = data + batch.seenChunks.Add(1) + return nil +} + +func (b *batchBuilder) close(id uint64) ([]byte, uint64, time.Time, error) { + b.mutex.Lock() + batch, ok := b.batches[id] + delete(b.batches, id) + b.mutex.Unlock() + if !ok { + return nil, 0, time.Time{}, fmt.Errorf("unknown batch(%d)", id) + } + + if batch.expectedChunks != uint64(batch.seenChunks.Load()) { + return nil, 0, time.Time{}, fmt.Errorf("incomplete batch(%d): got %d/%d chunks", id, batch.seenChunks.Load(), batch.expectedChunks) + } + + var flattened []byte + for _, chunk := range batch.chunks { + flattened = append(flattened, chunk...) 
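
Illustrative sketch (not part of this diff): batchBuilder above backs a three-step exchange: das_startChunkedStore reserves a batch id, das_sendChunk uploads each indexed chunk, and das_commitChunkedStore reassembles and stores the batch. A client-side sketch of that sequence, where sign stands in for the repo's applyDasSigner helper (its call shapes are taken from DASRPCClient in this diff) and error handling is kept minimal.

package sketch

import (
	"context"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"

	"github.com/offchainlabs/nitro/das"
)

func chunkedStore(ctx context.Context, clnt *rpc.Client, chunks [][]byte,
	timestamp, chunkSize, totalSize, timeout uint64,
	sign func(payload []byte, extras ...uint64) ([]byte, error)) (*das.StoreResult, error) {

	// 1. das_startChunkedStore: signed over an empty payload plus
	// (timestamp, nChunks, chunkSize, totalSize, timeout).
	nChunks := uint64(len(chunks))
	startSig, err := sign([]byte{}, timestamp, nChunks, chunkSize, totalSize, timeout)
	if err != nil {
		return nil, err
	}
	var start das.StartChunkedStoreResult
	if err := clnt.CallContext(ctx, &start, "das_startChunkedStore",
		hexutil.Uint64(timestamp), hexutil.Uint64(nChunks), hexutil.Uint64(chunkSize),
		hexutil.Uint64(totalSize), hexutil.Uint64(timeout), hexutil.Bytes(startSig)); err != nil {
		return nil, err
	}

	// 2. das_sendChunk for every index: signed over the chunk bytes plus (batchId, chunkId).
	for i, chunk := range chunks {
		chunkSig, err := sign(chunk, uint64(start.BatchId), uint64(i))
		if err != nil {
			return nil, err
		}
		if err := clnt.CallContext(ctx, nil, "das_sendChunk",
			start.BatchId, hexutil.Uint64(i), hexutil.Bytes(chunk), hexutil.Bytes(chunkSig)); err != nil {
			return nil, err
		}
	}

	// 3. das_commitChunkedStore: signed over an empty payload plus the batch id;
	// the server reassembles the chunks and stores the full batch.
	commitSig, err := sign([]byte{}, uint64(start.BatchId))
	if err != nil {
		return nil, err
	}
	var result das.StoreResult
	if err := clnt.CallContext(ctx, &result, "das_commitChunkedStore", start.BatchId, hexutil.Bytes(commitSig)); err != nil {
		return nil, err
	}
	return &result, nil
}

If the server does not expose das_startChunkedStore, the DASRPCClient in this diff falls back to the legacy single-shot das_store path.
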
+ } + + if batch.expectedSize != uint64(len(flattened)) { + return nil, 0, time.Time{}, fmt.Errorf("batch(%d) was not expected size %d, was %d", id, batch.expectedSize, len(flattened)) + } + + return flattened, batch.timeout, batch.startTime, nil +} + +func (s *DASRPCServer) StartChunkedStore(ctx context.Context, timestamp, nChunks, chunkSize, totalSize, timeout hexutil.Uint64, sig hexutil.Bytes) (*StartChunkedStoreResult, error) { + rpcStoreRequestGauge.Inc(1) + failed := true + defer func() { + if failed { + rpcStoreFailureGauge.Inc(1) + } // success gague will be incremented on successful commit + }() + + if err := s.signatureVerifier.verify(ctx, []byte{}, sig, uint64(timestamp), uint64(nChunks), uint64(chunkSize), uint64(totalSize), uint64(timeout)); err != nil { + return nil, err + } + + // Prevent replay of old messages + if time.Since(time.Unix(int64(timestamp), 0)).Abs() > time.Minute { + return nil, errors.New("too much time has elapsed since request was signed") + } + + id, err := s.batches.assign(uint64(nChunks), uint64(timeout), uint64(chunkSize), uint64(totalSize)) + if err != nil { + return nil, err + } + + failed = false + return &StartChunkedStoreResult{ + BatchId: hexutil.Uint64(id), + }, nil + +} + +func (s *DASRPCServer) SendChunk(ctx context.Context, batchId, chunkId hexutil.Uint64, message hexutil.Bytes, sig hexutil.Bytes) error { + success := false + defer func() { + if success { + rpcSendChunkSuccessGauge.Inc(1) + } else { + rpcSendChunkFailureGauge.Inc(1) + } + }() + + if err := s.signatureVerifier.verify(ctx, message, sig, uint64(batchId), uint64(chunkId)); err != nil { + return err + } + + if err := s.batches.add(uint64(batchId), uint64(chunkId), message); err != nil { + return err + } + + success = true + return nil +} + +func (s *DASRPCServer) CommitChunkedStore(ctx context.Context, batchId hexutil.Uint64, sig hexutil.Bytes) (*StoreResult, error) { + if err := s.signatureVerifier.verify(ctx, []byte{}, sig, uint64(batchId)); err != nil { + return nil, err + } + + message, timeout, startTime, err := s.batches.close(uint64(batchId)) + if err != nil { + return nil, err + } + + cert, err := s.daWriter.Store(ctx, message, timeout) + success := false + defer func() { + if success { + rpcStoreSuccessGauge.Inc(1) + } else { + rpcStoreFailureGauge.Inc(1) + } + rpcStoreDurationHistogram.Update(time.Since(startTime).Nanoseconds()) + }() if err != nil { return nil, err } diff --git a/das/das_test.go b/das/das_test.go index 4377dc4dce..c52616fe20 100644 --- a/das/das_test.go +++ b/das/das_test.go @@ -47,9 +47,7 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { ParentChainNodeURL: "none", } - var syncFromStorageServicesFirst []*IterableStorageService - var syncToStorageServicesFirst []StorageService - storageService, lifecycleManager, err := CreatePersistentStorageService(firstCtx, &config, &syncFromStorageServicesFirst, &syncToStorageServicesFirst) + storageService, lifecycleManager, err := CreatePersistentStorageService(firstCtx, &config) Require(t, err) defer lifecycleManager.StopAndWaitUntil(time.Second) daWriter, err := NewSignAfterStoreDASWriter(firstCtx, config, storageService) @@ -58,7 +56,7 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { timeout := uint64(time.Now().Add(time.Hour * 24).Unix()) messageSaved := []byte("hello world") - cert, err := daWriter.Store(firstCtx, messageSaved, timeout, []byte{}) + cert, err := daWriter.Store(firstCtx, messageSaved, timeout) Require(t, err, "Error storing 
message") if cert.Timeout != timeout { Fail(t, fmt.Sprintf("Expected timeout of %d in cert, was %d", timeout, cert.Timeout)) @@ -77,9 +75,7 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { secondCtx, secondCancel := context.WithCancel(context.Background()) defer secondCancel() - var syncFromStorageServicesSecond []*IterableStorageService - var syncToStorageServicesSecond []StorageService - storageService2, lifecycleManager, err := CreatePersistentStorageService(secondCtx, &config, &syncFromStorageServicesSecond, &syncToStorageServicesSecond) + storageService2, lifecycleManager, err := CreatePersistentStorageService(secondCtx, &config) Require(t, err) defer lifecycleManager.StopAndWaitUntil(time.Second) var daReader2 DataAvailabilityServiceReader = storageService2 @@ -140,9 +136,7 @@ func testDASMissingMessage(t *testing.T, storageType string) { ParentChainNodeURL: "none", } - var syncFromStorageServices []*IterableStorageService - var syncToStorageServices []StorageService - storageService, lifecycleManager, err := CreatePersistentStorageService(ctx, &config, &syncFromStorageServices, &syncToStorageServices) + storageService, lifecycleManager, err := CreatePersistentStorageService(ctx, &config) Require(t, err) defer lifecycleManager.StopAndWaitUntil(time.Second) daWriter, err := NewSignAfterStoreDASWriter(ctx, config, storageService) @@ -151,7 +145,7 @@ func testDASMissingMessage(t *testing.T, storageType string) { messageSaved := []byte("hello world") timeout := uint64(time.Now().Add(time.Hour * 24).Unix()) - cert, err := daWriter.Store(ctx, messageSaved, timeout, []byte{}) + cert, err := daWriter.Store(ctx, messageSaved, timeout) Require(t, err, "Error storing message") if cert.Timeout != timeout { Fail(t, fmt.Sprintf("Expected timeout of %d in cert, was %d", timeout, cert.Timeout)) diff --git a/das/db_storage_service.go b/das/db_storage_service.go index 5596ff378e..0fbe1c2723 100644 --- a/das/db_storage_service.go +++ b/das/db_storage_service.go @@ -20,11 +20,9 @@ import ( ) type LocalDBStorageConfig struct { - Enable bool `koanf:"enable"` - DataDir string `koanf:"data-dir"` - DiscardAfterTimeout bool `koanf:"discard-after-timeout"` - SyncFromStorageService bool `koanf:"sync-from-storage-service"` - SyncToStorageService bool `koanf:"sync-to-storage-service"` + Enable bool `koanf:"enable"` + DataDir string `koanf:"data-dir"` + DiscardAfterTimeout bool `koanf:"discard-after-timeout"` // BadgerDB options NumMemtables int `koanf:"num-memtables"` @@ -38,11 +36,9 @@ type LocalDBStorageConfig struct { var badgerDefaultOptions = badger.DefaultOptions("") var DefaultLocalDBStorageConfig = LocalDBStorageConfig{ - Enable: false, - DataDir: "", - DiscardAfterTimeout: false, - SyncFromStorageService: false, - SyncToStorageService: false, + Enable: false, + DataDir: "", + DiscardAfterTimeout: false, NumMemtables: badgerDefaultOptions.NumMemtables, NumLevelZeroTables: badgerDefaultOptions.NumLevelZeroTables, @@ -56,8 +52,6 @@ func LocalDBStorageConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultLocalDBStorageConfig.Enable, "enable storage/retrieval of sequencer batch data from a database on the local filesystem") f.String(prefix+".data-dir", DefaultLocalDBStorageConfig.DataDir, "directory in which to store the database") f.Bool(prefix+".discard-after-timeout", DefaultLocalDBStorageConfig.DiscardAfterTimeout, "discard data after its expiry timeout") - f.Bool(prefix+".sync-from-storage-service", 
DefaultLocalDBStorageConfig.SyncFromStorageService, "enable db storage to be used as a source for regular sync storage") - f.Bool(prefix+".sync-to-storage-service", DefaultLocalDBStorageConfig.SyncToStorageService, "enable db storage to be used as a sink for regular sync storage") f.Int(prefix+".num-memtables", DefaultLocalDBStorageConfig.NumMemtables, "BadgerDB option: sets the maximum number of tables to keep in memory before stalling") f.Int(prefix+".num-level-zero-tables", DefaultLocalDBStorageConfig.NumLevelZeroTables, "BadgerDB option: sets the maximum number of Level 0 tables before compaction starts") @@ -158,13 +152,6 @@ func (dbs *DBStorageService) Put(ctx context.Context, data []byte, timeout uint6 }) } -func (dbs *DBStorageService) putKeyValue(ctx context.Context, key common.Hash, value []byte) error { - return dbs.db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry(key.Bytes(), value) - return txn.SetEntry(e) - }) -} - func (dbs *DBStorageService) Sync(ctx context.Context) error { return dbs.db.Sync() } diff --git a/das/extra_signature_checker_test.go b/das/extra_signature_checker_test.go index 2fcfac167d..11c218ae03 100644 --- a/das/extra_signature_checker_test.go +++ b/das/extra_signature_checker_test.go @@ -5,25 +5,19 @@ package das import ( "bytes" - "context" "encoding/hex" "errors" "io/ioutil" "testing" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/util/signature" ) -type StubSignatureCheckDAS struct { - keyDir string -} - -func (s *StubSignatureCheckDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { - pubkeyEncoded, err := ioutil.ReadFile(s.keyDir + "/ecdsa.pub") +func checkSig(keyDir string, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { + pubkeyEncoded, err := ioutil.ReadFile(keyDir + "/ecdsa.pub") if err != nil { return nil, err } @@ -39,22 +33,6 @@ func (s *StubSignatureCheckDAS) Store(ctx context.Context, message []byte, timeo return nil, nil } -func (s *StubSignatureCheckDAS) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { - return daprovider.KeepForever, nil -} - -func (s *StubSignatureCheckDAS) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { - return []byte{}, nil -} - -func (s *StubSignatureCheckDAS) HealthCheck(ctx context.Context) error { - return nil -} - -func (s *StubSignatureCheckDAS) String() string { - return "StubSignatureCheckDAS" -} - func TestExtraSignatureCheck(t *testing.T) { keyDir := t.TempDir() err := GenerateAndStoreECDSAKeys(keyDir) @@ -64,11 +42,11 @@ func TestExtraSignatureCheck(t *testing.T) { Require(t, err) signer := signature.DataSignerFromPrivateKey(privateKey) - var da DataAvailabilityServiceWriter = &StubSignatureCheckDAS{keyDir} - da, err = NewStoreSigningDAS(da, signer) + msg := []byte("Hello world") + timeout := uint64(1234) + sig, err := applyDasSigner(signer, msg, timeout) Require(t, err) - - _, err = da.Store(context.Background(), []byte("Hello world"), 1234, []byte{}) + _, err = checkSig(keyDir, msg, timeout, sig) Require(t, err) } diff --git a/das/factory.go b/das/factory.go index a459d1a464..fd6f60abb2 100644 --- a/das/factory.go +++ b/das/factory.go @@ -22,8 +22,6 @@ import ( func CreatePersistentStorageService( ctx context.Context, config *DataAvailabilityConfig, - syncFromStorageServices *[]*IterableStorageService, - 
syncToStorageServices *[]StorageService, ) (StorageService, *LifecycleManager, error) { storageServices := make([]StorageService, 0, 10) var lifecycleManager LifecycleManager @@ -32,14 +30,6 @@ func CreatePersistentStorageService( if err != nil { return nil, nil, err } - if config.LocalDBStorage.SyncFromStorageService { - iterableStorageService := NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(s)) - *syncFromStorageServices = append(*syncFromStorageServices, iterableStorageService) - s = iterableStorageService - } - if config.LocalDBStorage.SyncToStorageService { - *syncToStorageServices = append(*syncToStorageServices, s) - } lifecycleManager.Register(s) storageServices = append(storageServices, s) } @@ -49,14 +39,6 @@ func CreatePersistentStorageService( if err != nil { return nil, nil, err } - if config.LocalFileStorage.SyncFromStorageService { - iterableStorageService := NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(s)) - *syncFromStorageServices = append(*syncFromStorageServices, iterableStorageService) - s = iterableStorageService - } - if config.LocalFileStorage.SyncToStorageService { - *syncToStorageServices = append(*syncToStorageServices, s) - } lifecycleManager.Register(s) storageServices = append(storageServices, s) } @@ -67,23 +49,6 @@ func CreatePersistentStorageService( return nil, nil, err } lifecycleManager.Register(s) - if config.S3Storage.SyncFromStorageService { - iterableStorageService := NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(s)) - *syncFromStorageServices = append(*syncFromStorageServices, iterableStorageService) - s = iterableStorageService - } - if config.S3Storage.SyncToStorageService { - *syncToStorageServices = append(*syncToStorageServices, s) - } - storageServices = append(storageServices, s) - } - - if config.IpfsStorage.Enable { - s, err := NewIpfsStorageService(ctx, config.IpfsStorage) - if err != nil { - return nil, nil, err - } - lifecycleManager.Register(s) storageServices = append(storageServices, s) } @@ -105,8 +70,6 @@ func WrapStorageWithCache( ctx context.Context, config *DataAvailabilityConfig, storageService StorageService, - syncFromStorageServices *[]*IterableStorageService, - syncToStorageServices *[]StorageService, lifecycleManager *LifecycleManager) (StorageService, error) { if storageService == nil { return nil, nil @@ -120,14 +83,6 @@ func WrapStorageWithCache( if err != nil { return nil, err } - if config.RedisCache.SyncFromStorageService { - iterableStorageService := NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(storageService)) - *syncFromStorageServices = append(*syncFromStorageServices, iterableStorageService) - storageService = iterableStorageService - } - if config.RedisCache.SyncToStorageService { - *syncToStorageServices = append(*syncToStorageServices, storageService) - } } if config.LocalCache.Enable { storageService = NewCacheStorageService(config.LocalCache, storageService) @@ -142,48 +97,37 @@ func CreateBatchPosterDAS( dataSigner signature.DataSignerFunc, l1Reader arbutil.L1Interface, sequencerInboxAddr common.Address, -) (DataAvailabilityServiceWriter, DataAvailabilityServiceReader, *LifecycleManager, error) { +) (DataAvailabilityServiceWriter, DataAvailabilityServiceReader, *KeysetFetcher, *LifecycleManager, error) { if !config.Enable { - return nil, nil, nil, nil + return nil, nil, nil, nil, nil } // Check config requirements if !config.RPCAggregator.Enable || 
!config.RestAggregator.Enable { - return nil, nil, nil, errors.New("--node.data-availability.rpc-aggregator.enable and rest-aggregator.enable must be set when running a Batch Poster in AnyTrust mode") - } - - if config.IpfsStorage.Enable { - return nil, nil, nil, errors.New("--node.data-availability.ipfs-storage.enable may not be set when running a Nitro AnyTrust node in Batch Poster mode") + return nil, nil, nil, nil, errors.New("--node.data-availability.rpc-aggregator.enable and rest-aggregator.enable must be set when running a Batch Poster in AnyTrust mode") } // Done checking config requirements var daWriter DataAvailabilityServiceWriter - daWriter, err := NewRPCAggregator(ctx, *config) + daWriter, err := NewRPCAggregator(ctx, *config, dataSigner) if err != nil { - return nil, nil, nil, err - } - if dataSigner != nil { - // In some tests the batch poster does not sign Store requests - daWriter, err = NewStoreSigningDAS(daWriter, dataSigner) - if err != nil { - return nil, nil, nil, err - } + return nil, nil, nil, nil, err } restAgg, err := NewRestfulClientAggregator(ctx, &config.RestAggregator) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } restAgg.Start(ctx) var lifecycleManager LifecycleManager lifecycleManager.Register(restAgg) var daReader DataAvailabilityServiceReader = restAgg - daReader, err = NewChainFetchReader(daReader, l1Reader, sequencerInboxAddr) + keysetFetcher, err := NewKeysetFetcher(l1Reader, sequencerInboxAddr) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } - return daWriter, daReader, &lifecycleManager, nil + return daWriter, daReader, keysetFetcher, &lifecycleManager, nil } func CreateDAComponentsForDaserver( @@ -191,30 +135,27 @@ func CreateDAComponentsForDaserver( config *DataAvailabilityConfig, l1Reader *headerreader.HeaderReader, seqInboxAddress *common.Address, -) (DataAvailabilityServiceReader, DataAvailabilityServiceWriter, DataAvailabilityServiceHealthChecker, *LifecycleManager, error) { +) (DataAvailabilityServiceReader, DataAvailabilityServiceWriter, *SignatureVerifier, DataAvailabilityServiceHealthChecker, *LifecycleManager, error) { if !config.Enable { - return nil, nil, nil, nil, nil + return nil, nil, nil, nil, nil, nil } // Check config requirements if !config.LocalDBStorage.Enable && !config.LocalFileStorage.Enable && - !config.S3Storage.Enable && - !config.IpfsStorage.Enable { - return nil, nil, nil, nil, errors.New("At least one of --data-availability.(local-db-storage|local-file-storage|s3-storage|ipfs-storage) must be enabled.") + !config.S3Storage.Enable { + return nil, nil, nil, nil, nil, errors.New("At least one of --data-availability.(local-db-storage|local-file-storage|s3-storage) must be enabled.") } // Done checking config requirements - var syncFromStorageServices []*IterableStorageService - var syncToStorageServices []StorageService - storageService, dasLifecycleManager, err := CreatePersistentStorageService(ctx, config, &syncFromStorageServices, &syncToStorageServices) + storageService, dasLifecycleManager, err := CreatePersistentStorageService(ctx, config) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, err } - storageService, err = WrapStorageWithCache(ctx, config, storageService, &syncFromStorageServices, &syncToStorageServices, dasLifecycleManager) + storageService, err = WrapStorageWithCache(ctx, config, storageService, dasLifecycleManager) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, err } // The 
REST aggregator is used as the fallback if requested data is not present @@ -222,7 +163,7 @@ func CreateDAComponentsForDaserver( if config.RestAggregator.Enable { restAgg, err := NewRestfulClientAggregator(ctx, &config.RestAggregator) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, err } restAgg.Start(ctx) dasLifecycleManager.Register(restAgg) @@ -237,7 +178,7 @@ func CreateDAComponentsForDaserver( if syncConf.Eager { if l1Reader == nil || seqInboxAddress == nil { - return nil, nil, nil, nil, errors.New("l1-node-url and sequencer-inbox-address must be specified along with sync-to-storage.eager") + return nil, nil, nil, nil, nil, errors.New("l1-node-url and sequencer-inbox-address must be specified along with sync-to-storage.eager") } storageService, err = NewSyncingFallbackStorageService( ctx, @@ -249,7 +190,7 @@ func CreateDAComponentsForDaserver( syncConf) dasLifecycleManager.Register(storageService) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, err } } else { storageService = NewFallbackStorageService(storageService, restAgg, restAgg, @@ -262,13 +203,14 @@ func CreateDAComponentsForDaserver( var daWriter DataAvailabilityServiceWriter var daReader DataAvailabilityServiceReader = storageService var daHealthChecker DataAvailabilityServiceHealthChecker = storageService + var signatureVerifier *SignatureVerifier if config.Key.KeyDir != "" || config.Key.PrivKey != "" { var seqInboxCaller *bridgegen.SequencerInboxCaller if seqInboxAddress != nil { seqInbox, err := bridgegen.NewSequencerInbox(*seqInboxAddress, (*l1Reader).Client()) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, err } seqInboxCaller = &seqInbox.SequencerInboxCaller @@ -277,35 +219,21 @@ func CreateDAComponentsForDaserver( seqInboxCaller = nil } - privKey, err := config.Key.BLSPrivKey() + daWriter, err = NewSignAfterStoreDASWriter(ctx, *config, storageService) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, err } - daWriter, err = NewSignAfterStoreDASWriterWithSeqInboxCaller( - privKey, + signatureVerifier, err = NewSignatureVerifierWithSeqInboxCaller( seqInboxCaller, - storageService, config.ExtraSignatureCheckingPublicKey, ) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, err } } - if config.RegularSyncStorage.Enable && len(syncFromStorageServices) != 0 && len(syncToStorageServices) != 0 { - regularlySyncStorage := NewRegularlySyncStorage(syncFromStorageServices, syncToStorageServices, config.RegularSyncStorage) - regularlySyncStorage.Start(ctx) - } - - if seqInboxAddress != nil { - daReader, err = NewChainFetchReader(daReader, (*l1Reader).Client(), *seqInboxAddress) - if err != nil { - return nil, nil, nil, nil, err - } - } - - return daReader, daWriter, daHealthChecker, dasLifecycleManager, nil + return daReader, daWriter, signatureVerifier, daHealthChecker, dasLifecycleManager, nil } func CreateDAReaderForNode( @@ -313,70 +241,46 @@ func CreateDAReaderForNode( config *DataAvailabilityConfig, l1Reader *headerreader.HeaderReader, seqInboxAddress *common.Address, -) (DataAvailabilityServiceReader, *LifecycleManager, error) { +) (DataAvailabilityServiceReader, *KeysetFetcher, *LifecycleManager, error) { if !config.Enable { - return nil, nil, nil + return nil, nil, nil, nil } // Check config requirements if config.RPCAggregator.Enable { - return nil, nil, errors.New("node.data-availability.rpc-aggregator is only for Batch Poster mode") - } - - if 
!config.RestAggregator.Enable && !config.IpfsStorage.Enable { - return nil, nil, fmt.Errorf("--node.data-availability.enable was set but neither of --node.data-availability.(rest-aggregator|ipfs-storage) were enabled. When running a Nitro Anytrust node in non-Batch Poster mode, some way to get the batch data is required.") + return nil, nil, nil, errors.New("node.data-availability.rpc-aggregator is only for Batch Poster mode") } - if config.RestAggregator.SyncToStorage.Eager { - return nil, nil, errors.New("--node.data-availability.rest-aggregator.sync-to-storage.eager can't be used with a Nitro node, only lazy syncing can be used.") + if !config.RestAggregator.Enable { + return nil, nil, nil, fmt.Errorf("--node.data-availability.enable was set but not --node.data-availability.rest-aggregator. When running a Nitro Anytrust node in non-Batch Poster mode, some way to get the batch data is required.") } // Done checking config requirements - storageService, dasLifecycleManager, err := CreatePersistentStorageService(ctx, config, nil, nil) - if err != nil { - return nil, nil, err - } - + var lifecycleManager LifecycleManager var daReader DataAvailabilityServiceReader if config.RestAggregator.Enable { var restAgg *SimpleDASReaderAggregator - restAgg, err = NewRestfulClientAggregator(ctx, &config.RestAggregator) + restAgg, err := NewRestfulClientAggregator(ctx, &config.RestAggregator) if err != nil { - return nil, nil, err + return nil, nil, nil, err } restAgg.Start(ctx) - dasLifecycleManager.Register(restAgg) - - if storageService != nil { - syncConf := &config.RestAggregator.SyncToStorage - var retentionPeriodSeconds uint64 - if uint64(syncConf.RetentionPeriod) == math.MaxUint64 { - retentionPeriodSeconds = math.MaxUint64 - } else { - retentionPeriodSeconds = uint64(syncConf.RetentionPeriod.Seconds()) - } - - // This falls back to REST and updates the local IPFS repo if the data is found. - storageService = NewFallbackStorageService(storageService, restAgg, restAgg, - retentionPeriodSeconds, syncConf.IgnoreWriteErrors, true) - dasLifecycleManager.Register(storageService) - - daReader = storageService - } else { - daReader = restAgg - } + lifecycleManager.Register(restAgg) + daReader = restAgg } + var keysetFetcher *KeysetFetcher if seqInboxAddress != nil { seqInbox, err := bridgegen.NewSequencerInbox(*seqInboxAddress, (*l1Reader).Client()) if err != nil { - return nil, nil, err + return nil, nil, nil, err } - daReader, err = NewChainFetchReaderWithSeqInbox(daReader, seqInbox) + keysetFetcher, err = NewKeysetFetcherWithSeqInbox(seqInbox) if err != nil { - return nil, nil, err + return nil, nil, nil, err } + } - return daReader, dasLifecycleManager, nil + return daReader, keysetFetcher, &lifecycleManager, nil } diff --git a/das/ipfs_storage_service.bkup_go b/das/ipfs_storage_service.bkup_go deleted file mode 100644 index 43b06fd4b6..0000000000 --- a/das/ipfs_storage_service.bkup_go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2022, Offchain Labs, Inc. -// For license information, see https://github.com/nitro/blob/master/LICENSE - -// IPFS DAS backend. -// It takes advantage of IPFS' content addressing scheme to be able to directly retrieve -// the batches from IPFS using their root hash from the L1 sequencer inbox contract. 
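
With this IPFS backend removed, a non-Batch Poster AnyTrust node gets batch data only through the REST aggregator, and keysets through the KeysetFetcher that CreateDAReaderForNode now returns (see the factory.go hunk above). A hedged sketch of the resulting read path follows; the argument order for RecoverPayloadFromDasBatch is taken from the syncing_fallback_storage.go hunk further down, and the wrapper function, parameter names, and das package path are illustrative assumptions, not part of the PR:

package dasreaderexample // illustrative

import (
	"context"

	"github.com/offchainlabs/nitro/arbstate/daprovider"
	"github.com/offchainlabs/nitro/das" // assumed package path
)

// recoverBatchPayload recovers one batch's payload given the reader and keyset
// fetcher wiring that CreateDAReaderForNode now produces.
func recoverBatchPayload(
	ctx context.Context,
	batchNum uint64,
	sequencerMsg []byte, // inbox message bytes carrying the DAS certificate
	daReader daprovider.DASReader, // e.g. the REST aggregator
	keysets *das.KeysetFetcher, // fetches keysets from the SequencerInbox
) ([]byte, error) {
	// Argument order as used in processBatchDelivered further down:
	// (ctx, batch number, sequencer message, data source, keyset fetcher,
	//  preimage recorder, validate sequencer message).
	return daprovider.RecoverPayloadFromDasBatch(
		ctx, batchNum, sequencerMsg, daReader, keysets, nil, true)
}
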
- -//go:build ipfs -// +build ipfs - -package das - -import ( - "bytes" - "context" - "errors" - "io" - "math/rand" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - "github.com/ipfs/go-cid" - coreiface "github.com/ipfs/interface-go-ipfs-core" - "github.com/ipfs/interface-go-ipfs-core/options" - "github.com/ipfs/interface-go-ipfs-core/path" - "github.com/multiformats/go-multihash" - "github.com/offchainlabs/nitro/arbstate/daprovider" - "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/cmd/ipfshelper" - "github.com/offchainlabs/nitro/das/dastree" - "github.com/offchainlabs/nitro/util/pretty" - flag "github.com/spf13/pflag" -) - -type IpfsStorageServiceConfig struct { - Enable bool `koanf:"enable"` - RepoDir string `koanf:"repo-dir"` - ReadTimeout time.Duration `koanf:"read-timeout"` - Profiles string `koanf:"profiles"` - Peers []string `koanf:"peers"` - - // Pinning options - PinAfterGet bool `koanf:"pin-after-get"` - PinPercentage float64 `koanf:"pin-percentage"` -} - -var DefaultIpfsStorageServiceConfig = IpfsStorageServiceConfig{ - Enable: false, - RepoDir: "", - ReadTimeout: time.Minute, - Profiles: "", - Peers: []string{}, - - PinAfterGet: true, - PinPercentage: 100.0, -} - -func IpfsStorageServiceConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Bool(prefix+".enable", DefaultIpfsStorageServiceConfig.Enable, "enable storage/retrieval of sequencer batch data from IPFS") - f.String(prefix+".repo-dir", DefaultIpfsStorageServiceConfig.RepoDir, "directory to use to store the local IPFS repo") - f.Duration(prefix+".read-timeout", DefaultIpfsStorageServiceConfig.ReadTimeout, "timeout for IPFS reads, since by default it will wait forever. Treat timeout as not found") - f.String(prefix+".profiles", DefaultIpfsStorageServiceConfig.Profiles, "comma separated list of IPFS profiles to use, see https://docs.ipfs.tech/how-to/default-profile") - f.StringSlice(prefix+".peers", DefaultIpfsStorageServiceConfig.Peers, "list of IPFS peers to connect to, eg /ip4/1.2.3.4/tcp/12345/p2p/abc...xyz") - f.Bool(prefix+".pin-after-get", DefaultIpfsStorageServiceConfig.PinAfterGet, "pin sequencer batch data in IPFS") - f.Float64(prefix+".pin-percentage", DefaultIpfsStorageServiceConfig.PinPercentage, "percent of sequencer batch data to pin, as a floating point number in the range 0.0 to 100.0") -} - -type IpfsStorageService struct { - config IpfsStorageServiceConfig - ipfsHelper *ipfshelper.IpfsHelper - ipfsApi coreiface.CoreAPI -} - -func NewIpfsStorageService(ctx context.Context, config IpfsStorageServiceConfig) (*IpfsStorageService, error) { - ipfsHelper, err := ipfshelper.CreateIpfsHelper(ctx, config.RepoDir, false, config.Peers, config.Profiles) - if err != nil { - return nil, err - } - addrs, err := ipfsHelper.GetPeerHostAddresses() - if err != nil { - return nil, err - } - log.Info("IPFS node started up", "hostAddresses", addrs) - - return &IpfsStorageService{ - config: config, - ipfsHelper: ipfsHelper, - ipfsApi: ipfsHelper.GetAPI(), - }, nil -} - -func hashToCid(hash common.Hash) (cid.Cid, error) { - multiEncodedHashBytes, err := multihash.Encode(hash[:], multihash.KECCAK_256) - if err != nil { - return cid.Cid{}, err - } - - _, multiHash, err := multihash.MHFromBytes(multiEncodedHashBytes) - if err != nil { - return cid.Cid{}, err - } - - return cid.NewCidV1(cid.Raw, multiHash), nil -} - -// GetByHash retrieves and reconstructs one batch's data, using IPFS to retrieve the preimages -// for each chunk of data and the dastree nodes. 
-func (s *IpfsStorageService) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { - log.Trace("das.IpfsStorageService.GetByHash", "hash", pretty.PrettyHash(hash)) - - doPin := false // If true, pin every block related to this batch - if s.config.PinAfterGet { - if s.config.PinPercentage == 100.0 { - doPin = true - } else if (rand.Float64() * 100.0) <= s.config.PinPercentage { - doPin = true - } - - } - - oracle := func(h common.Hash) ([]byte, error) { - thisCid, err := hashToCid(h) - if err != nil { - return nil, err - } - - ipfsPath := path.IpfsPath(thisCid) - log.Trace("Retrieving IPFS path", "path", ipfsPath.String()) - - parentCtx := ctx - if doPin { - // If we want to pin this batch, then detach from the parent context so - // we are not canceled before s.config.ReadTimeout. - parentCtx = context.Background() - } - - timeoutCtx, cancel := context.WithTimeout(parentCtx, s.config.ReadTimeout) - defer cancel() - rdr, err := s.ipfsApi.Block().Get(timeoutCtx, ipfsPath) - if err != nil { - if timeoutCtx.Err() != nil { - return nil, ErrNotFound - } - return nil, err - } - - data, err := io.ReadAll(rdr) - if err != nil { - return nil, err - } - - if doPin { - go func() { - pinCtx, pinCancel := context.WithTimeout(context.Background(), s.config.ReadTimeout) - defer pinCancel() - err := s.ipfsApi.Pin().Add(pinCtx, ipfsPath) - // Recursive pinning not needed, each dastree preimage fits in a single - // IPFS block. - if err != nil { - // Pinning is best-effort. - log.Warn("Failed to pin in IPFS", "hash", pretty.PrettyHash(hash), "path", ipfsPath.String()) - } else { - log.Trace("Pin in IPFS successful", "hash", pretty.PrettyHash(hash), "path", ipfsPath.String()) - } - }() - } - - return data, nil - } - - return dastree.Content(hash, oracle) -} - -// Put stores all the preimages required to reconstruct the dastree for single batch, -// ie the hashed data chunks and dastree nodes. -// This takes advantage of IPFS supporting keccak256 on raw data blocks for calculating -// its CIDs, and the fact that the dastree structure uses keccak256 for addressing its -// nodes, to directly store the dastree structure in IPFS. -// IPFS default block size is 256KB and dastree max block size is 64KB so each dastree -// node and data chunk easily fits within an IPFS block. -func (s *IpfsStorageService) Put(ctx context.Context, data []byte, timeout uint64) error { - logPut("das.IpfsStorageService.Put", data, timeout, s) - - var chunks [][]byte - - record := func(_ common.Hash, value []byte, ty arbutil.PreimageType) { - chunks = append(chunks, value) - } - - _ = dastree.RecordHash(record, data) - - numChunks := len(chunks) - resultChan := make(chan error, numChunks) - for _, chunk := range chunks { - _chunk := chunk - go func() { - blockStat, err := s.ipfsApi.Block().Put( - ctx, - bytes.NewReader(_chunk), - options.Block.CidCodec("raw"), // Store the data in raw form since the hash in the CID must be the hash - // of the preimage for our lookup scheme to work. - options.Block.Hash(multihash.KECCAK_256, -1), // Use keccak256 to calculate the hash to put in the block's - // CID, since it is the same algo used by dastree. - options.Block.Pin(true)) // Keep the data in the local IPFS repo, don't GC it. 
- if err == nil { - log.Trace("Wrote IPFS path", "path", blockStat.Path().String()) - } - resultChan <- err - }() - } - - successfullyWrittenChunks := 0 - for err := range resultChan { - if err != nil { - return err - } - successfullyWrittenChunks++ - if successfullyWrittenChunks == numChunks { - return nil - } - } - panic("unreachable") -} - -func (s *IpfsStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { - return daprovider.KeepForever, nil -} - -func (s *IpfsStorageService) Sync(ctx context.Context) error { - return nil -} - -func (s *IpfsStorageService) Close(ctx context.Context) error { - return s.ipfsHelper.Close() -} - -func (s *IpfsStorageService) String() string { - return "IpfsStorageService" -} - -func (s *IpfsStorageService) HealthCheck(ctx context.Context) error { - testData := []byte("Test-Data") - err := s.Put(ctx, testData, 0) - if err != nil { - return err - } - res, err := s.GetByHash(ctx, dastree.Hash(testData)) - if err != nil { - return err - } - if !bytes.Equal(res, testData) { - return errors.New("invalid GetByHash result") - } - return nil -} diff --git a/das/ipfs_storage_service_stub.go b/das/ipfs_storage_service_stub.go deleted file mode 100644 index 5814f2c7e4..0000000000 --- a/das/ipfs_storage_service_stub.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2022, Offchain Labs, Inc. -// For license information, see https://github.com/nitro/blob/master/LICENSE - -// IPFS DAS backend stub -// a stub. we don't currently support ipfs - -//go:build !ipfs -// +build !ipfs - -package das - -import ( - "context" - "errors" - - "github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/arbstate/daprovider" - flag "github.com/spf13/pflag" -) - -var ErrIpfsNotSupported = errors.New("ipfs not supported") - -type IpfsStorageServiceConfig struct { - Enable bool -} - -var DefaultIpfsStorageServiceConfig = IpfsStorageServiceConfig{ - Enable: false, -} - -func IpfsStorageServiceConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Bool(prefix+".enable", DefaultIpfsStorageServiceConfig.Enable, "legacy option - not supported") -} - -type IpfsStorageService struct { -} - -func NewIpfsStorageService(ctx context.Context, config IpfsStorageServiceConfig) (*IpfsStorageService, error) { - return nil, ErrIpfsNotSupported -} - -func (s *IpfsStorageService) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { - return nil, ErrIpfsNotSupported -} - -func (s *IpfsStorageService) Put(ctx context.Context, data []byte, timeout uint64) error { - return ErrIpfsNotSupported -} - -func (s *IpfsStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { - return daprovider.KeepForever, ErrIpfsNotSupported -} - -func (s *IpfsStorageService) Sync(ctx context.Context) error { - return ErrIpfsNotSupported -} - -func (s *IpfsStorageService) Close(ctx context.Context) error { - return ErrIpfsNotSupported -} - -func (s *IpfsStorageService) String() string { - return "IpfsStorageService-not supported" -} - -func (s *IpfsStorageService) HealthCheck(ctx context.Context) error { - return ErrIpfsNotSupported -} diff --git a/das/ipfs_storage_service_test.go b/das/ipfs_storage_service_test.go deleted file mode 100644 index 6e1a86b234..0000000000 --- a/das/ipfs_storage_service_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2022, Offchain Labs, Inc. 
-// For license information, see https://github.com/nitro/blob/master/LICENSE - -//go:build ipfs -// +build ipfs - -package das - -import ( - "bytes" - "context" - "math" - "math/rand" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/das/dastree" -) - -func runAddAndGetTest(t *testing.T, ctx context.Context, svc *IpfsStorageService, size int) { - - data := make([]byte, size) - _, err := rand.Read(data) - Require(t, err) - - err = svc.Put(ctx, data, 0) - Require(t, err) - - hash := dastree.Hash(data).Bytes() - returnedData, err := svc.GetByHash(ctx, common.BytesToHash(hash)) - Require(t, err) - if !bytes.Equal(data, returnedData) { - Fail(t, "Returned data didn't match!") - } - -} - -func TestIpfsStorageServiceAddAndGet(t *testing.T) { - enableLogging() - ctx := context.Background() - svc, err := NewIpfsStorageService(ctx, - IpfsStorageServiceConfig{ - Enable: true, - RepoDir: t.TempDir(), - ReadTimeout: time.Minute, - Profiles: "test", - }) - defer svc.Close(ctx) - Require(t, err) - - pow2Size := 1 << 16 // 64kB - for i := 1; i < 8; i++ { - runAddAndGetTest(t, ctx, svc, int(math.Pow10(i))) - runAddAndGetTest(t, ctx, svc, pow2Size) - runAddAndGetTest(t, ctx, svc, pow2Size-1) - runAddAndGetTest(t, ctx, svc, pow2Size+1) - pow2Size = pow2Size << 1 - } -} diff --git a/das/iterable_storage_service.go b/das/iterable_storage_service.go deleted file mode 100644 index a0829f00e4..0000000000 --- a/das/iterable_storage_service.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2022, Offchain Labs, Inc. -// For license information, see https://github.com/nitro/blob/master/LICENSE - -package das - -import ( - "context" - "strconv" - "sync" - "sync/atomic" - - "github.com/ethereum/go-ethereum/common" - - "github.com/offchainlabs/nitro/das/dastree" -) - -const iteratorStorageKeyPrefix = "iterator_key_prefix_" -const iteratorBegin = "iterator_begin" -const iteratorEnd = "iterator_end" -const expirationTimeKeyPrefix = "expiration_time_key_prefix_" - -// IterationCompatibleStorageService is a StorageService which is -// compatible to be used as a backend for IterableStorageService. -type IterationCompatibleStorageService interface { - putKeyValue(ctx context.Context, key common.Hash, value []byte) error - StorageService -} - -// IterationCompatibleStorageServiceAdaptor is an adaptor used to covert iteration incompatible StorageService -// to IterationCompatibleStorageService (basically adds an empty putKeyValue to the StorageService) -type IterationCompatibleStorageServiceAdaptor struct { - StorageService -} - -func (i *IterationCompatibleStorageServiceAdaptor) putKeyValue(ctx context.Context, key common.Hash, value []byte) error { - return nil -} - -func ConvertStorageServiceToIterationCompatibleStorageService(storageService StorageService) IterationCompatibleStorageService { - service, ok := storageService.(IterationCompatibleStorageService) - if ok { - return service - } - return &IterationCompatibleStorageServiceAdaptor{storageService} -} - -// An IterableStorageService is used as a wrapper on top of a storage service, -// to add the capability of iterating over the stored date in a sequential manner. -type IterableStorageService struct { - // Local copy of iterator end. End can also be accessed by getByHash for iteratorEnd. 
- end atomic.Value // atomic access to common.Hash - IterationCompatibleStorageService - - mutex sync.Mutex -} - -func NewIterableStorageService(storageService IterationCompatibleStorageService) *IterableStorageService { - i := &IterableStorageService{IterationCompatibleStorageService: storageService} - i.end.Store(common.Hash{}) - return i -} - -func (i *IterableStorageService) Put(ctx context.Context, data []byte, expiration uint64) error { - dataHash := dastree.Hash(data) - - // Do not insert data if data is already present. - // (This is being done to avoid redundant hash being added to the - // linked list ,since it can lead to loops in the linked list.) - if _, err := i.IterationCompatibleStorageService.GetByHash(ctx, dataHash); err == nil { - return nil - } - - if err := i.IterationCompatibleStorageService.Put(ctx, data, expiration); err != nil { - return err - } - - if err := i.putKeyValue(ctx, dastree.Hash([]byte(expirationTimeKeyPrefix+EncodeStorageServiceKey(dastree.Hash(data)))), []byte(strconv.FormatUint(expiration, 10))); err != nil { - return err - } - - i.mutex.Lock() - defer i.mutex.Unlock() - - endHash := i.End(ctx) - if (endHash == common.Hash{}) { - // First element being inserted in the chain. - if err := i.putKeyValue(ctx, dastree.Hash([]byte(iteratorBegin)), dataHash.Bytes()); err != nil { - return err - } - } else { - if err := i.putKeyValue(ctx, dastree.Hash([]byte(iteratorStorageKeyPrefix+EncodeStorageServiceKey(endHash))), dataHash.Bytes()); err != nil { - return err - } - } - - if err := i.putKeyValue(ctx, dastree.Hash([]byte(iteratorEnd)), dataHash.Bytes()); err != nil { - return err - } - i.end.Store(dataHash) - - return nil -} - -func (i *IterableStorageService) GetExpirationTime(ctx context.Context, hash common.Hash) (uint64, error) { - value, err := i.IterationCompatibleStorageService.GetByHash(ctx, dastree.Hash([]byte(expirationTimeKeyPrefix+EncodeStorageServiceKey(hash)))) - if err != nil { - return 0, err - } - - expirationTime, err := strconv.ParseUint(string(value), 10, 64) - if err != nil { - return 0, err - } - return expirationTime, nil -} - -func (i *IterableStorageService) DefaultBegin() common.Hash { - return dastree.Hash([]byte(iteratorBegin)) -} - -func (i *IterableStorageService) End(ctx context.Context) common.Hash { - endHash, ok := i.end.Load().(common.Hash) - if !ok { - return common.Hash{} - } - if (endHash != common.Hash{}) { - return endHash - } - value, err := i.GetByHash(ctx, dastree.Hash([]byte(iteratorEnd))) - if err != nil { - return common.Hash{} - } - endHash = common.BytesToHash(value) - i.end.Store(endHash) - return endHash -} - -func (i *IterableStorageService) Next(ctx context.Context, hash common.Hash) common.Hash { - if hash != i.DefaultBegin() { - hash = dastree.Hash([]byte(iteratorStorageKeyPrefix + EncodeStorageServiceKey(hash))) - } - value, err := i.GetByHash(ctx, hash) - if err != nil { - return common.Hash{} - } - return common.BytesToHash(value) -} diff --git a/das/local_file_storage_service.go b/das/local_file_storage_service.go index 4ebb1d56d9..8be03bcb30 100644 --- a/das/local_file_storage_service.go +++ b/das/local_file_storage_service.go @@ -22,10 +22,8 @@ import ( ) type LocalFileStorageConfig struct { - Enable bool `koanf:"enable"` - DataDir string `koanf:"data-dir"` - SyncFromStorageService bool `koanf:"sync-from-storage-service"` - SyncToStorageService bool `koanf:"sync-to-storage-service"` + Enable bool `koanf:"enable"` + DataDir string `koanf:"data-dir"` } var DefaultLocalFileStorageConfig = 
LocalFileStorageConfig{ @@ -35,8 +33,6 @@ var DefaultLocalFileStorageConfig = LocalFileStorageConfig{ func LocalFileStorageConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultLocalFileStorageConfig.Enable, "enable storage/retrieval of sequencer batch data from a directory of files, one per batch") f.String(prefix+".data-dir", DefaultLocalFileStorageConfig.DataDir, "local data directory") - f.Bool(prefix+".sync-from-storage-service", DefaultLocalFileStorageConfig.SyncFromStorageService, "enable local storage to be used as a source for regular sync storage") - f.Bool(prefix+".sync-to-storage-service", DefaultLocalFileStorageConfig.SyncToStorageService, "enable local storage to be used as a sink for regular sync storage") } type LocalFileStorageService struct { @@ -96,32 +92,6 @@ func (s *LocalFileStorageService) Put(ctx context.Context, data []byte, timeout } -func (s *LocalFileStorageService) putKeyValue(ctx context.Context, key common.Hash, value []byte) error { - fileName := EncodeStorageServiceKey(key) - finalPath := s.dataDir + "/" + fileName - - // Use a temp file and rename to achieve atomic writes. - f, err := os.CreateTemp(s.dataDir, fileName) - if err != nil { - return err - } - err = f.Chmod(0o600) - if err != nil { - return err - } - _, err = f.Write(value) - if err != nil { - return err - } - err = f.Close() - if err != nil { - return err - } - - return os.Rename(f.Name(), finalPath) - -} - func (s *LocalFileStorageService) Sync(ctx context.Context) error { return nil } diff --git a/das/memory_backed_storage_service.go b/das/memory_backed_storage_service.go index 91f7d9a2f5..c013b501b9 100644 --- a/das/memory_backed_storage_service.go +++ b/das/memory_backed_storage_service.go @@ -53,16 +53,6 @@ func (m *MemoryBackedStorageService) Put(ctx context.Context, data []byte, expir return nil } -func (m *MemoryBackedStorageService) putKeyValue(ctx context.Context, key common.Hash, value []byte) error { - m.rwmutex.Lock() - defer m.rwmutex.Unlock() - if m.closed { - return ErrClosed - } - m.contents[key] = append([]byte{}, value...) 
- return nil -} - func (m *MemoryBackedStorageService) Sync(ctx context.Context) error { m.rwmutex.RLock() defer m.rwmutex.RUnlock() diff --git a/das/panic_wrapper.go b/das/panic_wrapper.go index dbb61cba96..3530cb651d 100644 --- a/das/panic_wrapper.go +++ b/das/panic_wrapper.go @@ -26,8 +26,8 @@ func (w *WriterPanicWrapper) String() string { return fmt.Sprintf("WriterPanicWrapper{%v}", w.DataAvailabilityServiceWriter) } -func (w *WriterPanicWrapper) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { - cert, err := w.DataAvailabilityServiceWriter.Store(ctx, message, timeout, sig) +func (w *WriterPanicWrapper) Store(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) { + cert, err := w.DataAvailabilityServiceWriter.Store(ctx, message, timeout) if err != nil { panic(fmt.Sprintf("panic wrapper Store: %v", err)) } diff --git a/das/redis_storage_service.go b/das/redis_storage_service.go index dbd85921ed..210d5cb2d4 100644 --- a/das/redis_storage_service.go +++ b/das/redis_storage_service.go @@ -24,12 +24,10 @@ import ( ) type RedisConfig struct { - Enable bool `koanf:"enable"` - Url string `koanf:"url"` - Expiration time.Duration `koanf:"expiration"` - KeyConfig string `koanf:"key-config"` - SyncFromStorageService bool `koanf:"sync-from-storage-service"` - SyncToStorageService bool `koanf:"sync-to-storage-service"` + Enable bool `koanf:"enable"` + Url string `koanf:"url"` + Expiration time.Duration `koanf:"expiration"` + KeyConfig string `koanf:"key-config"` } var DefaultRedisConfig = RedisConfig{ @@ -43,8 +41,6 @@ func RedisConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".url", DefaultRedisConfig.Url, "Redis url") f.Duration(prefix+".expiration", DefaultRedisConfig.Expiration, "Redis expiration") f.String(prefix+".key-config", DefaultRedisConfig.KeyConfig, "Redis key config") - f.Bool(prefix+".sync-from-storage-service", DefaultRedisConfig.SyncFromStorageService, "enable Redis to be used as a source for regular sync storage") - f.Bool(prefix+".sync-to-storage-service", DefaultRedisConfig.SyncToStorageService, "enable Redis to be used as a sink for regular sync storage") } type RedisStorageService struct { @@ -139,17 +135,6 @@ func (rs *RedisStorageService) Put(ctx context.Context, value []byte, timeout ui return err } -func (rs *RedisStorageService) putKeyValue(ctx context.Context, key common.Hash, value []byte) error { - // Expiration is set to zero here, since we want to keep the index inserted for iterable storage forever. - err := rs.client.Set( - ctx, string(key.Bytes()), rs.signMessage(value), 0, - ).Err() - if err != nil { - log.Error("das.RedisStorageService.putKeyValue", "err", err) - } - return err -} - func (rs *RedisStorageService) Sync(ctx context.Context) error { return rs.baseStorageService.Sync(ctx) } diff --git a/das/regular_sync_storage_test.go b/das/regular_sync_storage_test.go deleted file mode 100644 index 5fed7a90b3..0000000000 --- a/das/regular_sync_storage_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2021-2022, Offchain Labs, Inc. 
-// For license information, see https://github.com/nitro/blob/master/LICENSE - -package das - -import ( - "bytes" - "context" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - - "github.com/offchainlabs/nitro/das/dastree" -) - -func TestRegularSyncStorage(t *testing.T) { - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - syncFromStorageService := []*IterableStorageService{ - NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(NewMemoryBackedStorageService(ctx))), - NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(NewMemoryBackedStorageService(ctx))), - } - syncToStorageService := []StorageService{ - NewMemoryBackedStorageService(ctx), - NewMemoryBackedStorageService(ctx), - } - - regularSyncStorage := NewRegularlySyncStorage( - syncFromStorageService, - syncToStorageService, RegularSyncStorageConfig{ - Enable: true, - SyncInterval: 100 * time.Millisecond, - }) - - val := [][]byte{ - []byte("The first value"), - []byte("The second value"), - []byte("The third value"), - []byte("The forth value"), - } - valKey := []common.Hash{ - dastree.Hash(val[0]), - dastree.Hash(val[1]), - dastree.Hash(val[2]), - dastree.Hash(val[3]), - } - - reqCtx := context.Background() - timeout := uint64(time.Now().Add(time.Hour).Unix()) - for i := 0; i < 2; i++ { - for j := 0; j < 2; j++ { - err := syncFromStorageService[i].Put(reqCtx, val[j], timeout) - Require(t, err) - } - } - - regularSyncStorage.Start(ctx) - time.Sleep(300 * time.Millisecond) - - for i := 0; i < 2; i++ { - for j := 2; j < 4; j++ { - err := syncFromStorageService[i].Put(reqCtx, val[j], timeout) - Require(t, err) - } - } - - time.Sleep(300 * time.Millisecond) - - for i := 0; i < 2; i++ { - for j := 0; j < 4; j++ { - v, err := syncToStorageService[i].GetByHash(reqCtx, valKey[j]) - Require(t, err) - if !bytes.Equal(v, val[j]) { - t.Fatal(v, val[j]) - } - } - } -} diff --git a/das/regularly_sync_storage.go b/das/regularly_sync_storage.go deleted file mode 100644 index c6b8ed5ea1..0000000000 --- a/das/regularly_sync_storage.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2022, Offchain Labs, Inc. -// For license information, see https://github.com/nitro/blob/master/LICENSE - -package das - -import ( - "context" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - - "github.com/offchainlabs/nitro/util/stopwaiter" - - flag "github.com/spf13/pflag" -) - -type RegularSyncStorageConfig struct { - Enable bool `koanf:"enable"` - SyncInterval time.Duration `koanf:"sync-interval"` -} - -var DefaultRegularSyncStorageConfig = RegularSyncStorageConfig{ - Enable: false, - SyncInterval: 5 * time.Minute, -} - -func RegularSyncStorageConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Bool(prefix+".enable", DefaultRegularSyncStorageConfig.Enable, "enable regular storage syncing") - f.Duration(prefix+".sync-interval", DefaultRegularSyncStorageConfig.SyncInterval, "interval for running regular storage sync") -} - -// A RegularlySyncStorage is used to sync data from syncFromStorageServices to -// all the syncToStorageServices at regular intervals. -// (Only newly added data since the last sync is copied over.) 
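
The regular-sync machinery deleted here was the last consumer of the iterable-storage hooks removed above. On the aggregator side, the rpc_aggregator.go hunk a little further down replaces the JSON-string backends setting with a typed BackendConfigList and hands the data signer to the RPC client, so writers no longer take a signature argument. A hedged sketch of the new wiring; the surrounding function, backend URL, pubkey placeholder, and the RPCAggregator.Backends field type are assumptions:

package dasexample // illustrative

import (
	"context"

	"github.com/offchainlabs/nitro/arbstate/daprovider"
	"github.com/offchainlabs/nitro/das" // assumed package path
	"github.com/offchainlabs/nitro/util/signature"
)

func storeThroughAggregator(ctx context.Context, cfg das.DataAvailabilityConfig, signer signature.DataSignerFunc, batch []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) {
	// Backends are now a typed list; Set accepts the same JSON form used on the CLI.
	var backends das.BackendConfigList
	if err := backends.Set(`[{"url":"http://da.example:9876","pubkey":"<base64 BLS pubkey>"}]`); err != nil {
		return nil, err
	}
	cfg.RPCAggregator.Backends = backends // assumption: the field carries this list type

	// The signer is supplied up front, so Store drops its sig parameter
	// (see the panic_wrapper.go hunk above).
	writer, err := das.NewRPCAggregator(ctx, cfg, signer)
	if err != nil {
		return nil, err
	}
	return writer.Store(ctx, batch, timeout)
}
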
-type RegularlySyncStorage struct { - stopwaiter.StopWaiter - syncFromStorageServices []*IterableStorageService - syncToStorageServices []StorageService - lastSyncedHashOfEachSyncFromStorageService map[*IterableStorageService]common.Hash - syncInterval time.Duration -} - -func NewRegularlySyncStorage(syncFromStorageServices []*IterableStorageService, syncToStorageServices []StorageService, conf RegularSyncStorageConfig) *RegularlySyncStorage { - lastSyncedHashOfEachSyncFromStorageService := make(map[*IterableStorageService]common.Hash) - for _, syncFrom := range syncFromStorageServices { - lastSyncedHashOfEachSyncFromStorageService[syncFrom] = syncFrom.DefaultBegin() - } - return &RegularlySyncStorage{ - syncFromStorageServices: syncFromStorageServices, - syncToStorageServices: syncToStorageServices, - lastSyncedHashOfEachSyncFromStorageService: lastSyncedHashOfEachSyncFromStorageService, - syncInterval: conf.SyncInterval, - } -} - -func (r *RegularlySyncStorage) Start(ctx context.Context) { - // Start thread for regular sync - r.StopWaiter.Start(ctx, r) - r.CallIteratively(r.syncAllStorages) -} - -func (r *RegularlySyncStorage) syncAllStorages(ctx context.Context) time.Duration { - for syncFrom, lastSyncedHash := range r.lastSyncedHashOfEachSyncFromStorageService { - end := syncFrom.End(ctx) - if (end == common.Hash{}) { - continue - } - - syncHash := lastSyncedHash - for syncHash != end { - syncHash = syncFrom.Next(ctx, syncHash) - data, err := syncFrom.GetByHash(ctx, syncHash) - if err != nil { - continue - } - expirationTime, err := syncFrom.GetExpirationTime(ctx, syncHash) - if err != nil { - continue - } - for _, syncTo := range r.syncToStorageServices { - _, err = syncTo.GetByHash(ctx, syncHash) - if err == nil { - continue - } - - if err = syncTo.Put(ctx, data, expirationTime); err != nil { - log.Error("Error while running regular storage sync", "err", err) - } - } - } - r.lastSyncedHashOfEachSyncFromStorageService[syncFrom] = end - } - return r.syncInterval -} diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go index 490116a89a..24a470be5b 100644 --- a/das/rpc_aggregator.go +++ b/das/rpc_aggregator.go @@ -12,72 +12,114 @@ import ( "math/bits" "net/url" + "github.com/knadh/koanf" + "github.com/knadh/koanf/providers/confmap" "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util/metricsutil" + "github.com/offchainlabs/nitro/util/signature" "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbutil" ) type BackendConfig struct { - URL string `json:"url"` - PubKeyBase64Encoded string `json:"pubkey"` - SignerMask uint64 `json:"signermask"` + URL string `koanf:"url" json:"url"` + Pubkey string `koanf:"pubkey" json:"pubkey"` } -func NewRPCAggregator(ctx context.Context, config DataAvailabilityConfig) (*Aggregator, error) { - services, err := ParseServices(config.RPCAggregator) +type BackendConfigList []BackendConfig + +func (l *BackendConfigList) String() string { + b, _ := json.Marshal(*l) + return string(b) +} + +func (l *BackendConfigList) Set(value string) error { + return l.UnmarshalJSON([]byte(value)) +} + +func (l *BackendConfigList) UnmarshalJSON(data []byte) error { + var tmp []BackendConfig + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + *l = tmp + return nil +} + +func (l *BackendConfigList) Type() string { + return "backendConfigList" +} + +func FixKeysetCLIParsing(path string, k *koanf.Koanf) 
error { + rawBackends := k.Get(path) + if bk, ok := rawBackends.(string); ok { + err := parsedBackendsConf.UnmarshalJSON([]byte(bk)) + if err != nil { + return err + } + + // Create a map with the parsed backend configurations + tempMap := map[string]interface{}{ + path: parsedBackendsConf, + } + + // Load the map into koanf + if err = k.Load(confmap.Provider(tempMap, "."), nil); err != nil { + return err + } + + } + return nil +} + +func NewRPCAggregator(ctx context.Context, config DataAvailabilityConfig, signer signature.DataSignerFunc) (*Aggregator, error) { + services, err := ParseServices(config.RPCAggregator, signer) if err != nil { return nil, err } return NewAggregator(ctx, config, services) } -func NewRPCAggregatorWithL1Info(config DataAvailabilityConfig, l1client arbutil.L1Interface, seqInboxAddress common.Address) (*Aggregator, error) { - services, err := ParseServices(config.RPCAggregator) +func NewRPCAggregatorWithL1Info(config DataAvailabilityConfig, l1client arbutil.L1Interface, seqInboxAddress common.Address, signer signature.DataSignerFunc) (*Aggregator, error) { + services, err := ParseServices(config.RPCAggregator, signer) if err != nil { return nil, err } return NewAggregatorWithL1Info(config, services, l1client, seqInboxAddress) } -func NewRPCAggregatorWithSeqInboxCaller(config DataAvailabilityConfig, seqInboxCaller *bridgegen.SequencerInboxCaller) (*Aggregator, error) { - services, err := ParseServices(config.RPCAggregator) +func NewRPCAggregatorWithSeqInboxCaller(config DataAvailabilityConfig, seqInboxCaller *bridgegen.SequencerInboxCaller, signer signature.DataSignerFunc) (*Aggregator, error) { + services, err := ParseServices(config.RPCAggregator, signer) if err != nil { return nil, err } return NewAggregatorWithSeqInboxCaller(config, services, seqInboxCaller) } -func ParseServices(config AggregatorConfig) ([]ServiceDetails, error) { - var cs []BackendConfig - err := json.Unmarshal([]byte(config.Backends), &cs) - if err != nil { - return nil, err - } - +func ParseServices(config AggregatorConfig, signer signature.DataSignerFunc) ([]ServiceDetails, error) { var services []ServiceDetails - for _, b := range cs { + for i, b := range config.Backends { url, err := url.Parse(b.URL) if err != nil { return nil, err } metricName := metricsutil.CanonicalizeMetricName(url.Hostname()) - service, err := NewDASRPCClient(b.URL) + service, err := NewDASRPCClient(b.URL, signer, config.MaxStoreChunkBodySize) if err != nil { return nil, err } - pubKey, err := DecodeBase64BLSPublicKey([]byte(b.PubKeyBase64Encoded)) + pubKey, err := DecodeBase64BLSPublicKey([]byte(b.Pubkey)) if err != nil { return nil, err } - d, err := NewServiceDetails(service, *pubKey, b.SignerMask, metricName) + d, err := NewServiceDetails(service, *pubKey, 1<= 64 { - return crypto.VerifySignature(pubkey, dasStoreHash(message, timeout), sig[:64]) - } - return false - } - } - return &SignAfterStoreDASWriter{ - privKey: privKey, - pubKey: &publicKey, - keysetHash: ksHash, - keysetBytes: ksBuf.Bytes(), - storageService: storageService, - addrVerifier: addrVerifier, - extraBpVerifier: extraBpVerifier, + privKey: privKey, + pubKey: &publicKey, + keysetHash: ksHash, + keysetBytes: ksBuf.Bytes(), + storageService: storageService, }, nil } -func (d *SignAfterStoreDASWriter) Store( - ctx context.Context, message []byte, timeout uint64, sig []byte, -) (c *daprovider.DataAvailabilityCertificate, err error) { - log.Trace("das.SignAfterStoreDASWriter.Store", "message", pretty.FirstFewBytes(message), "timeout", 
time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", d) - var verified bool - if d.extraBpVerifier != nil { - verified = d.extraBpVerifier(message, timeout, sig) - } - - if !verified && d.addrVerifier != nil { - actualSigner, err := DasRecoverSigner(message, timeout, sig) - if err != nil { - return nil, err - } - isBatchPosterOrSequencer, err := d.addrVerifier.IsBatchPosterOrSequencer(ctx, actualSigner) - if err != nil { - return nil, err - } - if !isBatchPosterOrSequencer { - return nil, errors.New("store request not properly signed") - } - } - +func (d *SignAfterStoreDASWriter) Store(ctx context.Context, message []byte, timeout uint64) (c *daprovider.DataAvailabilityCertificate, err error) { + log.Trace("das.SignAfterStoreDASWriter.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "this", d) c = &daprovider.DataAvailabilityCertificate{ Timeout: timeout, DataHash: dastree.Hash(message), diff --git a/das/signature_verifier.go b/das/signature_verifier.go new file mode 100644 index 0000000000..0aa42bceb6 --- /dev/null +++ b/das/signature_verifier.go @@ -0,0 +1,126 @@ +// Copyright 2024, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package das + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "os" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/solgen/go/bridgegen" + "github.com/offchainlabs/nitro/util/contracts" +) + +// SignatureVerifier.Store will try to verify that the passed-in data's signature +// is from the batch poster, or from an injectable verification method. +type SignatureVerifier struct { + addrVerifier *contracts.AddressVerifier + + // Extra batch poster verifier, for local installations to have their + // own way of testing Stores. 
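
This extra verifier is driven by ExtraSignatureCheckingPublicKey, which the constructor below accepts either as a 0x-prefixed hex public key or as a path to a .pub file. A hedged setup sketch, reusing GenerateAndStoreECDSAKeys and the "/ecdsa.pub" filename from extra_signature_checker_test.go above; the wrapper function, package name, and key directory are illustrative:

package dasexample // illustrative

import (
	"context"

	"github.com/offchainlabs/nitro/das" // assumed package path
)

// newVerifierWithExtraKey enables the local batch-poster check on a daserver.
func newVerifierWithExtraKey(ctx context.Context, cfg das.DataAvailabilityConfig, keyDir string) (*das.SignatureVerifier, error) {
	// Writes an ECDSA keypair, including keyDir+"/ecdsa.pub" (same helper the
	// signature-checker test uses).
	if err := das.GenerateAndStoreECDSAKeys(keyDir); err != nil {
		return nil, err
	}
	// A file path works here; a "0x..."-prefixed hex string would too.
	cfg.ExtraSignatureCheckingPublicKey = keyDir + "/ecdsa.pub"
	return das.NewSignatureVerifier(ctx, cfg)
}
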
+ extraBpVerifier func(message []byte, sig []byte, extraFields ...uint64) bool +} + +func NewSignatureVerifier(ctx context.Context, config DataAvailabilityConfig) (*SignatureVerifier, error) { + if config.ParentChainNodeURL == "none" { + return NewSignatureVerifierWithSeqInboxCaller(nil, config.ExtraSignatureCheckingPublicKey) + } + l1client, err := GetL1Client(ctx, config.ParentChainConnectionAttempts, config.ParentChainNodeURL) + if err != nil { + return nil, err + } + seqInboxAddress, err := OptionalAddressFromString(config.SequencerInboxAddress) + if err != nil { + return nil, err + } + if seqInboxAddress == nil { + return NewSignatureVerifierWithSeqInboxCaller(nil, config.ExtraSignatureCheckingPublicKey) + } + + seqInboxCaller, err := bridgegen.NewSequencerInboxCaller(*seqInboxAddress, l1client) + if err != nil { + return nil, err + } + return NewSignatureVerifierWithSeqInboxCaller(seqInboxCaller, config.ExtraSignatureCheckingPublicKey) + +} + +func NewSignatureVerifierWithSeqInboxCaller( + seqInboxCaller *bridgegen.SequencerInboxCaller, + extraSignatureCheckingPublicKey string, +) (*SignatureVerifier, error) { + var addrVerifier *contracts.AddressVerifier + if seqInboxCaller != nil { + addrVerifier = contracts.NewAddressVerifier(seqInboxCaller) + } + + var extraBpVerifier func(message []byte, sig []byte, extraFeilds ...uint64) bool + if extraSignatureCheckingPublicKey != "" { + var pubkey []byte + var err error + if extraSignatureCheckingPublicKey[:2] == "0x" { + pubkey, err = hex.DecodeString(extraSignatureCheckingPublicKey[2:]) + if err != nil { + return nil, err + } + } else { + pubkeyEncoded, err := os.ReadFile(extraSignatureCheckingPublicKey) + if err != nil { + return nil, err + } + pubkey, err = hex.DecodeString(string(pubkeyEncoded)) + if err != nil { + return nil, err + } + } + extraBpVerifier = func(message []byte, sig []byte, extraFields ...uint64) bool { + if len(sig) >= 64 { + return crypto.VerifySignature(pubkey, dasStoreHash(message, extraFields...), sig[:64]) + } + return false + } + } + + return &SignatureVerifier{ + addrVerifier: addrVerifier, + extraBpVerifier: extraBpVerifier, + }, nil + +} + +func (v *SignatureVerifier) verify( + ctx context.Context, message []byte, sig []byte, extraFields ...uint64) error { + if v.extraBpVerifier == nil && v.addrVerifier == nil { + return errors.New("no signature verification method configured") + } + + var verified bool + if v.extraBpVerifier != nil { + verified = v.extraBpVerifier(message, sig, extraFields...) + } + + if !verified && v.addrVerifier != nil { + actualSigner, err := DasRecoverSigner(message, sig, extraFields...) 
+ if err != nil { + return err + } + verified, err = v.addrVerifier.IsBatchPosterOrSequencer(ctx, actualSigner) + if err != nil { + return err + } + } + if !verified { + return errors.New("request not properly signed") + } + return nil +} + +func (v *SignatureVerifier) String() string { + hasAddrVerifier := v.addrVerifier != nil + hasExtraBpVerifier := v.extraBpVerifier != nil + return fmt.Sprintf("SignatureVerifier{hasAddrVerifier:%v,hasExtraBpVerifier:%v}", hasAddrVerifier, hasExtraBpVerifier) +} diff --git a/das/store_signing.go b/das/store_signing.go index 8ebc1a9805..eac25e48b0 100644 --- a/das/store_signing.go +++ b/das/store_signing.go @@ -4,71 +4,35 @@ package das import ( - "context" "encoding/binary" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" - "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/signature" ) var uniquifyingPrefix = []byte("Arbitrum Nitro DAS API Store:") -func applyDasSigner(signer signature.DataSignerFunc, data []byte, timeout uint64) ([]byte, error) { - return signer(dasStoreHash(data, timeout)) +func applyDasSigner(signer signature.DataSignerFunc, data []byte, extraFields ...uint64) ([]byte, error) { + return signer(dasStoreHash(data, extraFields...)) } -func DasRecoverSigner(data []byte, timeout uint64, sig []byte) (common.Address, error) { - pk, err := crypto.SigToPub(dasStoreHash(data, timeout), sig) +func DasRecoverSigner(data []byte, sig []byte, extraFields ...uint64) (common.Address, error) { + pk, err := crypto.SigToPub(dasStoreHash(data, extraFields...), sig) if err != nil { return common.Address{}, err } return crypto.PubkeyToAddress(*pk), nil } -func dasStoreHash(data []byte, timeout uint64) []byte { - var buf8 [8]byte - binary.BigEndian.PutUint64(buf8[:], timeout) - return dastree.HashBytes(uniquifyingPrefix, buf8[:], data) -} - -type StoreSigningDAS struct { - DataAvailabilityServiceWriter - signer signature.DataSignerFunc - addr common.Address -} +func dasStoreHash(data []byte, extraFields ...uint64) []byte { + var buf []byte -func NewStoreSigningDAS(inner DataAvailabilityServiceWriter, signer signature.DataSignerFunc) (DataAvailabilityServiceWriter, error) { - sig, err := applyDasSigner(signer, []byte{}, 0) - if err != nil { - return nil, err + for _, field := range extraFields { + buf = binary.BigEndian.AppendUint64(buf, field) } - addr, err := DasRecoverSigner([]byte{}, 0, sig) - if err != nil { - return nil, err - } - return &StoreSigningDAS{inner, signer, addr}, nil -} - -func (s *StoreSigningDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { - log.Trace("das.StoreSigningDAS.Store(...)", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", s) - mySig, err := applyDasSigner(s.signer, message, timeout) - if err != nil { - return nil, err - } - return s.DataAvailabilityServiceWriter.Store(ctx, message, timeout, mySig) -} - -func (s *StoreSigningDAS) String() string { - return "StoreSigningDAS (" + s.SignerAddress().Hex() + " ," + s.DataAvailabilityServiceWriter.String() + ")" -} -func (s *StoreSigningDAS) SignerAddress() common.Address { - return s.addr + return dastree.HashBytes(uniquifyingPrefix, buf, data) } diff --git a/das/store_signing_test.go b/das/store_signing_test.go index 
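
With the timeout folded into a variadic extraFields list, signing and recovery must pass the same trailing fields in the same order, and DasRecoverSigner now takes the signature before those fields. An in-package round trip mirroring the adjusted store_signing_test.go below (applyDasSigner is unexported, so this only works inside package das); the key generation and function name are illustrative:

package das // sketch: must live here because applyDasSigner is unexported

import (
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/offchainlabs/nitro/util/signature"
)

func signRecoverRoundTrip() (bool, error) {
	privKey, err := crypto.GenerateKey()
	if err != nil {
		return false, err
	}
	signer := signature.DataSignerFromPrivateKey(privKey)

	msg, timeout := []byte("some batch data"), uint64(1234)
	sig, err := applyDasSigner(signer, msg, timeout) // timeout travels as an extra field
	if err != nil {
		return false, err
	}

	recovered, err := DasRecoverSigner(msg, sig, timeout) // same fields; signature now comes second
	if err != nil {
		return false, err
	}
	return recovered == crypto.PubkeyToAddress(privKey.PublicKey), nil
}
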
33b94f66e1..a50d1c37f4 100644 --- a/das/store_signing_test.go +++ b/das/store_signing_test.go @@ -25,7 +25,7 @@ func TestStoreSigning(t *testing.T) { sig, err := applyDasSigner(signer, weirdMessage, timeout) Require(t, err) - recoveredAddr, err := DasRecoverSigner(weirdMessage, timeout, sig) + recoveredAddr, err := DasRecoverSigner(weirdMessage, sig, timeout) Require(t, err) if recoveredAddr != addr { diff --git a/das/syncing_fallback_storage.go b/das/syncing_fallback_storage.go index 411e7a1977..3f4f2765b5 100644 --- a/das/syncing_fallback_storage.go +++ b/das/syncing_fallback_storage.go @@ -6,7 +6,6 @@ package das import ( "context" "encoding/binary" - "errors" "fmt" "math" "math/big" @@ -57,7 +56,6 @@ func init() { } type SyncToStorageConfig struct { - CheckAlreadyExists bool `koanf:"check-already-exists"` Eager bool `koanf:"eager"` EagerLowerBoundBlock uint64 `koanf:"eager-lower-bound-block"` RetentionPeriod time.Duration `koanf:"retention-period"` @@ -68,7 +66,6 @@ type SyncToStorageConfig struct { } var DefaultSyncToStorageConfig = SyncToStorageConfig{ - CheckAlreadyExists: true, Eager: false, EagerLowerBoundBlock: 0, RetentionPeriod: time.Duration(math.MaxInt64), @@ -79,7 +76,6 @@ var DefaultSyncToStorageConfig = SyncToStorageConfig{ } func SyncToStorageConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Bool(prefix+".check-already-exists", DefaultSyncToStorageConfig.CheckAlreadyExists, "check if the data already exists in this DAS's storage. Must be disabled for fast sync with an IPFS backend") f.Bool(prefix+".eager", DefaultSyncToStorageConfig.Eager, "eagerly sync batch data to this DAS's storage from the rest endpoints, using L1 as the index of batch data hashes; otherwise only sync lazily") f.Uint64(prefix+".eager-lower-bound-block", DefaultSyncToStorageConfig.EagerLowerBoundBlock, "when eagerly syncing, start indexing forward from this L1 block. Only used if there is no sync state") f.Uint64(prefix+".parent-chain-blocks-per-read", DefaultSyncToStorageConfig.ParentChainBlocksPerRead, "when eagerly syncing, max l1 blocks to read per poll") @@ -92,9 +88,10 @@ func SyncToStorageConfigAddOptions(prefix string, f *flag.FlagSet) { type l1SyncService struct { stopwaiter.StopWaiter - config SyncToStorageConfig - syncTo StorageService - dataSource daprovider.DASReader + config SyncToStorageConfig + syncTo StorageService + dataSource daprovider.DASReader + keysetFetcher *KeysetFetcher l1Reader *headerreader.HeaderReader inboxContract *bridgegen.SequencerInbox @@ -106,7 +103,9 @@ type l1SyncService struct { lastBatchAcc common.Hash } -const nextBlockNoFilename = "nextBlockNumber" +// The original syncing process had a bug, so the file was renamed to cause any mirrors +// in the wild to re-sync from their configured starting block number. 
+const nextBlockNoFilename = "nextBlockNumberV2" func readSyncStateOrDefault(syncDir string, dflt uint64) uint64 { if syncDir == "" { @@ -167,8 +166,7 @@ func newl1SyncService(config *SyncToStorageConfig, syncTo StorageService, dataSo if err != nil { return nil, err } - // make sure that as we sync, any Keysets missing from dataSource will fetched from the L1 chain - dataSource, err = NewChainFetchReader(dataSource, l1Client, inboxAddr) + keysetFetcher, err := NewKeysetFetcher(l1Client, inboxAddr) if err != nil { return nil, err } @@ -176,6 +174,7 @@ func newl1SyncService(config *SyncToStorageConfig, syncTo StorageService, dataSo config: *config, syncTo: syncTo, dataSource: dataSource, + keysetFetcher: keysetFetcher, l1Reader: l1Reader, inboxContract: inboxContract, inboxAddr: inboxAddr, @@ -212,31 +211,18 @@ func (s *l1SyncService) processBatchDelivered(ctx context.Context, batchDelivere binary.BigEndian.PutUint64(header[32:40], deliveredEvent.AfterDelayedMessagesRead.Uint64()) data = append(header, data...) - preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte) - preimageRecorder := daprovider.RecordPreimagesTo(preimages) - if _, err = daprovider.RecoverPayloadFromDasBatch(ctx, deliveredEvent.BatchSequenceNumber.Uint64(), data, s.dataSource, preimageRecorder, true); err != nil { - if errors.Is(err, daprovider.ErrSeqMsgValidation) { - log.Error(err.Error()) - } else { - log.Error("recover payload failed", "txhash", batchDeliveredLog.TxHash, "data", data) - return err - } + var payload []byte + if payload, err = daprovider.RecoverPayloadFromDasBatch(ctx, deliveredEvent.BatchSequenceNumber.Uint64(), data, s.dataSource, s.keysetFetcher, nil, true); err != nil { + log.Error("recover payload failed", "txhash", batchDeliveredLog.TxHash, "data", data) + return err } - for _, preimages := range preimages { - for hash, contents := range preimages { - var err error - if s.config.CheckAlreadyExists { - _, err = s.syncTo.GetByHash(ctx, hash) - } - if err == nil || errors.Is(err, ErrNotFound) { - if err := s.syncTo.Put(ctx, contents, storeUntil); err != nil { - return err - } - } else { - return err - } + + if payload != nil { + if err := s.syncTo.Put(ctx, payload, storeUntil); err != nil { + return err } } + seqNumber := deliveredEvent.BatchSequenceNumber if seqNumber == nil { seqNumber = common.Big0 diff --git a/execution/gethexec/blockchain.go b/execution/gethexec/blockchain.go index 2a20c3da26..1d5060ca8a 100644 --- a/execution/gethexec/blockchain.go +++ b/execution/gethexec/blockchain.go @@ -37,6 +37,7 @@ type CachingConfig struct { SnapshotRestoreGasLimit uint64 `koanf:"snapshot-restore-gas-limit"` MaxNumberOfBlocksToSkipStateSaving uint32 `koanf:"max-number-of-blocks-to-skip-state-saving"` MaxAmountOfGasToSkipStateSaving uint64 `koanf:"max-amount-of-gas-to-skip-state-saving"` + StylusLRUCache uint32 `koanf:"stylus-lru-cache"` } func CachingConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -51,6 +52,7 @@ func CachingConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".snapshot-restore-gas-limit", DefaultCachingConfig.SnapshotRestoreGasLimit, "maximum gas rolled back to recover snapshot") f.Uint32(prefix+".max-number-of-blocks-to-skip-state-saving", DefaultCachingConfig.MaxNumberOfBlocksToSkipStateSaving, "maximum number of blocks to skip state saving to persistent storage (archive node only) -- warning: this option seems to cause issues") f.Uint64(prefix+".max-amount-of-gas-to-skip-state-saving", DefaultCachingConfig.MaxAmountOfGasToSkipStateSaving, "maximum 
amount of gas in blocks to skip saving state to Persistent storage (archive node only) -- warning: this option seems to cause issues") + f.Uint32(prefix+".stylus-lru-cache", DefaultCachingConfig.StylusLRUCache, "initialized stylus programs to keep in LRU cache") } var DefaultCachingConfig = CachingConfig{ @@ -65,6 +67,22 @@ var DefaultCachingConfig = CachingConfig{ SnapshotRestoreGasLimit: 300_000_000_000, MaxNumberOfBlocksToSkipStateSaving: 0, MaxAmountOfGasToSkipStateSaving: 0, + StylusLRUCache: 256, +} + +var TestCachingConfig = CachingConfig{ + Archive: false, + BlockCount: 128, + BlockAge: 30 * time.Minute, + TrieTimeLimit: time.Hour, + TrieDirtyCache: 1024, + TrieCleanCache: 600, + SnapshotCache: 400, + DatabaseCache: 2048, + SnapshotRestoreGasLimit: 300_000_000_000, + MaxNumberOfBlocksToSkipStateSaving: 0, + MaxAmountOfGasToSkipStateSaving: 0, + StylusLRUCache: 0, } // TODO remove stack from parameters as it is no longer needed here diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 6fdc88a8b2..5f8bf43ea1 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -1,6 +1,9 @@ // Copyright 2022-2024, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE +//go:build !wasm +// +build !wasm + package gethexec /* @@ -10,10 +13,15 @@ package gethexec */ import "C" import ( + "bytes" "context" "encoding/binary" "errors" "fmt" + "os" + "path" + "runtime/pprof" + "runtime/trace" "sync" "testing" "time" @@ -25,10 +33,12 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" + "github.com/google/uuid" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/l1pricing" + "github.com/offchainlabs/nitro/arbos/programs" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/util/arbmath" @@ -37,6 +47,7 @@ import ( ) var ( + l1GasPriceEstimateGauge = metrics.NewRegisteredGauge("arb/l1gasprice/estimate", nil) baseFeeGauge = metrics.NewRegisteredGauge("arb/block/basefee", nil) blockGasUsedHistogram = metrics.NewRegisteredHistogram("arb/block/gasused", nil, metrics.NewBoundedHistogramSample()) txCountHistogram = metrics.NewRegisteredHistogram("arb/block/transactions/count", nil, metrics.NewBoundedHistogramSample()) @@ -44,6 +55,20 @@ var ( gasUsedSinceStartupCounter = metrics.NewRegisteredCounter("arb/gas_used", nil) ) +type L1PriceDataOfMsg struct { + callDataUnits uint64 + cummulativeCallDataUnits uint64 + l1GasCharged uint64 + cummulativeL1GasCharged uint64 +} + +type L1PriceData struct { + mutex sync.RWMutex + startOfL1PriceDataCache arbutil.MessageIndex + endOfL1PriceDataCache arbutil.MessageIndex + msgToL1PriceData []L1PriceDataOfMsg +} + type ExecutionEngine struct { stopwaiter.StopWaiter @@ -63,16 +88,74 @@ type ExecutionEngine struct { reorgSequencing bool prefetchBlock bool + + cachedL1PriceData *L1PriceData +} + +func NewL1PriceData() *L1PriceData { + return &L1PriceData{ + msgToL1PriceData: []L1PriceDataOfMsg{}, + } } func NewExecutionEngine(bc *core.BlockChain) (*ExecutionEngine, error) { return &ExecutionEngine{ - bc: bc, - resequenceChan: make(chan []*arbostypes.MessageWithMetadata), - newBlockNotifier: make(chan struct{}, 1), + bc: bc, + resequenceChan: make(chan []*arbostypes.MessageWithMetadata), + 
newBlockNotifier: make(chan struct{}, 1), + cachedL1PriceData: NewL1PriceData(), }, nil } +func (s *ExecutionEngine) backlogCallDataUnits() uint64 { + s.cachedL1PriceData.mutex.RLock() + defer s.cachedL1PriceData.mutex.RUnlock() + + size := len(s.cachedL1PriceData.msgToL1PriceData) + if size == 0 { + return 0 + } + return (s.cachedL1PriceData.msgToL1PriceData[size-1].cummulativeCallDataUnits - + s.cachedL1PriceData.msgToL1PriceData[0].cummulativeCallDataUnits + + s.cachedL1PriceData.msgToL1PriceData[0].callDataUnits) +} + +func (s *ExecutionEngine) backlogL1GasCharged() uint64 { + s.cachedL1PriceData.mutex.RLock() + defer s.cachedL1PriceData.mutex.RUnlock() + + size := len(s.cachedL1PriceData.msgToL1PriceData) + if size == 0 { + return 0 + } + return (s.cachedL1PriceData.msgToL1PriceData[size-1].cummulativeL1GasCharged - + s.cachedL1PriceData.msgToL1PriceData[0].cummulativeL1GasCharged + + s.cachedL1PriceData.msgToL1PriceData[0].l1GasCharged) +} + +func (s *ExecutionEngine) MarkFeedStart(to arbutil.MessageIndex) { + s.cachedL1PriceData.mutex.Lock() + defer s.cachedL1PriceData.mutex.Unlock() + + if to < s.cachedL1PriceData.startOfL1PriceDataCache { + log.Info("trying to trim older cache which doesnt exist anymore") + } else if to >= s.cachedL1PriceData.endOfL1PriceDataCache { + s.cachedL1PriceData.startOfL1PriceDataCache = 0 + s.cachedL1PriceData.endOfL1PriceDataCache = 0 + s.cachedL1PriceData.msgToL1PriceData = []L1PriceDataOfMsg{} + } else { + newStart := to - s.cachedL1PriceData.startOfL1PriceDataCache + 1 + s.cachedL1PriceData.msgToL1PriceData = s.cachedL1PriceData.msgToL1PriceData[newStart:] + s.cachedL1PriceData.startOfL1PriceDataCache = to + 1 + } +} + +func (s *ExecutionEngine) Initialize(rustCacheSize uint32) { + if rustCacheSize != 0 { + programs.ResizeWasmLruCache(rustCacheSize) + } +} + func (s *ExecutionEngine) SetRecorder(recorder *BlockRecorder) { if s.Started() { panic("trying to set recorder after start") @@ -117,7 +200,7 @@ func (s *ExecutionEngine) GetBatchFetcher() execution.BatchFetcher { return s.consensus } -func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) ([]*execution.MessageResult, error) { +func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadataAndBlockHash, oldMessages []*arbostypes.MessageWithMetadata) ([]*execution.MessageResult, error) { if count == 0 { return nil, errors.New("cannot reorg out genesis") } @@ -138,8 +221,9 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost return nil, nil } + tag := s.bc.StateCache().WasmCacheTag() // reorg Rust-side VM state - C.stylus_reorg_vm(C.uint64_t(blockNum)) + C.stylus_reorg_vm(C.uint64_t(blockNum), C.uint32_t(tag)) err := s.bc.ReorgToOldBlock(targetBlock) if err != nil { @@ -150,9 +234,9 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost for i := range newMessages { var msgForPrefetch *arbostypes.MessageWithMetadata if i < len(newMessages)-1 { - msgForPrefetch = &newMessages[i] + msgForPrefetch = &newMessages[i].MessageWithMeta } - msgResult, err := s.digestMessageWithBlockMutex(count+arbutil.MessageIndex(i), &newMessages[i], msgForPrefetch) + msgResult, err := s.digestMessageWithBlockMutex(count+arbutil.MessageIndex(i), &newMessages[i].MessageWithMeta, msgForPrefetch) if err != nil { return nil, err } @@ -198,7 +282,7 @@ func (s *ExecutionEngine) NextDelayedMessageNumber() (uint64, error) { return 
currentHeader.Nonce.Uint64(), nil } -func messageFromTxes(header *arbostypes.L1IncomingMessageHeader, txes types.Transactions, txErrors []error) (*arbostypes.L1IncomingMessage, error) { +func MessageFromTxes(header *arbostypes.L1IncomingMessageHeader, txes types.Transactions, txErrors []error) (*arbostypes.L1IncomingMessage, error) { var l2Message []byte if len(txes) == 1 && txErrors[0] == nil { txBytes, err := txes[0].MarshalBinary() @@ -224,6 +308,9 @@ func messageFromTxes(header *arbostypes.L1IncomingMessageHeader, txes types.Tran l2Message = append(l2Message, txBytes...) } } + if len(l2Message) > arbostypes.MaxL2MessageSize { + return nil, errors.New("l2message too long") + } return &arbostypes.L1IncomingMessage{ Header: header, L2msg: l2Message, @@ -419,6 +506,44 @@ func (s *ExecutionEngine) SequenceTransactionsEspresso( }) } +// SequenceTransactionsWithProfiling runs SequenceTransactions with tracing and +// CPU profiling enabled. If the block creation takes longer than 2 seconds, it +// keeps both and prints out filenames in an error log line. +func (s *ExecutionEngine) SequenceTransactionsWithProfiling(header *arbostypes.L1IncomingMessageHeader, txes types.Transactions, hooks *arbos.SequencingHooks) (*types.Block, error) { + pprofBuf, traceBuf := bytes.NewBuffer(nil), bytes.NewBuffer(nil) + if err := pprof.StartCPUProfile(pprofBuf); err != nil { + log.Error("Starting CPU profiling", "error", err) + } + if err := trace.Start(traceBuf); err != nil { + log.Error("Starting tracing", "error", err) + } + start := time.Now() + res, err := s.SequenceTransactions(header, txes, hooks) + elapsed := time.Since(start) + pprof.StopCPUProfile() + trace.Stop() + if elapsed > 2*time.Second { + writeAndLog(pprofBuf, traceBuf) + return res, err + } + return res, err +} + +func writeAndLog(pprof, trace *bytes.Buffer) { + id := uuid.NewString() + pprofFile := path.Join(os.TempDir(), id+".pprof") + if err := os.WriteFile(pprofFile, pprof.Bytes(), 0o600); err != nil { + log.Error("Creating temporary file for pprof", "fileName", pprofFile, "error", err) + return + } + traceFile := path.Join(os.TempDir(), id+".trace") + if err := os.WriteFile(traceFile, trace.Bytes(), 0o600); err != nil { + log.Error("Creating temporary file for trace", "fileName", traceFile, "error", err) + return + } + log.Info("Transactions sequencing took longer than 2 seconds, created pprof and trace files", "pprof", pprofFile, "traceFile", traceFile) +} + func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes.L1IncomingMessageHeader, txes types.Transactions, hooks *arbos.SequencingHooks) (*types.Block, error) { lastBlockHeader, err := s.getCurrentHeader() if err != nil { @@ -468,7 +593,7 @@ func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes. return nil, nil } - msg, err := messageFromTxes(header, txes, hooks.TxErrors) + msg, err := MessageFromTxes(header, txes, hooks.TxErrors) if err != nil { return nil, err } @@ -498,8 +623,7 @@ func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes. 
if err != nil { return nil, err } - - s.cacheL1PriceDataOfMsg(pos, receipts, block) + s.cacheL1PriceDataOfMsg(pos, receipts, block, false) return block, nil } @@ -519,7 +643,7 @@ func (s *ExecutionEngine) sequenceDelayedMessageWithBlockMutex(message *arbostyp expectedDelayed := currentHeader.Nonce.Uint64() - lastMsg, err := s.BlockNumberToMessageIndex(currentHeader.Number.Uint64()) + pos, err := s.BlockNumberToMessageIndex(currentHeader.Number.Uint64() + 1) if err != nil { return nil, err } @@ -545,7 +669,7 @@ func (s *ExecutionEngine) sequenceDelayedMessageWithBlockMutex(message *arbostyp return nil, err } - err = s.consensus.WriteMessageFromSequencer(lastMsg+1, messageWithMeta, *msgResult) + err = s.consensus.WriteMessageFromSequencer(pos, messageWithMeta, *msgResult) if err != nil { return nil, err } @@ -554,8 +678,9 @@ func (s *ExecutionEngine) sequenceDelayedMessageWithBlockMutex(message *arbostyp if err != nil { return nil, err } + s.cacheL1PriceDataOfMsg(pos, receipts, block, true) - log.Info("ExecutionEngine: Added DelayedMessages", "pos", lastMsg+1, "delayed", delayedSeqNum, "block-header", block.Header()) + log.Info("ExecutionEngine: Added DelayedMessages", "pos", pos, "delayed", delayedSeqNum, "block-header", block.Header()) return block, nil } @@ -642,6 +767,7 @@ func (s *ExecutionEngine) appendBlock(block *types.Block, statedb *state.StateDB } blockGasUsedHistogram.Update(int64(blockGasused)) gasUsedSinceStartupCounter.Inc(int64(blockGasused)) + s.updateL1GasPriceEstimateMetric() return nil } @@ -661,22 +787,25 @@ func (s *ExecutionEngine) ResultAtPos(pos arbutil.MessageIndex) (*execution.Mess return s.resultFromHeader(s.bc.GetHeaderByNumber(s.MessageIndexToBlockNumber(pos))) } -func (s *ExecutionEngine) GetL1GasPriceEstimate() (uint64, error) { +func (s *ExecutionEngine) updateL1GasPriceEstimateMetric() { bc := s.bc latestHeader := bc.CurrentBlock() latestState, err := bc.StateAt(latestHeader.Root) if err != nil { - return 0, errors.New("error getting latest statedb while fetching l2 Estimate of L1 GasPrice") + log.Error("error getting latest statedb while fetching l2 Estimate of L1 GasPrice") + return } arbState, err := arbosState.OpenSystemArbosState(latestState, nil, true) if err != nil { - return 0, errors.New("error opening system arbos state while fetching l2 Estimate of L1 GasPrice") + log.Error("error opening system arbos state while fetching l2 Estimate of L1 GasPrice") + return } l2EstimateL1GasPrice, err := arbState.L1PricingState().PricePerUnit() if err != nil { - return 0, errors.New("error fetching l2 Estimate of L1 GasPrice") + log.Error("error fetching l2 Estimate of L1 GasPrice") + return } - return l2EstimateL1GasPrice.Uint64(), nil + l1GasPriceEstimateGauge.Update(l2EstimateL1GasPrice.Int64()) } func (s *ExecutionEngine) getL1PricingSurplus() (int64, error) { @@ -697,17 +826,65 @@ func (s *ExecutionEngine) getL1PricingSurplus() (int64, error) { return surplus.Int64(), nil } -func (s *ExecutionEngine) cacheL1PriceDataOfMsg(num arbutil.MessageIndex, receipts types.Receipts, block *types.Block) { +func (s *ExecutionEngine) cacheL1PriceDataOfMsg(seqNum arbutil.MessageIndex, receipts types.Receipts, block *types.Block, blockBuiltUsingDelayedMessage bool) { var gasUsedForL1 uint64 - for i := 1; i < len(receipts); i++ { - gasUsedForL1 += receipts[i].GasUsedForL1 - } - gasChargedForL1 := gasUsedForL1 * block.BaseFee().Uint64() var callDataUnits uint64 - for _, tx := range block.Transactions() { - callDataUnits += tx.CalldataUnits + if !blockBuiltUsingDelayedMessage 
{ + // s.cachedL1PriceData tracks L1 price data for messages posted by Nitro, + // so delayed messages should not update cumulative values kept on it. + + // First transaction in every block is an Arbitrum internal transaction, + // so we skip it here. + for i := 1; i < len(receipts); i++ { + gasUsedForL1 += receipts[i].GasUsedForL1 + } + for _, tx := range block.Transactions() { + callDataUnits += tx.CalldataUnits + } + } + l1GasCharged := gasUsedForL1 * block.BaseFee().Uint64() + + s.cachedL1PriceData.mutex.Lock() + defer s.cachedL1PriceData.mutex.Unlock() + + resetCache := func() { + s.cachedL1PriceData.startOfL1PriceDataCache = seqNum + s.cachedL1PriceData.endOfL1PriceDataCache = seqNum + s.cachedL1PriceData.msgToL1PriceData = []L1PriceDataOfMsg{{ + callDataUnits: callDataUnits, + cummulativeCallDataUnits: callDataUnits, + l1GasCharged: l1GasCharged, + cummulativeL1GasCharged: l1GasCharged, + }} + } + size := len(s.cachedL1PriceData.msgToL1PriceData) + if size == 0 || + s.cachedL1PriceData.startOfL1PriceDataCache == 0 || + s.cachedL1PriceData.endOfL1PriceDataCache == 0 || + arbutil.MessageIndex(size) != s.cachedL1PriceData.endOfL1PriceDataCache-s.cachedL1PriceData.startOfL1PriceDataCache+1 { + resetCache() + return + } + if seqNum != s.cachedL1PriceData.endOfL1PriceDataCache+1 { + if seqNum > s.cachedL1PriceData.endOfL1PriceDataCache+1 { + log.Info("message position higher than current end of l1 price data cache, resetting cache to this message") + resetCache() + } else if seqNum < s.cachedL1PriceData.startOfL1PriceDataCache { + log.Info("message position lower than start of l1 price data cache, ignoring") + } else { + log.Info("message position already seen in l1 price data cache, ignoring") + } + } else { + cummulativeCallDataUnits := s.cachedL1PriceData.msgToL1PriceData[size-1].cummulativeCallDataUnits + cummulativeL1GasCharged := s.cachedL1PriceData.msgToL1PriceData[size-1].cummulativeL1GasCharged + s.cachedL1PriceData.msgToL1PriceData = append(s.cachedL1PriceData.msgToL1PriceData, L1PriceDataOfMsg{ + callDataUnits: callDataUnits, + cummulativeCallDataUnits: cummulativeCallDataUnits + callDataUnits, + l1GasCharged: l1GasCharged, + cummulativeL1GasCharged: cummulativeL1GasCharged + l1GasCharged, + }) + s.cachedL1PriceData.endOfL1PriceDataCache = seqNum } - s.consensus.CacheL1PriceDataOfMsg(num, callDataUnits, gasChargedForL1) } // DigestMessage is used to create a block by executing msg against the latest state and storing it.
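The cumulative fields above are what let backlogCallDataUnits and backlogL1GasCharged be answered in O(1) rather than by summing the whole slice: take the last entry's running total, subtract the first entry's running total, and add the first entry's own units back in. A minimal standalone sketch of that arithmetic (with a hypothetical priceData type, not the engine's actual struct) looks like this:

package main

import "fmt"

// priceData mirrors the idea behind L1PriceDataOfMsg: each entry records its
// own units plus a running cumulative total up to and including itself.
type priceData struct {
	units           uint64
	cumulativeUnits uint64
}

// backlog computes the total units across every cached entry in O(1):
// last cumulative minus first cumulative drops the first entry's own
// contribution, so it is added back explicitly.
func backlog(cache []priceData) uint64 {
	if len(cache) == 0 {
		return 0
	}
	first, last := cache[0], cache[len(cache)-1]
	return last.cumulativeUnits - first.cumulativeUnits + first.units
}

func main() {
	cache := []priceData{
		{units: 10, cumulativeUnits: 10},
		{units: 5, cumulativeUnits: 15},
		{units: 7, cumulativeUnits: 22},
	}
	fmt.Println(backlog(cache)) // prints 22 = 10 + 5 + 7
}

Running this prints 22, the sum of the three entries' units, without iterating over the cache.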
@@ -755,6 +932,7 @@ func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, if err != nil { return nil, err } + s.cacheL1PriceDataOfMsg(num, receipts, block, false) if time.Now().After(s.nextScheduledVersionCheck) { s.nextScheduledVersionCheck = time.Now().Add(time.Minute) diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 165ea6c58c..b0147f4fb4 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -107,6 +107,7 @@ var ConfigDefault = Config{ func ConfigDefaultNonSequencerTest() *Config { config := ConfigDefault + config.Caching = TestCachingConfig config.ParentChainReader = headerreader.TestConfig config.Sequencer.Enable = false config.Forwarder = DefaultTestForwarderConfig @@ -119,6 +120,7 @@ func ConfigDefaultNonSequencerTest() *Config { func ConfigDefaultTest() *Config { config := ConfigDefault + config.Caching = TestCachingConfig config.Sequencer = TestSequencerConfig config.ParentChainReader = headerreader.TestConfig config.ForwardingTarget = "null" @@ -229,7 +231,7 @@ func CreateExecutionNode( var classicOutbox *ClassicOutboxRetriever if l2BlockChain.Config().ArbitrumChainParams.GenesisBlockNum > 0 { - classicMsgDb, err := stack.OpenDatabase("classic-msg", 0, 0, "classicmsg/", true) + classicMsgDb, err := stack.OpenDatabase("classic-msg", 0, 0, "classicmsg/", true) // TODO can we skip using ExtraOptions here? if err != nil { log.Warn("Classic Msg Database not found", "err", err) classicOutbox = nil @@ -288,11 +290,12 @@ func CreateExecutionNode( } -func (n *ExecutionNode) GetL1GasPriceEstimate() (uint64, error) { - return n.ExecEngine.GetL1GasPriceEstimate() +func (n *ExecutionNode) MarkFeedStart(to arbutil.MessageIndex) { + n.ExecEngine.MarkFeedStart(to) } func (n *ExecutionNode) Initialize(ctx context.Context) error { + n.ExecEngine.Initialize(n.ConfigFetcher().Caching.StylusLRUCache) n.ArbInterface.Initialize(n) err := n.Backend.Start() if err != nil { @@ -359,7 +362,7 @@ func (n *ExecutionNode) StopAndWait() { func (n *ExecutionNode) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) (*execution.MessageResult, error) { return n.ExecEngine.DigestMessage(num, msg, msgForPrefetch) } -func (n *ExecutionNode) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) ([]*execution.MessageResult, error) { +func (n *ExecutionNode) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadataAndBlockHash, oldMessages []*arbostypes.MessageWithMetadata) ([]*execution.MessageResult, error) { return n.ExecEngine.Reorg(count, newMessages, oldMessages) } func (n *ExecutionNode) HeadMessageNumber() (arbutil.MessageIndex, error) { @@ -427,3 +430,11 @@ func (n *ExecutionNode) MessageIndexToBlockNumber(messageNum arbutil.MessageInde func (n *ExecutionNode) Maintenance() error { return n.ChainDB.Compact(nil, nil) } + +func (n *ExecutionNode) Synced() bool { + return n.SyncMonitor.Synced() +} + +func (n *ExecutionNode) FullSyncProgressMap() map[string]interface{} { + return n.SyncMonitor.FullSyncProgressMap() +} diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 552fb72bda..61df8188c9 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -76,6 +76,7 @@ type SequencerConfig struct { NonceFailureCacheExpiry time.Duration `koanf:"nonce-failure-cache-expiry" reload:"hot"` ExpectedSurplusSoftThreshold string 
`koanf:"expected-surplus-soft-threshold" reload:"hot"` ExpectedSurplusHardThreshold string `koanf:"expected-surplus-hard-threshold" reload:"hot"` + EnableProfiling bool `koanf:"enable-profiling" reload:"hot"` expectedSurplusSoftThreshold int expectedSurplusHardThreshold int @@ -117,6 +118,9 @@ func (c *SequencerConfig) Validate() error { return errors.New("expected-surplus-soft-threshold cannot be lower than expected-surplus-hard-threshold") } } + if c.MaxTxDataSize > arbostypes.MaxL2MessageSize-50000 { + return errors.New("max-tx-data-size too large for MaxL2MessageSize") + } return nil } @@ -138,6 +142,7 @@ var DefaultSequencerConfig = SequencerConfig{ NonceFailureCacheExpiry: time.Second, ExpectedSurplusSoftThreshold: "default", ExpectedSurplusHardThreshold: "default", + EnableProfiling: false, } var TestSequencerConfig = SequencerConfig{ @@ -155,6 +160,7 @@ var TestSequencerConfig = SequencerConfig{ NonceFailureCacheExpiry: time.Second, ExpectedSurplusSoftThreshold: "default", ExpectedSurplusHardThreshold: "default", + EnableProfiling: false, } func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -178,23 +184,24 @@ func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Duration(prefix+".switch-poll-interval", DefaultSequencerConfig.SwitchPollInterval, "the poll interval of checking the sequencer should be switched or not") f.String(prefix+".expected-surplus-soft-threshold", DefaultSequencerConfig.ExpectedSurplusSoftThreshold, "if expected surplus is lower than this value, warnings are posted") f.String(prefix+".expected-surplus-hard-threshold", DefaultSequencerConfig.ExpectedSurplusHardThreshold, "if expected surplus is lower than this value, new incoming transactions will be denied") + f.Bool(prefix+".enable-profiling", DefaultSequencerConfig.EnableProfiling, "enable CPU profiling and tracing") } type txQueueItem struct { tx *types.Transaction + txSize int // size in bytes of the marshalled transaction options *arbitrum_types.ConditionalOptions resultChan chan<- error - returnedResult bool + returnedResult *atomic.Bool ctx context.Context firstAppearance time.Time } func (i *txQueueItem) returnResult(err error) { - if i.returnedResult { + if i.returnedResult.Swap(true) { log.Error("attempting to return result to already finished queue item", "err", err) return } - i.returnedResult = true i.resultChan <- err close(i.resultChan) } @@ -419,11 +426,12 @@ func ctxWithTimeout(ctx context.Context, timeout time.Duration) (context.Context } func (s *Sequencer) PublishTransaction(parentCtx context.Context, tx *types.Transaction, options *arbitrum_types.ConditionalOptions) error { + config := s.config() // Only try to acquire Rlock and check for hard threshold if l1reader is not nil // And hard threshold was enabled, this prevents spamming of read locks when not needed - if s.l1Reader != nil && s.config().ExpectedSurplusHardThreshold != "default" { + if s.l1Reader != nil && config.ExpectedSurplusHardThreshold != "default" { s.expectedSurplusMutex.RLock() - if s.expectedSurplusUpdated && s.expectedSurplus < int64(s.config().expectedSurplusHardThreshold) { + if s.expectedSurplusUpdated && s.expectedSurplus < int64(config.expectedSurplusHardThreshold) { return errors.New("currently not accepting transactions due to expected surplus being below threshold") } s.expectedSurplusMutex.RUnlock() @@ -457,7 +465,12 @@ func (s *Sequencer) PublishTransaction(parentCtx context.Context, tx *types.Tran return types.ErrTxTypeNotSupported } - queueTimeout := s.config().QueueTimeout + 
txBytes, err := tx.MarshalBinary() + if err != nil { + return err + } + + queueTimeout := config.QueueTimeout queueCtx, cancelFunc := ctxWithTimeout(parentCtx, queueTimeout) defer cancelFunc() @@ -468,9 +481,10 @@ func (s *Sequencer) PublishTransaction(parentCtx context.Context, tx *types.Tran resultChan := make(chan error, 1) queueItem := txQueueItem{ tx, + len(txBytes), options, resultChan, - false, + &atomic.Bool{}, queueCtx, time.Now(), } @@ -693,7 +707,8 @@ func (s *Sequencer) expireNonceFailures() *time.Timer { } // There's no guarantee that returned tx nonces will be correct -func (s *Sequencer) precheckNonces(queueItems []txQueueItem) []txQueueItem { +func (s *Sequencer) precheckNonces(queueItems []txQueueItem, totalBlockSize int) []txQueueItem { + config := s.config() bc := s.execEngine.bc latestHeader := bc.CurrentBlock() latestState, err := bc.StateAt(latestHeader.Root) @@ -743,7 +758,13 @@ func (s *Sequencer) precheckNonces(queueItems []txQueueItem) []txQueueItem { if err != nil { revivingFailure.queueItem.returnResult(err) } else { - nextQueueItem = &revivingFailure.queueItem + if arbmath.SaturatingAdd(totalBlockSize, revivingFailure.queueItem.txSize) > config.MaxTxDataSize { + // This tx would be too large to add to this block + s.txRetryQueue.Push(revivingFailure.queueItem) + } else { + nextQueueItem = &revivingFailure.queueItem + totalBlockSize += revivingFailure.queueItem.txSize + } } } } else if txNonce < stateNonce || txNonce > pendingNonce { @@ -779,7 +800,7 @@ func (s *Sequencer) precheckNonces(queueItems []txQueueItem) []txQueueItem { func (s *Sequencer) createBlock(ctx context.Context) (returnValue bool) { var queueItems []txQueueItem - var totalBatchSize int + var totalBlockSize int defer func() { panicErr := recover() @@ -787,7 +808,8 @@ func (s *Sequencer) createBlock(ctx context.Context) (returnValue bool) { log.Error("sequencer block creation panicked", "panic", panicErr, "backtrace", string(debug.Stack())) // Return an internal error to any queue items we were trying to process for _, item := range queueItems { - if !item.returnedResult { + // This can race, but that's alright, worst case is a log line in returnResult + if !item.returnedResult.Load() { item.returnResult(sequencerInternalError) } } @@ -850,37 +872,47 @@ func (s *Sequencer) createBlock(ctx context.Context) (returnValue bool) { queueItem.returnResult(err) continue } - txBytes, err := queueItem.tx.MarshalBinary() - if err != nil { - queueItem.returnResult(err) - continue - } - if len(txBytes) > config.MaxTxDataSize { + if queueItem.txSize > config.MaxTxDataSize { // This tx is too large queueItem.returnResult(txpool.ErrOversizedData) continue } - if totalBatchSize+len(txBytes) > config.MaxTxDataSize { + if totalBlockSize+queueItem.txSize > config.MaxTxDataSize { // This tx would be too large to add to this batch s.txRetryQueue.Push(queueItem) // End the batch here to put this tx in the next one break } - totalBatchSize += len(txBytes) + totalBlockSize += queueItem.txSize queueItems = append(queueItems, queueItem) } s.nonceCache.Resize(config.NonceCacheSize) // Would probably be better in a config hook but this is basically free s.nonceCache.BeginNewBlock() - queueItems = s.precheckNonces(queueItems) + queueItems = s.precheckNonces(queueItems, totalBlockSize) txes := make([]*types.Transaction, len(queueItems)) hooks := s.makeSequencingHooks() hooks.ConditionalOptionsForTx = make([]*arbitrum_types.ConditionalOptions, len(queueItems)) + totalBlockSize = 0 // recompute the totalBlockSize to double 
check it for i, queueItem := range queueItems { txes[i] = queueItem.tx + totalBlockSize = arbmath.SaturatingAdd(totalBlockSize, queueItem.txSize) hooks.ConditionalOptionsForTx[i] = queueItem.options } + if totalBlockSize > config.MaxTxDataSize { + for _, queueItem := range queueItems { + s.txRetryQueue.Push(queueItem) + } + log.Error( + "put too many transactions in a block", + "numTxes", len(queueItems), + "totalBlockSize", totalBlockSize, + "maxTxDataSize", config.MaxTxDataSize, + ) + return false + } + if s.handleInactive(ctx, queueItems) { return false } @@ -892,13 +924,16 @@ func (s *Sequencer) createBlock(ctx context.Context) (returnValue bool) { s.L1BlockAndTimeMutex.Unlock() if s.l1Reader != nil && (l1Block == 0 || math.Abs(float64(l1Timestamp)-float64(timestamp)) > config.MaxAcceptableTimestampDelta.Seconds()) { + for _, queueItem := range queueItems { + s.txRetryQueue.Push(queueItem) + } log.Error( "cannot sequence: unknown L1 block or L1 timestamp too far from local clock time", "l1Block", l1Block, "l1Timestamp", time.Unix(int64(l1Timestamp), 0), "localTimestamp", time.Unix(int64(timestamp), 0), ) - return false + return true } header := &arbostypes.L1IncomingMessageHeader{ @@ -911,7 +946,15 @@ func (s *Sequencer) createBlock(ctx context.Context) (returnValue bool) { } start := time.Now() - block, err := s.execEngine.SequenceTransactions(header, txes, hooks) + var ( + block *types.Block + err error + ) + if config.EnableProfiling { + block, err = s.execEngine.SequenceTransactionsWithProfiling(header, txes, hooks) + } else { + block, err = s.execEngine.SequenceTransactions(header, txes, hooks) + } elapsed := time.Since(start) blockCreationTimer.Update(elapsed) if elapsed >= time.Second*5 { @@ -1034,8 +1077,8 @@ func (s *Sequencer) updateExpectedSurplus(ctx context.Context) (int64, error) { if err != nil { return 0, fmt.Errorf("error encountered getting l1 pricing surplus while updating expectedSurplus: %w", err) } - backlogL1GasCharged := int64(s.execEngine.consensus.BacklogL1GasCharged()) - backlogCallDataUnits := int64(s.execEngine.consensus.BacklogCallDataUnits()) + backlogL1GasCharged := int64(s.execEngine.backlogL1GasCharged()) + backlogCallDataUnits := int64(s.execEngine.backlogCallDataUnits()) expectedSurplus := int64(surplus) + backlogL1GasCharged - backlogCallDataUnits*int64(l1GasPrice) // update metrics l1GasPriceGauge.Update(int64(l1GasPrice)) @@ -1043,16 +1086,17 @@ func (s *Sequencer) updateExpectedSurplus(ctx context.Context) (int64, error) { unusedL1GasChargeGauge.Update(backlogL1GasCharged) currentSurplusGauge.Update(surplus) expectedSurplusGauge.Update(expectedSurplus) - if s.config().ExpectedSurplusSoftThreshold != "default" && expectedSurplus < int64(s.config().expectedSurplusSoftThreshold) { - log.Warn("expected surplus is below soft threshold", "value", expectedSurplus, "threshold", s.config().expectedSurplusSoftThreshold) + config := s.config() + if config.ExpectedSurplusSoftThreshold != "default" && expectedSurplus < int64(config.expectedSurplusSoftThreshold) { + log.Warn("expected surplus is below soft threshold", "value", expectedSurplus, "threshold", config.expectedSurplusSoftThreshold) } return expectedSurplus, nil } func (s *Sequencer) Start(ctxIn context.Context) error { s.StopWaiter.Start(ctxIn, s) - - if (s.config().ExpectedSurplusHardThreshold != "default" || s.config().ExpectedSurplusSoftThreshold != "default") && s.l1Reader == nil { + config := s.config() + if (config.ExpectedSurplusHardThreshold != "default" || 
config.ExpectedSurplusSoftThreshold != "default") && s.l1Reader == nil { return errors.New("expected surplus soft/hard thresholds are enabled but l1Reader is nil") } @@ -1064,7 +1108,7 @@ func (s *Sequencer) Start(ctxIn context.Context) error { expectedSurplus, err := s.updateExpectedSurplus(ctxIn) if err != nil { - if s.config().ExpectedSurplusHardThreshold != "default" { + if config.ExpectedSurplusHardThreshold != "default" { return fmt.Errorf("expected-surplus-hard-threshold is enabled but error fetching initial expected surplus value: %w", err) } log.Error("expected-surplus-soft-threshold is enabled but error fetching initial expected surplus value", "err", err) @@ -1107,8 +1151,7 @@ func (s *Sequencer) Start(ctxIn context.Context) error { s.CallIteratively(func(ctx context.Context) time.Duration { nextBlock := time.Now().Add(s.config().MaxBlockSpeed) - madeBlock := s.createBlock(ctx) - if madeBlock { + if s.createBlock(ctx) { // Note: this may return a negative duration, but timers are fine with that (they treat negative durations as 0). return time.Until(nextBlock) } diff --git a/execution/gethexec/sync_monitor.go b/execution/gethexec/sync_monitor.go index 564c6d74bd..86949c7767 100644 --- a/execution/gethexec/sync_monitor.go +++ b/execution/gethexec/sync_monitor.go @@ -59,12 +59,8 @@ func (s *SyncMonitor) FullSyncProgressMap() map[string]interface{} { } func (s *SyncMonitor) SyncProgressMap() map[string]interface{} { - if s.consensus.Synced() { - built, err := s.exec.HeadMessageNumber() - consensusSyncTarget := s.consensus.SyncTargetMessageCount() - if err == nil && built+1 >= consensusSyncTarget { - return make(map[string]interface{}) - } + if s.Synced() { + return make(map[string]interface{}) } return s.FullSyncProgressMap() } @@ -112,7 +108,14 @@ func (s *SyncMonitor) FinalizedBlockNumber(ctx context.Context) (uint64, error) } func (s *SyncMonitor) Synced() bool { - return len(s.SyncProgressMap()) == 0 + if s.consensus.Synced() { + built, err := s.exec.HeadMessageNumber() + consensusSyncTarget := s.consensus.SyncTargetMessageCount() + if err == nil && built+1 >= consensusSyncTarget { + return true + } + } + return false } func (s *SyncMonitor) SetConsensusInfo(consensus execution.ConsensusInfo) { diff --git a/execution/gethexec/wasmstorerebuilder.go b/execution/gethexec/wasmstorerebuilder.go new file mode 100644 index 0000000000..dcbee45a3f --- /dev/null +++ b/execution/gethexec/wasmstorerebuilder.go @@ -0,0 +1,115 @@ +// Copyright 2021-2024, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package gethexec + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/arbitrum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/offchainlabs/nitro/arbos/arbosState" +) + +var RebuildingPositionKey []byte = []byte("_rebuildingPosition") // contains the codehash upto which rebuilding of wasm store was last completed. 
Initialized to common.Hash{} at the start +var RebuildingStartBlockHashKey []byte = []byte("_rebuildingStartBlockHash") // contains the block hash of the starting block when rebuilding of the wasm store first began +var RebuildingDone common.Hash = common.BytesToHash([]byte("_done")) // indicates that the rebuilding is done; if RebuildingPositionKey holds this value it implies rebuilding was completed + +func ReadFromKeyValueStore[T any](store ethdb.KeyValueStore, key []byte) (T, error) { + var empty T + posBytes, err := store.Get(key) + if err != nil { + return empty, err + } + var val T + err = rlp.DecodeBytes(posBytes, &val) + if err != nil { + return empty, fmt.Errorf("error decoding value stored for key in the KeyValueStore: %w", err) + } + return val, nil +} + +func WriteToKeyValueStore[T any](store ethdb.KeyValueStore, key []byte, val T) error { + valBytes, err := rlp.EncodeToBytes(val) + if err != nil { + return err + } + err = store.Put(key, valBytes) + if err != nil { + return err + } + return nil +} + +// RebuildWasmStore runs a loop looking at every codehash in diskDb, checking if it's an activated stylus contract and +// saving it to the wasm store if it doesn't already exist. On error, rebuilding is aborted and the error is returned. +// +// It stores the status of rebuilding to wasm store by updating the codehash (of the latest successfully checked contract) in +// RebuildingPositionKey after every second of work. +// +// It also stores a special value that is only set once when rebuilding commenced in RebuildingStartBlockHashKey as the block +// hash of the latest block when rebuilding was first called; this is used to avoid recomputing the assembly and module of +// contracts that were created after rebuilding commenced, since they would already be added during sync.
+func RebuildWasmStore(ctx context.Context, wasmStore ethdb.KeyValueStore, chainDb ethdb.Database, maxRecreateStateDepth int64, l2Blockchain *core.BlockChain, position, rebuildingStartBlockHash common.Hash) error { + var err error + var stateDb *state.StateDB + latestHeader := l2Blockchain.CurrentBlock() + // Attempt to get state at the start block when rebuilding commenced, if not available (in case of non-archival nodes) use latest state + rebuildingStartHeader := l2Blockchain.GetHeaderByHash(rebuildingStartBlockHash) + stateDb, _, err = arbitrum.StateAndHeaderFromHeader(ctx, chainDb, l2Blockchain, maxRecreateStateDepth, rebuildingStartHeader, nil) + if err != nil { + log.Info("Error getting state at start block of rebuilding wasm store, attempting rebuilding with latest state", "err", err) + stateDb, _, err = arbitrum.StateAndHeaderFromHeader(ctx, chainDb, l2Blockchain, maxRecreateStateDepth, latestHeader, nil) + if err != nil { + return fmt.Errorf("error getting state at latest block, aborting rebuilding: %w", err) + } + } + diskDb := stateDb.Database().DiskDB() + arbState, err := arbosState.OpenSystemArbosState(stateDb, nil, true) + if err != nil { + return fmt.Errorf("error getting arbos state, aborting rebuilding: %w", err) + } + programs := arbState.Programs() + iter := diskDb.NewIterator(rawdb.CodePrefix, position[:]) + defer iter.Release() + lastStatusUpdate := time.Now() + for iter.Next() { + codeHashBytes := bytes.TrimPrefix(iter.Key(), rawdb.CodePrefix) + codeHash := common.BytesToHash(codeHashBytes) + code := iter.Value() + if state.IsStylusProgram(code) { + if err := programs.SaveActiveProgramToWasmStore(stateDb, codeHash, code, latestHeader.Time, l2Blockchain.Config().DebugMode(), rebuildingStartHeader.Time); err != nil { + return fmt.Errorf("error while rebuilding of wasm store, aborting rebuilding: %w", err) + } + } + // After every one second of work, update the rebuilding position + // This also notifies user that we are working on rebuilding + if time.Since(lastStatusUpdate) >= time.Second || ctx.Err() != nil { + log.Info("Storing rebuilding status to disk", "codeHash", codeHash) + if err := WriteToKeyValueStore(wasmStore, RebuildingPositionKey, codeHash); err != nil { + return fmt.Errorf("error updating codehash position in rebuilding of wasm store: %w", err) + } + // If outer context is cancelled we should terminate rebuilding + // We attempted to write the latest checked codeHash to wasm store + if ctx.Err() != nil { + return ctx.Err() + } + lastStatusUpdate = time.Now() + } + } + // Set rebuilding position to done indicating completion + if err := WriteToKeyValueStore(wasmStore, RebuildingPositionKey, RebuildingDone); err != nil { + return fmt.Errorf("error updating codehash position in rebuilding of wasm store to done: %w", err) + } + log.Info("Rebuilding of wasm store was successful") + return nil +} diff --git a/execution/interface.go b/execution/interface.go index f652f7ce68..40b34ee2fd 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -32,7 +32,7 @@ var ErrSequencerInsertLockTaken = errors.New("insert lock taken") // always needed type ExecutionClient interface { DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) (*MessageResult, error) - Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) ([]*MessageResult, error) + Reorg(count arbutil.MessageIndex, newMessages 
[]arbostypes.MessageWithMetadataAndBlockHash, oldMessages []*arbostypes.MessageWithMetadata) ([]*MessageResult, error) HeadMessageNumber() (arbutil.MessageIndex, error) HeadMessageNumberSync(t *testing.T) (arbutil.MessageIndex, error) ResultAtPos(pos arbutil.MessageIndex) (*MessageResult, error) @@ -57,7 +57,9 @@ type ExecutionSequencer interface { ForwardTo(url string) error SequenceDelayedMessage(message *arbostypes.L1IncomingMessage, delayedSeqNum uint64) error NextDelayedMessageNumber() (uint64, error) - GetL1GasPriceEstimate() (uint64, error) + MarkFeedStart(to arbutil.MessageIndex) + Synced() bool + FullSyncProgressMap() map[string]interface{} } type FullExecutionClient interface { @@ -95,9 +97,6 @@ type ConsensusInfo interface { type ConsensusSequencer interface { WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata, msgResult MessageResult) error ExpectChosenSequencer() error - CacheL1PriceDataOfMsg(pos arbutil.MessageIndex, callDataUnits uint64, l1GasCharged uint64) - BacklogL1GasCharged() uint64 - BacklogCallDataUnits() uint64 } type FullConsensusClient interface { diff --git a/execution/nodeInterface/NodeInterface.go b/execution/nodeInterface/NodeInterface.go index 7e524731d0..9179a52718 100644 --- a/execution/nodeInterface/NodeInterface.go +++ b/execution/nodeInterface/NodeInterface.go @@ -213,12 +213,11 @@ func (n NodeInterface) EstimateRetryableTicket( } // ArbitrumSubmitRetryableTx is unsigned so the following won't panic - msg, err := core.TransactionToMessage(types.NewTx(submitTx), types.NewArbitrumSigner(nil), nil) + msg, err := core.TransactionToMessage(types.NewTx(submitTx), types.NewArbitrumSigner(nil), nil, core.MessageGasEstimationMode) if err != nil { return err } - msg.TxRunMode = core.MessageGasEstimationMode *n.returnMessage.message = *msg *n.returnMessage.changed = true return nil diff --git a/fastcache b/fastcache index f9d9f11052..cd4f9b8d15 160000 --- a/fastcache +++ b/fastcache @@ -1 +1 @@ -Subproject commit f9d9f11052817d478af08b64d139d5f09ec3a68f +Subproject commit cd4f9b8d15b0b22bc628cbbf1dba11540d023904 diff --git a/go-ethereum b/go-ethereum index 75e2c5a8d5..c1b97a4653 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 75e2c5a8d5cb8340750be2723b16a333e742d00d +Subproject commit c1b97a465330307b6618ec635fccdec77ee96c23 diff --git a/go.mod b/go.mod index 516ef1dea4..66e12162ea 100644 --- a/go.mod +++ b/go.mod @@ -52,6 +52,8 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) +require github.com/google/go-querystring v1.1.0 // indirect + require ( github.com/DataDog/zstd v1.4.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect @@ -115,6 +117,7 @@ require ( github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/flatbuffers v1.12.1 // indirect + github.com/google/go-github/v62 v62.0.0 github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/graph-gophers/graphql-go v1.3.0 // indirect @@ -160,7 +163,7 @@ require ( go.opencensus.io v0.24.0 // indirect golang.org/x/mod v0.14.0 // indirect golang.org/x/net v0.22.0 // indirect - golang.org/x/sync v0.5.0 // indirect + golang.org/x/sync v0.5.0 golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/protobuf v1.33.0 // indirect diff --git a/go.sum b/go.sum index 7622aa0c23..9fa4b33a62 100644 --- a/go.sum +++ b/go.sum @@ -7,18 +7,6 @@ 
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3 github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/EspressoSystems/espresso-sequencer-go v0.0.20 h1:lqxMZm1IKibvTVx9E30DoCbakvJPWMbHVFG43KyIygs= -github.com/EspressoSystems/espresso-sequencer-go v0.0.20/go.mod h1:BbU8N23RGl45QXSf/bYc8OQ8TG/vlMaPC1GU1acqKmc= -github.com/EspressoSystems/espresso-sequencer-go v0.0.21-0.20240627061508-b391361a8efd h1:dc/f2PSSwhgtSkUg9yFxoFSomhnBonTiB4BrxmHZ4mI= -github.com/EspressoSystems/espresso-sequencer-go v0.0.21-0.20240627061508-b391361a8efd/go.mod h1:BbU8N23RGl45QXSf/bYc8OQ8TG/vlMaPC1GU1acqKmc= -github.com/EspressoSystems/espresso-sequencer-go v0.0.21-0.20240627082021-b15afa4d1e3b h1:nC4ozgrjAsiLIo4LBD+Qsbman6a3Jc7s7WN69mA08Og= -github.com/EspressoSystems/espresso-sequencer-go v0.0.21-0.20240627082021-b15afa4d1e3b/go.mod h1:BbU8N23RGl45QXSf/bYc8OQ8TG/vlMaPC1GU1acqKmc= -github.com/EspressoSystems/espresso-sequencer-go v0.0.21-0.20240628053039-6263c8a675aa h1:KhEx/c0MOBqpQZ8NKqQpn39ynk0/Oby5DlB+J8OLhUg= -github.com/EspressoSystems/espresso-sequencer-go v0.0.21-0.20240628053039-6263c8a675aa/go.mod h1:BbU8N23RGl45QXSf/bYc8OQ8TG/vlMaPC1GU1acqKmc= -github.com/EspressoSystems/espresso-sequencer-go v0.0.21-0.20240628063139-ed58fed9e319 h1:Vr4E9alDak4XImOX2ai+hMJ/SgfjaIcfXYpmXnIMk6M= -github.com/EspressoSystems/espresso-sequencer-go v0.0.21-0.20240628063139-ed58fed9e319/go.mod h1:BbU8N23RGl45QXSf/bYc8OQ8TG/vlMaPC1GU1acqKmc= -github.com/EspressoSystems/espresso-sequencer-go v0.0.21-0.20240628072620-351db6a9e261 h1:Jp4m7pWglCuS6vVimUYQpLzgD9uglmLcET87JFmVbGQ= -github.com/EspressoSystems/espresso-sequencer-go v0.0.21-0.20240628072620-351db6a9e261/go.mod h1:BbU8N23RGl45QXSf/bYc8OQ8TG/vlMaPC1GU1acqKmc= github.com/EspressoSystems/espresso-sequencer-go v0.0.21 h1:pHLdf8qfIGMNLX3QjoPoYzJ+LUgKf30ipd+Pxcypsaw= github.com/EspressoSystems/espresso-sequencer-go v0.0.21/go.mod h1:BbU8N23RGl45QXSf/bYc8OQ8TG/vlMaPC1GU1acqKmc= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= @@ -311,6 +299,7 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -319,7 +308,11 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github/v62 v62.0.0 h1:/6mGCaRywZz9MuHyw9gD1CwsbmBX8GWsbFkwMmHdhl4= +github.com/google/go-github/v62 v62.0.0/go.mod h1:EMxeUqGJq2xRu9DYBMwel/mr7kZrzUOfQmmpYrZn2a4= github.com/google/go-querystring v1.0.0/go.mod 
h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= diff --git a/nitro-testnode b/nitro-testnode index 3fd5f4bd56..03095e0bb6 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit 3fd5f4bd56214b1f1d87bb30247dae99dd28c02a +Subproject commit 03095e0bb682dc404526611f8f14d380477c5f04 diff --git a/precompiles/ArbGasInfo.go b/precompiles/ArbGasInfo.go index 25801109c7..b41dfda8a2 100644 --- a/precompiles/ArbGasInfo.go +++ b/precompiles/ArbGasInfo.go @@ -36,7 +36,12 @@ func (con ArbGasInfo) GetPricesInWeiWithAggregator( if err != nil { return nil, nil, nil, nil, nil, nil, err } - l2GasPrice := evm.Context.BaseFee + var l2GasPrice *big.Int + if evm.Context.BaseFeeInBlock != nil { + l2GasPrice = evm.Context.BaseFeeInBlock + } else { + l2GasPrice = evm.Context.BaseFee + } // aggregators compress calldata, so we must estimate accordingly weiForL1Calldata := arbmath.BigMulByUint(l1GasPrice, params.TxDataNonZeroGasEIP2028) @@ -69,7 +74,12 @@ func (con ArbGasInfo) _preVersion4_GetPricesInWeiWithAggregator( if err != nil { return nil, nil, nil, nil, nil, nil, err } - l2GasPrice := evm.Context.BaseFee + var l2GasPrice *big.Int + if evm.Context.BaseFeeInBlock != nil { + l2GasPrice = evm.Context.BaseFeeInBlock + } else { + l2GasPrice = evm.Context.BaseFee + } // aggregators compress calldata, so we must estimate accordingly weiForL1Calldata := arbmath.BigMulByUint(l1GasPrice, params.TxDataNonZeroGasEIP2028) @@ -101,7 +111,12 @@ func (con ArbGasInfo) GetPricesInArbGasWithAggregator(c ctx, evm mech, aggregato if err != nil { return nil, nil, nil, err } - l2GasPrice := evm.Context.BaseFee + var l2GasPrice *big.Int + if evm.Context.BaseFeeInBlock != nil { + l2GasPrice = evm.Context.BaseFeeInBlock + } else { + l2GasPrice = evm.Context.BaseFee + } // aggregators compress calldata, so we must estimate accordingly weiForL1Calldata := arbmath.BigMulByUint(l1GasPrice, params.TxDataNonZeroGasEIP2028) @@ -121,7 +136,12 @@ func (con ArbGasInfo) _preVersion4_GetPricesInArbGasWithAggregator(c ctx, evm me if err != nil { return nil, nil, nil, err } - l2GasPrice := evm.Context.BaseFee + var l2GasPrice *big.Int + if evm.Context.BaseFeeInBlock != nil { + l2GasPrice = evm.Context.BaseFeeInBlock + } else { + l2GasPrice = evm.Context.BaseFee + } // aggregators compress calldata, so we must estimate accordingly weiForL1Calldata := arbmath.BigMulByUint(l1GasPrice, params.TxDataNonZeroGasEIP2028) diff --git a/precompiles/ArbWasmCache.go b/precompiles/ArbWasmCache.go index 36b4e1ad31..3cada9dd70 100644 --- a/precompiles/ArbWasmCache.go +++ b/precompiles/ArbWasmCache.go @@ -3,6 +3,8 @@ package precompiles +import "github.com/ethereum/go-ethereum/common" + type ArbWasmCache struct { Address addr // 0x72 @@ -20,14 +22,23 @@ func (con ArbWasmCache) AllCacheManagers(c ctx, _ mech) ([]addr, error) { return c.State.Programs().CacheManagers().AllMembers(65536) } -// Caches all programs with the given codehash. Caller must be a cache manager or chain owner. +// Deprecated: replaced with CacheProgram. 
func (con ArbWasmCache) CacheCodehash(c ctx, evm mech, codehash hash) error { - return con.setProgramCached(c, evm, codehash, true) + return con.setProgramCached(c, evm, common.Address{}, codehash, true) +} + +// Caches all programs with a codehash equal to the given address. Caller must be a cache manager or chain owner. +func (con ArbWasmCache) CacheProgram(c ctx, evm mech, address addr) error { + codehash, err := c.GetCodeHash(address) + if err != nil { + return err + } + return con.setProgramCached(c, evm, address, codehash, true) } // Evicts all programs with the given codehash. Caller must be a cache manager or chain owner. func (con ArbWasmCache) EvictCodehash(c ctx, evm mech, codehash hash) error { - return con.setProgramCached(c, evm, codehash, false) + return con.setProgramCached(c, evm, common.Address{}, codehash, false) } // Gets whether a program is cached. Note that the program may be expired. @@ -36,7 +47,7 @@ func (con ArbWasmCache) CodehashIsCached(c ctx, evm mech, codehash hash) (bool, } // Caches all programs with the given codehash. -func (con ArbWasmCache) setProgramCached(c ctx, evm mech, codehash hash, cached bool) error { +func (con ArbWasmCache) setProgramCached(c ctx, evm mech, address addr, codehash hash, cached bool) error { if !con.hasAccess(c) { return c.BurnOut() } @@ -51,7 +62,7 @@ func (con ArbWasmCache) setProgramCached(c ctx, evm mech, codehash hash, cached return con.UpdateProgramCache(c, evm, c.caller, codehash, cached) } return programs.SetProgramCached( - emitEvent, evm.StateDB, codehash, cached, evm.Context.Time, params, txRunMode, debugMode, + emitEvent, evm.StateDB, codehash, address, cached, evm.Context.Time, params, txRunMode, debugMode, ) } diff --git a/precompiles/precompile.go b/precompiles/precompile.go index c39f2bcb6d..9a6d8885ad 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -72,11 +72,12 @@ type Precompile struct { } type PrecompileMethod struct { - name string - template abi.Method - purity purity - handler reflect.Method - arbosVersion uint64 + name string + template abi.Method + purity purity + handler reflect.Method + arbosVersion uint64 + maxArbosVersion uint64 } type PrecompileEvent struct { @@ -226,6 +227,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr purity, handler, 0, + 0, } methods[id] = &method methodsByName[name] = &method @@ -575,6 +577,8 @@ func Precompiles() map[addr]ArbosPrecompile { for _, method := range ArbWasmCache.methods { method.arbosVersion = ArbWasmCache.arbosVersion } + ArbWasmCache.methodsByName["CacheCodehash"].maxArbosVersion = params.ArbosVersion_Stylus + ArbWasmCache.methodsByName["CacheProgram"].arbosVersion = params.ArbosVersion_StylusFixes ArbRetryableImpl := &ArbRetryableTx{Address: types.ArbRetryableTxAddress} ArbRetryable := insert(MakePrecompile(pgen.ArbRetryableTxMetaData, ArbRetryableImpl)) @@ -680,7 +684,7 @@ func (p *Precompile) Call( } id := *(*[4]byte)(input) method, ok := p.methods[id] - if !ok || arbosVersion < method.arbosVersion { + if !ok || arbosVersion < method.arbosVersion || (method.maxArbosVersion > 0 && arbosVersion > method.maxArbosVersion) { // method does not exist or hasn't yet been activated return nil, 0, vm.ErrExecutionReverted } diff --git a/precompiles/precompile_test.go b/precompiles/precompile_test.go index 86047038dc..ecce77088a 100644 --- a/precompiles/precompile_test.go +++ b/precompiles/precompile_test.go @@ -194,6 +194,7 @@ func TestPrecompilesPerArbosVersion(t *testing.T) { 11: 4, 20: 8, 30: 38, + 31: 
1, } precompiles := Precompiles() diff --git a/pubsub/common.go b/pubsub/common.go new file mode 100644 index 0000000000..9f05304e46 --- /dev/null +++ b/pubsub/common.go @@ -0,0 +1,29 @@ +package pubsub + +import ( + "context" + + "github.com/ethereum/go-ethereum/log" + "github.com/go-redis/redis/v8" +) + +// CreateStream tries to create a stream with the given name; if it already exists, +// it does not return an error. +func CreateStream(ctx context.Context, streamName string, client redis.UniversalClient) error { + _, err := client.XGroupCreateMkStream(ctx, streamName, streamName, "$").Result() + if err != nil && !StreamExists(ctx, streamName, client) { + return err + } + return nil +} + +// StreamExists returns whether there is any consumer group for the specified +// redis stream. +func StreamExists(ctx context.Context, streamName string, client redis.UniversalClient) bool { + got, err := client.Do(ctx, "XINFO", "STREAM", streamName).Result() + if err != nil { + log.Error("Reading redis streams", "error", err) + return false + } + return got != nil +} diff --git a/pubsub/consumer.go b/pubsub/consumer.go index 7a5078ee00..df3695606d 100644 --- a/pubsub/consumer.go +++ b/pubsub/consumer.go @@ -57,14 +57,13 @@ func NewConsumer[Request any, Response any](client redis.UniversalClient, stream if streamName == "" { return nil, fmt.Errorf("redis stream name cannot be empty") } - consumer := &Consumer[Request, Response]{ + return &Consumer[Request, Response]{ id: uuid.NewString(), client: client, redisStream: streamName, redisGroup: streamName, // There is 1-1 mapping of redis stream and consumer group. cfg: cfg, - } - return consumer, nil + }, nil } // Start starts the consumer to iteratively perform heartbeat in configured intervals. @@ -80,16 +79,36 @@ func (c *Consumer[Request, Response]) Start(ctx context.Context) { func (c *Consumer[Request, Response]) StopAndWait() { c.StopWaiter.StopAndWait() + c.deleteHeartBeat(c.GetParentContext()) } func heartBeatKey(id string) string { return fmt.Sprintf("consumer:%s:heartbeat", id) } +func (c *Consumer[Request, Response]) RedisClient() redis.UniversalClient { + return c.client +} + +func (c *Consumer[Request, Response]) StreamName() string { + return c.redisStream +} + func (c *Consumer[Request, Response]) heartBeatKey() string { return heartBeatKey(c.id) } +// deleteHeartBeat deletes the heartbeat to indicate it is being shut down. +func (c *Consumer[Request, Response]) deleteHeartBeat(ctx context.Context) { + if err := c.client.Del(ctx, c.heartBeatKey()).Err(); err != nil { + l := log.Info + if ctx.Err() != nil { + l = log.Error + } + l("Deleting heartbeat", "consumer", c.id, "error", err) + } +} + // heartBeat updates the heartBeat key indicating aliveness.
func (c *Consumer[Request, Response]) heartBeat(ctx context.Context) { if err := c.client.Set(ctx, c.heartBeatKey(), time.Now().UnixMilli(), 2*c.cfg.KeepAliveTimeout).Err(); err != nil { diff --git a/pubsub/pubsub_test.go b/pubsub/pubsub_test.go index 31f6d9e20a..72504602e3 100644 --- a/pubsub/pubsub_test.go +++ b/pubsub/pubsub_test.go @@ -4,8 +4,10 @@ import ( "context" "errors" "fmt" + "os" "sort" "testing" + "time" "github.com/ethereum/go-ethereum/log" "github.com/go-redis/redis/v8" @@ -201,6 +203,7 @@ func consume(ctx context.Context, t *testing.T, consumers []*Consumer[testReques } func TestRedisProduce(t *testing.T) { + log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true))) t.Parallel() for _, tc := range []struct { name string @@ -212,7 +215,7 @@ func TestRedisProduce(t *testing.T) { }, { name: "some consumers killed, others should take over their work", - killConsumers: false, + killConsumers: true, }, } { t.Run(tc.name, func(t *testing.T) { @@ -229,6 +232,7 @@ func TestRedisProduce(t *testing.T) { // Consumer messages in every third consumer but don't ack them to check // that other consumers will claim ownership on those messages. for i := 0; i < len(consumers); i += 3 { + consumers[i].Start(ctx) if _, err := consumers[i].Consume(ctx); err != nil { t.Errorf("Error consuming message: %v", err) } @@ -236,6 +240,7 @@ func TestRedisProduce(t *testing.T) { } } + time.Sleep(time.Second) gotMessages, wantResponses := consume(ctx, t, consumers) gotResponses, err := awaitResponses(ctx, promises) if err != nil { @@ -243,7 +248,7 @@ func TestRedisProduce(t *testing.T) { } producer.StopAndWait() for _, c := range consumers { - c.StopWaiter.StopAndWait() + c.StopAndWait() } got, err := mergeValues(gotMessages) if err != nil { @@ -280,6 +285,7 @@ func TestRedisReproduceDisabled(t *testing.T) { // Consumer messages in every third consumer but don't ack them to check // that other consumers will claim ownership on those messages. for i := 0; i < len(consumers); i += 3 { + consumers[i].Start(ctx) if _, err := consumers[i].Consume(ctx); err != nil { t.Errorf("Error consuming message: %v", err) } diff --git a/scripts/split-val-entry.sh b/scripts/split-val-entry.sh index a5ee0709b6..8e1be0f6cc 100755 --- a/scripts/split-val-entry.sh +++ b/scripts/split-val-entry.sh @@ -8,11 +8,12 @@ echo launching validation servers # add their port to wait loop # edit validation-server-configs-list to include the other nodes /usr/local/bin/nitro-val --file-logging.enable=false --auth.addr 127.0.0.10 --auth.origins 127.0.0.1 --auth.jwtsecret /tmp/nitro-val.jwt --auth.port 52000 & -for port in 52000; do +/home/user/nitro-legacy/bin/nitro-val --file-logging.enable=false --auth.addr 127.0.0.10 --auth.origins 127.0.0.1 --auth.jwtsecret /tmp/nitro-val.jwt --auth.port 52001 --validation.wasm.root-path /home/user/nitro-legacy/machines & +for port in 52000 52001; do while ! 
nc -w1 -z 127.0.0.10 $port; do echo waiting for validation port $port sleep 1 done done echo launching nitro-node -/usr/local/bin/nitro --node.block-validator.pending-upgrade-module-root="0x8b104a2e80ac6165dc58b9048de12f301d70b02a0ab51396c22b4b4b802a16a4" --node.block-validator.validation-server-configs-list='[{"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52000"}]' "$@" +/usr/local/bin/nitro --validation.wasm.allowed-wasm-module-roots /home/user/nitro-legacy/machines,/home/user/target/machines --node.block-validator.validation-server-configs-list='[{"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52000"}, {"jwtsecret":"/tmp/nitro-val.jwt","url":"http://127.0.0.10:52001"}]' "$@" diff --git a/solgen/gen.go b/solgen/gen.go index 770fa08571..92511595d7 100644 --- a/solgen/gen.go +++ b/solgen/gen.go @@ -68,7 +68,7 @@ func main() { } root := filepath.Dir(filename) parent := filepath.Dir(root) - filePaths, err := filepath.Glob(filepath.Join(parent, "contracts", "build", "contracts", "src", "*", "*", "*.json")) + filePaths, err := filepath.Glob(filepath.Join(parent, "contracts", "build", "contracts", "src", "*", "*.sol", "*.json")) if err != nil { log.Fatal(err) } @@ -105,7 +105,7 @@ func main() { modInfo.addArtifact(artifact) } - yulFilePaths, err := filepath.Glob(filepath.Join(parent, "contracts", "out", "yul", "*", "*.json")) + yulFilePaths, err := filepath.Glob(filepath.Join(parent, "contracts", "out", "*", "*.yul", "*.json")) if err != nil { log.Fatal(err) } diff --git a/staker/block_validator.go b/staker/block_validator.go index 159b3a650f..8a370e8b87 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -821,8 +821,9 @@ validationsLoop: } for _, moduleRoot := range wasmRoots { if v.chosenValidator[moduleRoot] == nil { - v.possiblyFatal(fmt.Errorf("did not find spawner for moduleRoot :%v", moduleRoot)) - continue + notFoundErr := fmt.Errorf("did not find spawner for moduleRoot :%v", moduleRoot) + v.possiblyFatal(notFoundErr) + return nil, notFoundErr } if v.chosenValidator[moduleRoot].Room() == 0 { log.Trace("advanceValidations: no more room", "moduleRoot", moduleRoot) @@ -1118,7 +1119,7 @@ func (v *BlockValidator) Initialize(ctx context.Context) error { } // First spawner is always RedisValidationClient if RedisStreams are enabled. if v.redisValidator != nil { - err := v.redisValidator.Initialize(moduleRoots) + err := v.redisValidator.Initialize(ctx, moduleRoots) if err != nil { return err } @@ -1127,13 +1128,18 @@ func (v *BlockValidator) Initialize(ctx context.Context) error { for _, root := range moduleRoots { if v.redisValidator != nil && validator.SpawnerSupportsModule(v.redisValidator, root) { v.chosenValidator[root] = v.redisValidator + log.Info("validator chosen", "WasmModuleRoot", root, "chosen", "redis") } else { for _, spawner := range v.execSpawners { if validator.SpawnerSupportsModule(spawner, root) { v.chosenValidator[root] = spawner + log.Info("validator chosen", "WasmModuleRoot", root, "chosen", spawner.Name()) break } } + if v.chosenValidator[root] == nil { + return fmt.Errorf("cannot validate WasmModuleRoot %v", root) + } } } return nil diff --git a/staker/challenge-cache/cache.go b/staker/challenge-cache/cache.go new file mode 100644 index 0000000000..8cca4bb835 --- /dev/null +++ b/staker/challenge-cache/cache.go @@ -0,0 +1,242 @@ +// Copyright 2023-2024, Offchain Labs, Inc. 
+// For license information, see https://github.com/offchainlabs/bold/blob/main/LICENSE
+/*
+* Package challengecache stores hashes required for making history commitments in Arbitrum BOLD.
+When a challenge begins, validators need to post Merkle commitments to a series of block hashes to
+narrow down their disagreement to a single block. Once a single block of disagreement is found, another BOLD challenge begins
+to narrow down within the execution of that block. This requires using the Arbitrator emulator to compute
+the intermediate hashes of executing the block as WASM opcodes. These hashes are expensive to compute, so we
+store them in a filesystem cache to avoid recomputing them and for hierarchical access.
+Each file contains a list of 32 byte hashes, concatenated together as bytes.
+Using this structure, we can namespace hashes by message number and by challenge level.
+
+Once a validator receives a full list of computed machine hashes for the first time from a validation node,
+it will write the hashes to this filesystem hierarchy for fast access next time these hashes are needed.
+
+Example uses:
+- Obtain all the hashes for the execution of message num 70 to 71 for a given wavm module root.
+- Obtain all the hashes from step 100 to 101 at subchallenge level 1 for the execution of message num 70.
+
+	wavm-module-root-0xab/
+		rollup-block-hash-0x12...-message-num-70/
+			hashes.bin
+			subchallenge-level-1-big-step-100/
+				hashes.bin
+
+We namespace top-level block challenges by wavm module root. Then, we can retrieve
+the hashes for any data within a challenge or associated subchallenge based on the hierarchy above.
+*/
+
+package challengecache
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/log"
+)
+
+var (
+	ErrNotFoundInCache    = errors.New("not found in challenge cache")
+	ErrFileAlreadyExists  = errors.New("file already exists")
+	ErrNoHashes           = errors.New("no hashes being written")
+	hashesFileName        = "hashes.bin"
+	wavmModuleRootPrefix  = "wavm-module-root"
+	rollupBlockHashPrefix = "rollup-block-hash"
+	messageNumberPrefix   = "message-num"
+	bigStepPrefix         = "big-step"
+	challengeLevelPrefix  = "subchallenge-level"
+)
+
+// HistoryCommitmentCacher can retrieve history commitment hashes given lookup keys.
+type HistoryCommitmentCacher interface {
+	Get(lookup *Key, numToRead uint64) ([]common.Hash, error)
+	Put(lookup *Key, hashes []common.Hash) error
+}
+
+// Cache for history commitments on disk.
+type Cache struct {
+	baseDir string
+}
+
+// New cache from a base directory path.
+func New(baseDir string) (*Cache, error) {
+	if _, err := os.Stat(baseDir); err != nil {
+		if err := os.MkdirAll(baseDir, os.ModePerm); err != nil {
+			return nil, fmt.Errorf("could not make base cache directory %s: %w", baseDir, err)
+		}
+	}
+	return &Cache{
+		baseDir: baseDir,
+	}, nil
+}
+
+// Key for cache lookups includes the wavm module root of a challenge, as well
+// as the heights for messages and big steps as needed.
+type Key struct {
+	RollupBlockHash common.Hash
+	WavmModuleRoot  common.Hash
+	MessageHeight   uint64
+	StepHeights     []uint64
+}
+
+// Get a list of hashes from the cache from index 0 up to a certain index. Hashes are saved as files in the directory
+// hierarchy for the cache. If a file is not present, ErrNotFoundInCache
+// is returned.
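Since the package comment pins down both the file format (concatenated 32-byte hashes) and the directory hierarchy, a short usage sketch may help make the API concrete. Everything below is illustrative only: the temporary directory, hash values, and heights are assumptions, not values taken from this change.

```go
package main

import (
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/common"

	challengecache "github.com/offchainlabs/nitro/staker/challenge-cache"
)

func main() {
	// Assumption: any writable directory can serve as the cache root.
	baseDir, err := os.MkdirTemp("", "challenge-cache-example")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(baseDir)

	cache, err := challengecache.New(baseDir)
	if err != nil {
		panic(err)
	}

	// The key mirrors the documented hierarchy: wavm module root, then rollup
	// block hash plus message number, then one big-step height per subchallenge
	// level. For this key the hashes land under
	//   wavm-module-root-0x.../rollup-block-hash-0x...-message-num-70/subchallenge-level-1-big-step-100/hashes.bin
	key := &challengecache.Key{
		WavmModuleRoot:  common.BytesToHash([]byte("example-module-root")),
		RollupBlockHash: common.BytesToHash([]byte("example-block-hash")),
		MessageHeight:   70,
		StepHeights:     []uint64{100},
	}

	hashes := []common.Hash{
		common.BytesToHash([]byte("hash-0")),
		common.BytesToHash([]byte("hash-1")),
	}
	// Put persists the hashes as one hashes.bin file of concatenated 32-byte values.
	if err := cache.Put(key, hashes); err != nil {
		panic(err)
	}

	// Get reads them back, up to the requested count; a missing file would
	// surface as ErrNotFoundInCache instead.
	got, err := cache.Get(key, uint64(len(hashes)))
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d hashes back from %s\n", len(got), baseDir)
}
```

Because the lookup is purely path-based, a restarted validator can reuse previously computed hashes without maintaining any extra index.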
+func (c *Cache) Get( + lookup *Key, + numToRead uint64, +) ([]common.Hash, error) { + fName, err := determineFilePath(c.baseDir, lookup) + if err != nil { + return nil, err + } + if _, err := os.Stat(fName); err != nil { + log.Warn("Cache miss", "fileName", fName) + return nil, ErrNotFoundInCache + } + log.Debug("Cache hit", "fileName", fName) + f, err := os.Open(fName) + if err != nil { + return nil, err + } + defer func() { + if err := f.Close(); err != nil { + log.Error("Could not close file after reading", "err", err, "file", fName) + } + }() + return readHashes(f, numToRead) +} + +// Put a list of hashes into the cache. +// Hashes are saved as files in a directory hierarchy for the cache. +// This function first creates a temporary file, writes the hashes to it, and then renames the file +// to the final directory to ensure atomic writes. +func (c *Cache) Put(lookup *Key, hashes []common.Hash) error { + // We should error if trying to put 0 hashes to disk. + if len(hashes) == 0 { + return ErrNoHashes + } + fName, err := determineFilePath(c.baseDir, lookup) + if err != nil { + return err + } + // We create a tmp file to write our hashes to first. If writing fails, + // we don't want to leave a half-written file in our cache directory. + // Once writing succeeds, we rename in an atomic operation to the correct file name + // in the cache directory hierarchy. + tmp, err := os.MkdirTemp(c.baseDir, "tmpdir") + if err != nil { + return err + } + tmpFName := filepath.Join(tmp, fName) + dir := filepath.Dir(tmpFName) + if err := os.MkdirAll(dir, os.ModePerm); err != nil { + return fmt.Errorf("could not make tmp directory %s: %w", dir, err) + } + f, err := os.Create(tmpFName) + if err != nil { + return err + } + defer func() { + if err := f.Close(); err != nil { + log.Error("Could not close file after writing", "err", err, "file", fName) + } + }() + if err := writeHashes(f, hashes); err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(fName), os.ModePerm); err != nil { + return fmt.Errorf("could not make file directory %s: %w", fName, err) + } + // If the file writing was successful, we rename the file from the tmp directory + // into our cache directory. This is an atomic operation. + // For more information on this atomic write pattern, see: + // https://stackoverflow.com/questions/2333872/how-to-make-file-creation-an-atomic-operation + return os.Rename(tmpFName /*old */, fName /* new */) +} + +// Reads 32 bytes at a time from a reader up to a specified height. If none, then read all. +func readHashes(r io.Reader, numToRead uint64) ([]common.Hash, error) { + br := bufio.NewReader(r) + hashes := make([]common.Hash, 0) + buf := make([]byte, 0, common.HashLength) + for totalRead := uint64(0); totalRead < numToRead; totalRead++ { + n, err := br.Read(buf[:cap(buf)]) + if err != nil { + // If we try to read but reach EOF, we break out of the loop. 
+ if err == io.EOF { + break + } + return nil, err + } + buf = buf[:n] + if n != common.HashLength { + return nil, fmt.Errorf("expected to read %d bytes, got %d bytes", common.HashLength, n) + } + hashes = append(hashes, common.BytesToHash(buf)) + } + if numToRead > uint64(len(hashes)) { + return nil, fmt.Errorf( + "wanted to read %d hashes, but only read %d hashes", + numToRead, + len(hashes), + ) + } + return hashes, nil +} + +func writeHashes(w io.Writer, hashes []common.Hash) error { + bw := bufio.NewWriter(w) + for i, rt := range hashes { + n, err := bw.Write(rt[:]) + if err != nil { + return err + } + if n != len(rt) { + return fmt.Errorf( + "for hash %d, wrote %d bytes, expected to write %d bytes", + i, + n, + len(rt), + ) + } + } + return bw.Flush() +} + +/* +* +When provided with a cache lookup struct, this function determines the file path +for the data requested within the cache directory hierarchy. The folder structure +for a given filesystem challenge cache will look as follows: + + wavm-module-root-0xab/ + rollup-block-hash-0x12...-message-num-70/ + hashes.bin + subchallenge-level-1-big-step-100/ + hashes.bin +*/ +func determineFilePath(baseDir string, lookup *Key) (string, error) { + key := make([]string, 0) + key = append(key, fmt.Sprintf("%s-%s", wavmModuleRootPrefix, lookup.WavmModuleRoot.Hex())) + key = append(key, fmt.Sprintf("%s-%s-%s-%d", rollupBlockHashPrefix, lookup.RollupBlockHash.Hex(), messageNumberPrefix, lookup.MessageHeight)) + for challengeLevel, height := range lookup.StepHeights { + key = append(key, fmt.Sprintf( + "%s-%d-%s-%d", + challengeLevelPrefix, + challengeLevel+1, // subchallenges start at 1, as level 0 is the block challenge level. + bigStepPrefix, + height, + ), + ) + + } + key = append(key, hashesFileName) + return filepath.Join(baseDir, filepath.Join(key...)), nil +} diff --git a/staker/challenge-cache/cache_test.go b/staker/challenge-cache/cache_test.go new file mode 100644 index 0000000000..6b15d62af7 --- /dev/null +++ b/staker/challenge-cache/cache_test.go @@ -0,0 +1,323 @@ +// Copyright 2023-2024, Offchain Labs, Inc. +// For license information, see https://github.com/offchainlabs/bold/blob/main/LICENSE +package challengecache + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/common" +) + +var _ HistoryCommitmentCacher = (*Cache)(nil) + +func TestCache(t *testing.T) { + basePath := t.TempDir() + if err := os.MkdirAll(basePath, os.ModePerm); err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := os.RemoveAll(basePath); err != nil { + t.Fatal(err) + } + }) + cache, err := New(basePath) + if err != nil { + t.Fatal(err) + } + key := &Key{ + WavmModuleRoot: common.BytesToHash([]byte("foo")), + MessageHeight: 0, + StepHeights: []uint64{0}, + } + t.Run("Not found", func(t *testing.T) { + _, err := cache.Get(key, 0) + if !errors.Is(err, ErrNotFoundInCache) { + t.Fatal(err) + } + }) + t.Run("Putting empty hash fails", func(t *testing.T) { + if err := cache.Put(key, []common.Hash{}); !errors.Is(err, ErrNoHashes) { + t.Fatalf("Unexpected error: %v", err) + } + }) + want := []common.Hash{ + common.BytesToHash([]byte("foo")), + common.BytesToHash([]byte("bar")), + common.BytesToHash([]byte("baz")), + } + err = cache.Put(key, want) + if err != nil { + t.Fatal(err) + } + got, err := cache.Get(key, 3) + if err != nil { + t.Fatal(err) + } + if len(got) != len(want) { + t.Fatalf("Wrong number of hashes. 
Expected %d, got %d", len(want), len(got)) + } + for i, rt := range got { + if rt != want[i] { + t.Fatalf("Wrong root. Expected %#x, got %#x", want[i], rt) + } + } +} + +func TestReadWriteStatehashes(t *testing.T) { + t.Run("read up to, but had empty reader", func(t *testing.T) { + b := bytes.NewBuffer([]byte{}) + _, err := readHashes(b, 100) + if err == nil { + t.Fatal("Wanted error") + } + if !strings.Contains(err.Error(), "only read 0 hashes") { + t.Fatal("Unexpected error") + } + }) + t.Run("read single root", func(t *testing.T) { + b := bytes.NewBuffer([]byte{}) + want := common.BytesToHash([]byte("foo")) + b.Write(want.Bytes()) + hashes, err := readHashes(b, 1) + if err != nil { + t.Fatal(err) + } + if len(hashes) == 0 { + t.Fatal("Got no hashes") + } + if hashes[0] != want { + t.Fatalf("Wrong root. Expected %#x, got %#x", want, hashes[0]) + } + }) + t.Run("Three hashes exist, want to read only two", func(t *testing.T) { + b := bytes.NewBuffer([]byte{}) + foo := common.BytesToHash([]byte("foo")) + bar := common.BytesToHash([]byte("bar")) + baz := common.BytesToHash([]byte("baz")) + b.Write(foo.Bytes()) + b.Write(bar.Bytes()) + b.Write(baz.Bytes()) + hashes, err := readHashes(b, 2) + if err != nil { + t.Fatal(err) + } + if len(hashes) != 2 { + t.Fatalf("Expected two hashes, got %d", len(hashes)) + } + if hashes[0] != foo { + t.Fatalf("Wrong root. Expected %#x, got %#x", foo, hashes[0]) + } + if hashes[1] != bar { + t.Fatalf("Wrong root. Expected %#x, got %#x", bar, hashes[1]) + } + }) + t.Run("Fails to write enough data to writer", func(t *testing.T) { + m := &mockWriter{wantErr: true} + err := writeHashes(m, []common.Hash{common.BytesToHash([]byte("foo"))}) + if err == nil { + t.Fatal("Wanted error") + } + m = &mockWriter{wantErr: false, numWritten: 16} + err = writeHashes(m, []common.Hash{common.BytesToHash([]byte("foo"))}) + if err == nil { + t.Fatal("Wanted error") + } + if !strings.Contains(err.Error(), "short write") { + t.Fatalf("Got wrong error kind: %v", err) + } + }) +} + +type mockWriter struct { + wantErr bool + numWritten int +} + +func (m *mockWriter) Write(_ []byte) (n int, err error) { + if m.wantErr { + return 0, errors.New("something went wrong") + } + return m.numWritten, nil +} + +type mockReader struct { + wantErr bool + err error + hashes []common.Hash + readIdx int + bytesRead int +} + +func (m *mockReader) Read(out []byte) (n int, err error) { + if m.wantErr { + return 0, m.err + } + if m.readIdx == len(m.hashes) { + return 0, io.EOF + } + copy(out, m.hashes[m.readIdx].Bytes()) + m.readIdx++ + return m.bytesRead, nil +} + +func Test_readHashes(t *testing.T) { + t.Run("Unexpected error", func(t *testing.T) { + want := []common.Hash{ + common.BytesToHash([]byte("foo")), + common.BytesToHash([]byte("bar")), + common.BytesToHash([]byte("baz")), + } + m := &mockReader{wantErr: true, hashes: want, err: errors.New("foo")} + _, err := readHashes(m, 1) + if err == nil { + t.Fatal(err) + } + if !strings.Contains(err.Error(), "foo") { + t.Fatalf("Unexpected error: %v", err) + } + }) + t.Run("EOF, but did not read as much as was expected", func(t *testing.T) { + want := []common.Hash{ + common.BytesToHash([]byte("foo")), + common.BytesToHash([]byte("bar")), + common.BytesToHash([]byte("baz")), + } + m := &mockReader{wantErr: true, hashes: want, err: io.EOF} + _, err := readHashes(m, 100) + if err == nil { + t.Fatal(err) + } + if !strings.Contains(err.Error(), "wanted to read 100") { + t.Fatalf("Unexpected error: %v", err) + } + }) + t.Run("Reads wrong number of bytes", 
func(t *testing.T) { + want := []common.Hash{ + common.BytesToHash([]byte("foo")), + common.BytesToHash([]byte("bar")), + common.BytesToHash([]byte("baz")), + } + m := &mockReader{wantErr: false, hashes: want, bytesRead: 16} + _, err := readHashes(m, 2) + if err == nil { + t.Fatal(err) + } + if !strings.Contains(err.Error(), "expected to read 32 bytes, got 16") { + t.Fatalf("Unexpected error: %v", err) + } + }) + t.Run("Reads all until EOF", func(t *testing.T) { + want := []common.Hash{ + common.BytesToHash([]byte("foo")), + common.BytesToHash([]byte("bar")), + common.BytesToHash([]byte("baz")), + } + m := &mockReader{wantErr: false, hashes: want, bytesRead: 32} + got, err := readHashes(m, 3) + if err != nil { + t.Fatal(err) + } + if len(want) != len(got) { + t.Fatal("Wrong number of hashes") + } + for i, rt := range got { + if rt != want[i] { + t.Fatal("Wrong root") + } + } + }) +} + +func Test_determineFilePath(t *testing.T) { + type args struct { + baseDir string + key *Key + } + tests := []struct { + name string + args args + want string + wantErr bool + errContains string + }{ + { + name: "OK", + args: args{ + baseDir: "", + key: &Key{ + MessageHeight: 100, + StepHeights: []uint64{50}, + }, + }, + want: "wavm-module-root-0x0000000000000000000000000000000000000000000000000000000000000000/rollup-block-hash-0x0000000000000000000000000000000000000000000000000000000000000000-message-num-100/subchallenge-level-1-big-step-50/hashes.bin", + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := determineFilePath(tt.args.baseDir, tt.args.key) + if (err != nil) != tt.wantErr { + t.Logf("got: %v, and key %+v, got %s", err, tt.args.key, got) + if !strings.Contains(err.Error(), tt.errContains) { + t.Fatalf("Expected %s, got %s", tt.errContains, err.Error()) + } + t.Errorf("determineFilePath() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf( + "determineFilePath() = %v, want %v", + got, + tt.want, + ) + } + }) + } +} + +func BenchmarkCache_Read_32Mb(b *testing.B) { + b.StopTimer() + basePath := os.TempDir() + if err := os.MkdirAll(basePath, os.ModePerm); err != nil { + b.Fatal(err) + } + b.Cleanup(func() { + if err := os.RemoveAll(basePath); err != nil { + b.Fatal(err) + } + }) + cache, err := New(basePath) + if err != nil { + b.Fatal(err) + } + key := &Key{ + WavmModuleRoot: common.BytesToHash([]byte("foo")), + MessageHeight: 0, + StepHeights: []uint64{0}, + } + numHashes := 1 << 20 + hashes := make([]common.Hash, numHashes) + for i := range hashes { + hashes[i] = common.BytesToHash([]byte(fmt.Sprintf("%d", i))) + } + if err := cache.Put(key, hashes); err != nil { + b.Fatal(err) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + readUpTo := uint64(1 << 20) + hashes, err := cache.Get(key, readUpTo) + if err != nil { + b.Fatal(err) + } + if len(hashes) != numHashes { + b.Fatalf("Wrong number of hashes. 
Expected %d, got %d", hashes, len(hashes)) + } + } +} diff --git a/staker/l1_validator.go b/staker/l1_validator.go index 1a9e5faa4c..474cb077a3 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -12,6 +12,7 @@ import ( "github.com/offchainlabs/nitro/staker/txbuilder" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/validator" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -187,12 +188,16 @@ func (v *L1Validator) resolveNextNode(ctx context.Context, info *StakerInfo, lat func (v *L1Validator) isRequiredStakeElevated(ctx context.Context) (bool, error) { callOpts := v.getCallOpts(ctx) - requiredStake, err := v.rollup.CurrentRequiredStake(callOpts) + baseStake, err := v.rollup.BaseStake(callOpts) if err != nil { return false, err } - baseStake, err := v.rollup.BaseStake(callOpts) + requiredStake, err := v.rollup.CurrentRequiredStake(callOpts) if err != nil { + if headerreader.ExecutionRevertedRegexp.MatchString(err.Error()) { + log.Warn("execution reverted checking if required state is elevated; assuming elevated", "err", err) + return true, nil + } return false, err } return requiredStake.Cmp(baseStake) > 0, nil diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 30743dd647..1ba4ae11bf 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -452,7 +452,7 @@ func (v *StatelessBlockValidator) ValidateResult( } } if run == nil { - return false, nil, fmt.Errorf("validation woth WasmModuleRoot %v not supported by node", moduleRoot) + return false, nil, fmt.Errorf("validation with WasmModuleRoot %v not supported by node", moduleRoot) } defer run.Cancel() gsEnd, err := run.Await(ctx) diff --git a/staker/validatorwallet/contract.go b/staker/validatorwallet/contract.go index 0ef190e703..77b403b669 100644 --- a/staker/validatorwallet/contract.go +++ b/staker/validatorwallet/contract.go @@ -307,6 +307,7 @@ func (v *Contract) estimateGas(ctx context.Context, value *big.Int, data []byte) if err != nil { return 0, fmt.Errorf("getting suggested gas tip cap: %w", err) } + gasFeeCap.Add(gasFeeCap, gasTipCap) g, err := v.l1Reader.Client().EstimateGas( ctx, ethereum.CallMsg{ diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 6bc22b23da..0ec03e84c4 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -8,7 +8,6 @@ import ( "crypto/rand" "fmt" "math/big" - "net/http" "strings" "testing" "time" @@ -62,14 +61,14 @@ func addNewBatchPoster(ctx context.Context, t *testing.T, builder *NodeBuilder, } } -func externalSignerTestCfg(addr common.Address) (*dataposter.ExternalSignerCfg, error) { +func externalSignerTestCfg(addr common.Address, url string) (*dataposter.ExternalSignerCfg, error) { cp, err := externalsignertest.CertPaths() if err != nil { return nil, fmt.Errorf("getting certificates path: %w", err) } return &dataposter.ExternalSignerCfg{ Address: common.Bytes2Hex(addr.Bytes()), - URL: externalsignertest.SignerURL, + URL: url, Method: externalsignertest.SignerMethod, RootCA: cp.ServerCert, ClientCert: cp.ClientCert, @@ -80,24 +79,13 @@ func externalSignerTestCfg(addr common.Address) (*dataposter.ExternalSignerCfg, func testBatchPosterParallel(t *testing.T, useRedis bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - httpSrv, srv := externalsignertest.NewServer(t) - cp, err := externalsignertest.CertPaths() - if err != nil { - 
t.Fatalf("Error getting cert paths: %v", err) - } - t.Cleanup(func() { - if err := httpSrv.Shutdown(ctx); err != nil { - t.Fatalf("Error shutting down http server: %v", err) - } - }) + srv := externalsignertest.NewServer(t) go func() { - log.Debug("Server is listening on port 1234...") - if err := httpSrv.ListenAndServeTLS(cp.ServerCert, cp.ServerKey); err != nil && err != http.ErrServerClosed { - log.Debug("ListenAndServeTLS() failed", "error", err) + if err := srv.Start(); err != nil { + log.Error("Failed to start external signer server:", err) return } }() - var redisUrl string if useRedis { redisUrl = redisutil.CreateTestRedis(ctx, t) @@ -114,7 +102,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { builder := NewNodeBuilder(ctx).DefaultConfig(t, true) builder.nodeConfig.BatchPoster.Enable = false builder.nodeConfig.BatchPoster.RedisUrl = redisUrl - signerCfg, err := externalSignerTestCfg(srv.Address) + signerCfg, err := externalSignerTestCfg(srv.Address, srv.URL()) if err != nil { t.Fatalf("Error getting external signer config: %v", err) } @@ -303,3 +291,75 @@ func TestBatchPosterKeepsUp(t *testing.T) { fmt.Printf("backlog: %v message\n", haveMessages-postedMessages) } } + +func testAllowPostingFirstBatchWhenSequencerMessageCountMismatch(t *testing.T, enabled bool) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // creates first node with batch poster disabled + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.BatchPoster.Enable = false + cleanup := builder.Build(t) + defer cleanup() + testClientNonBatchPoster := builder.L2 + + // adds a batch to the sequencer inbox with a wrong next message count, + // should be 2 but it is set to 10 + seqInbox, err := bridgegen.NewSequencerInbox(builder.L1Info.GetAddress("SequencerInbox"), builder.L1.Client) + Require(t, err) + seqOpts := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx) + tx, err := seqInbox.AddSequencerL2Batch(&seqOpts, big.NewInt(1), nil, big.NewInt(1), common.Address{}, big.NewInt(1), big.NewInt(10)) + Require(t, err) + _, err = builder.L1.EnsureTxSucceeded(tx) + Require(t, err) + + // creates a batch poster + nodeConfigBatchPoster := arbnode.ConfigDefaultL1Test() + nodeConfigBatchPoster.BatchPoster.Dangerous.AllowPostingFirstBatchWhenSequencerMessageCountMismatch = enabled + testClientBatchPoster, cleanupBatchPoster := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: nodeConfigBatchPoster}) + defer cleanupBatchPoster() + + // sends a transaction through the batch poster + accountName := "User2" + builder.L2Info.GenerateAccount(accountName) + tx = builder.L2Info.PrepareTx("Owner", accountName, builder.L2Info.TransferGas, big.NewInt(1e12), nil) + err = testClientBatchPoster.Client.SendTransaction(ctx, tx) + Require(t, err) + _, err = testClientBatchPoster.EnsureTxSucceeded(tx) + Require(t, err) + + if enabled { + // if AllowPostingFirstBatchWhenSequencerMessageCountMismatch is enabled + // then the L2 transaction should be posted to L1, and the non batch + // poster node should be able to see it + _, err = WaitForTx(ctx, testClientNonBatchPoster.Client, tx.Hash(), time.Second*3) + Require(t, err) + l2balance, err := testClientNonBatchPoster.Client.BalanceAt(ctx, builder.L2Info.GetAddress(accountName), nil) + Require(t, err) + if l2balance.Cmp(big.NewInt(1e12)) != 0 { + t.Fatal("Unexpected balance:", l2balance) + } + } else { + // if AllowPostingFirstBatchWhenSequencerMessageCountMismatch is disabled + // then the L2 transaction 
should not be posted to L1, so the non + // batch poster will not be able to see it + _, err = WaitForTx(ctx, testClientNonBatchPoster.Client, tx.Hash(), time.Second*3) + if err == nil { + Fatal(t, "tx received by non batch poster node with AllowPostingFirstBatchWhenSequencerMessageCountMismatch disabled") + } + l2balance, err := testClientNonBatchPoster.Client.BalanceAt(ctx, builder.L2Info.GetAddress(accountName), nil) + Require(t, err) + if l2balance.Cmp(big.NewInt(0)) != 0 { + t.Fatal("Unexpected balance:", l2balance) + } + } +} + +func TestAllowPostingFirstBatchWhenSequencerMessageCountMismatchEnabled(t *testing.T) { + testAllowPostingFirstBatchWhenSequencerMessageCountMismatch(t, true) +} + +func TestAllowPostingFirstBatchWhenSequencerMessageCountMismatchDisabled(t *testing.T) { + testAllowPostingFirstBatchWhenSequencerMessageCountMismatch(t, false) +} diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index dfd892a079..b4dafbff15 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -27,6 +27,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/redisutil" + "github.com/offchainlabs/nitro/util/testhelpers/github" "github.com/offchainlabs/nitro/validator/client/redis" ) @@ -39,24 +40,34 @@ const ( upgradeArbOs ) -func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops int, workload workloadType, arbitrator bool, useRedisStreams bool) { +type Options struct { + dasModeString string + workloadLoops int + workload workloadType + arbitrator bool + useRedisStreams bool + wasmRootDir string +} + +func testBlockValidatorSimple(t *testing.T, opts Options) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - chainConfig, l1NodeConfigA, lifecycleManager, _, dasSignerKey := setupConfigWithDAS(t, ctx, dasModeString) + chainConfig, l1NodeConfigA, lifecycleManager, _, dasSignerKey := setupConfigWithDAS(t, ctx, opts.dasModeString) defer lifecycleManager.StopAndWaitUntil(time.Second) - if workload == upgradeArbOs { + if opts.workload == upgradeArbOs { chainConfig.ArbitrumChainParams.InitialArbOSVersion = 10 } var delayEvery int - if workloadLoops > 1 { + if opts.workloadLoops > 1 { l1NodeConfigA.BatchPoster.MaxDelay = time.Millisecond * 500 - delayEvery = workloadLoops / 3 + delayEvery = opts.workloadLoops / 3 } builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder = builder.WithWasmRootDir(opts.wasmRootDir) builder.nodeConfig = l1NodeConfigA builder.chainConfig = chainConfig builder.L2Info = nil @@ -70,13 +81,15 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops validatorConfig.DataAvailability = l1NodeConfigA.DataAvailability validatorConfig.DataAvailability.RPCAggregator.Enable = false redisURL := "" - if useRedisStreams { + if opts.useRedisStreams { redisURL = redisutil.CreateTestRedis(ctx, t) - validatorConfig.BlockValidator.RedisValidationClientConfig = redis.DefaultValidationClientConfig + validatorConfig.BlockValidator.RedisValidationClientConfig = redis.TestValidationClientConfig validatorConfig.BlockValidator.RedisValidationClientConfig.RedisURL = redisURL + } else { + validatorConfig.BlockValidator.RedisValidationClientConfig = redis.ValidationClientConfig{} } - AddDefaultValNode(t, ctx, validatorConfig, !arbitrator, redisURL) + AddDefaultValNode(t, ctx, validatorConfig, !opts.arbitrator, redisURL, opts.wasmRootDir) 
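Because testBlockValidatorSimple now takes a single Options struct instead of five positional arguments, new variants only need to spell out the fields they care about; everything else keeps its zero value. A hypothetical extra case, added alongside the existing tests in block_validator_test.go (the name and field combination are invented for illustration, not part of this change), might read:

```go
// Hypothetical: a local-DAS workload validated by the Arbitrator, with results
// fed over Redis streams. Shown only to illustrate how Options composes; the
// test name does not exist in this diff.
func TestBlockValidatorSimpleLocalDASWithRedisStreams(t *testing.T) {
	opts := Options{
		dasModeString:   "files",
		workloadLoops:   1,
		workload:        ethSend,
		arbitrator:      true,
		useRedisStreams: true,
	}
	testBlockValidatorSimple(t, opts)
}
```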
testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: validatorConfig}) defer cleanupB() @@ -85,17 +98,17 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops perTransfer := big.NewInt(1e12) var simple *mocksgen.Simple - if workload != upgradeArbOs { - for i := 0; i < workloadLoops; i++ { + if opts.workload != upgradeArbOs { + for i := 0; i < opts.workloadLoops; i++ { var tx *types.Transaction - if workload == ethSend { + if opts.workload == ethSend { tx = builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, perTransfer, nil) } else { var contractCode []byte var gas uint64 - if workload == smallContract { + if opts.workload == smallContract { contractCode = []byte{byte(vm.PUSH0)} contractCode = append(contractCode, byte(vm.PUSH0)) contractCode = append(contractCode, byte(vm.PUSH1)) @@ -128,7 +141,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops err := builder.L2.Client.SendTransaction(ctx, tx) Require(t, err) _, err = builder.L2.EnsureTxSucceeded(tx) - if workload != depleteGas { + if opts.workload != depleteGas { Require(t, err) } if delayEvery > 0 && i%delayEvery == (delayEvery-1) { @@ -182,7 +195,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops Require(t, err) } - if workload != depleteGas { + if opts.workload != depleteGas { delayedTx := builder.L2Info.PrepareTx("Owner", "User2", 30002, perTransfer, nil) builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ WrapL2ForDelayed(t, delayedTx, builder.L1Info, "User", 100000), @@ -201,11 +214,11 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops Require(t, err) } - if workload == ethSend { + if opts.workload == ethSend { l2balance, err := testClientB.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) Require(t, err) - expectedBalance := new(big.Int).Mul(perTransfer, big.NewInt(int64(workloadLoops+1))) + expectedBalance := new(big.Int).Mul(perTransfer, big.NewInt(int64(opts.workloadLoops+1))) if l2balance.Cmp(expectedBalance) != 0 { Fatal(t, "Unexpected balance:", l2balance) } @@ -249,21 +262,65 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops } func TestBlockValidatorSimpleOnchainUpgradeArbOs(t *testing.T) { - testBlockValidatorSimple(t, "onchain", 1, upgradeArbOs, true, false) + opts := Options{ + dasModeString: "onchain", + workloadLoops: 1, + workload: upgradeArbOs, + arbitrator: true, + } + testBlockValidatorSimple(t, opts) } func TestBlockValidatorSimpleOnchain(t *testing.T) { - testBlockValidatorSimple(t, "onchain", 1, ethSend, true, false) + opts := Options{ + dasModeString: "onchain", + workloadLoops: 1, + workload: ethSend, + arbitrator: true, + } + testBlockValidatorSimple(t, opts) +} + +func TestBlockValidatorSimpleOnchainWithPublishedMachine(t *testing.T) { + cr, err := github.LatestConsensusRelease(context.Background()) + Require(t, err) + machPath := populateMachineDir(t, cr) + opts := Options{ + dasModeString: "onchain", + workloadLoops: 1, + workload: ethSend, + arbitrator: true, + wasmRootDir: machPath, + } + testBlockValidatorSimple(t, opts) } func TestBlockValidatorSimpleOnchainWithRedisStreams(t *testing.T) { - testBlockValidatorSimple(t, "onchain", 1, ethSend, true, true) + opts := Options{ + dasModeString: "onchain", + workloadLoops: 1, + workload: ethSend, + arbitrator: true, + useRedisStreams: true, + } + testBlockValidatorSimple(t, opts) } func TestBlockValidatorSimpleLocalDAS(t *testing.T) { 
- testBlockValidatorSimple(t, "files", 1, ethSend, true, false) + opts := Options{ + dasModeString: "files", + workloadLoops: 1, + workload: ethSend, + arbitrator: true, + } + testBlockValidatorSimple(t, opts) } func TestBlockValidatorSimpleJITOnchain(t *testing.T) { - testBlockValidatorSimple(t, "files", 8, smallContract, false, false) + opts := Options{ + dasModeString: "files", + workloadLoops: 8, + workload: smallContract, + } + testBlockValidatorSimple(t, opts) } diff --git a/system_tests/blocks_reexecutor_test.go b/system_tests/blocks_reexecutor_test.go index c2941ddcc4..66690d1427 100644 --- a/system_tests/blocks_reexecutor_test.go +++ b/system_tests/blocks_reexecutor_test.go @@ -45,16 +45,11 @@ func TestBlocksReExecutorModes(t *testing.T) { } } + // Reexecute blocks at mode full success := make(chan struct{}) + executorFull := blocksreexecutor.New(&blocksreexecutor.TestConfig, blockchain, feedErrChan) + executorFull.Start(ctx, success) - // Reexecute blocks at mode full - go func() { - executorFull := blocksreexecutor.New(&blocksreexecutor.TestConfig, blockchain, feedErrChan) - executorFull.StopWaiter.Start(ctx, executorFull) - executorFull.Impl(ctx) - executorFull.StopAndWait() - success <- struct{}{} - }() select { case err := <-feedErrChan: t.Errorf("error occurred: %v", err) @@ -66,15 +61,12 @@ func TestBlocksReExecutorModes(t *testing.T) { } // Reexecute blocks at mode random - go func() { - c := &blocksreexecutor.TestConfig - c.Mode = "random" - executorRandom := blocksreexecutor.New(c, blockchain, feedErrChan) - executorRandom.StopWaiter.Start(ctx, executorRandom) - executorRandom.Impl(ctx) - executorRandom.StopAndWait() - success <- struct{}{} - }() + success = make(chan struct{}) + c := &blocksreexecutor.TestConfig + c.Mode = "random" + executorRandom := blocksreexecutor.New(c, blockchain, feedErrChan) + executorRandom.Start(ctx, success) + select { case err := <-feedErrChan: t.Errorf("error occurred: %v", err) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index eb5b3442f3..79ae4779cd 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -11,6 +11,7 @@ import ( "io" "math/big" "net" + "net/http" "os" "strconv" "strings" @@ -23,6 +24,7 @@ import ( "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/cmd/chaininfo" + "github.com/offchainlabs/nitro/cmd/conf" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/das" "github.com/offchainlabs/nitro/deploy" @@ -44,6 +46,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" @@ -72,6 +75,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen" "github.com/offchainlabs/nitro/statetransfer" "github.com/offchainlabs/nitro/util/testhelpers" + "github.com/offchainlabs/nitro/util/testhelpers/github" "golang.org/x/exp/slog" ) @@ -154,6 +158,7 @@ type NodeBuilder struct { execConfig *gethexec.Config l1StackConfig *node.Config l2StackConfig *node.Config + valnodeConfig *valnode.Config L1Info info L2Info info @@ -188,6 +193,8 @@ func (b *NodeBuilder) DefaultConfig(t *testing.T, withL1 bool) *NodeBuilder { b.dataDir = t.TempDir() b.l1StackConfig = createStackConfigForTest(t.TempDir()) b.l2StackConfig = 
createStackConfigForTest(b.dataDir) + cp := valnode.TestValidationConfig + b.valnodeConfig = &cp b.execConfig = gethexec.ConfigDefaultTest() return b } @@ -199,6 +206,11 @@ func (b *NodeBuilder) WithArbOSVersion(arbosVersion uint64) *NodeBuilder { return b } +func (b *NodeBuilder) WithWasmRootDir(wasmRootDir string) *NodeBuilder { + b.valnodeConfig.Wasm.RootPath = wasmRootDir + return b +} + func (b *NodeBuilder) Build(t *testing.T) func() { if b.execConfig.RPC.MaxRecreateStateDepth == arbitrum.UninitializedMaxRecreateStateDepth { if b.execConfig.Caching.Archive { @@ -210,13 +222,13 @@ func (b *NodeBuilder) Build(t *testing.T) func() { if b.withL1 { l1, l2 := NewTestClient(b.ctx), NewTestClient(b.ctx) b.L2Info, l2.ConsensusNode, l2.Client, l2.Stack, b.L1Info, l1.L1Backend, l1.Client, l1.Stack = - createTestNodeOnL1WithConfigImpl(t, b.ctx, b.isSequencer, b.nodeConfig, b.execConfig, b.chainConfig, b.l1StackConfig, b.l2StackConfig, b.L2Info) + createTestNodeOnL1WithConfigImpl(t, b.ctx, b.isSequencer, b.nodeConfig, b.execConfig, b.chainConfig, b.l1StackConfig, b.l2StackConfig, b.valnodeConfig, b.L2Info) b.L1, b.L2 = l1, l2 b.L1.cleanup = func() { requireClose(t, b.L1.Stack) } } else { l2 := NewTestClient(b.ctx) b.L2Info, l2.ConsensusNode, l2.Client = - createTestNode(t, b.ctx, b.L2Info, b.nodeConfig, b.execConfig, b.chainConfig, b.takeOwnership) + createTestNode(t, b.ctx, b.L2Info, b.nodeConfig, b.execConfig, b.chainConfig, b.valnodeConfig, b.takeOwnership) b.L2 = l2 } b.L2.ExecNode = getExecNode(t, b.L2.ConsensusNode) @@ -263,7 +275,7 @@ func (b *NodeBuilder) Build2ndNode(t *testing.T, params *SecondNodeParams) (*Tes l2 := NewTestClient(b.ctx) l2.Client, l2.ConsensusNode = - Create2ndNodeWithConfig(t, b.ctx, b.L2.ConsensusNode, b.L1.Stack, b.L1Info, params.initData, params.nodeConfig, params.execConfig, params.stackConfig) + Create2ndNodeWithConfig(t, b.ctx, b.L2.ConsensusNode, b.L1.Stack, b.L1Info, params.initData, params.nodeConfig, params.execConfig, params.stackConfig, b.valnodeConfig) l2.ExecNode = getExecNode(t, l2.ConsensusNode) l2.cleanup = func() { l2.ConsensusNode.StopAndWait() } return l2, func() { l2.cleanup() } @@ -603,12 +615,13 @@ func currentRootModule(t *testing.T) common.Hash { return locator.LatestWasmModuleRoot() } -func AddDefaultValNode(t *testing.T, ctx context.Context, nodeConfig *arbnode.Config, useJit bool, redisURL string) { +func AddDefaultValNode(t *testing.T, ctx context.Context, nodeConfig *arbnode.Config, useJit bool, redisURL string, wasmRootDir string) { if !nodeConfig.ValidatorRequired() { return } conf := valnode.TestValidationConfig conf.UseJit = useJit + conf.Wasm.RootPath = wasmRootDir // Enable redis streams when URL is specified if redisURL != "" { conf.Arbitrator.RedisValidationServerConfig = rediscons.DefaultValidationServerConfig @@ -706,20 +719,20 @@ func getInitMessage(ctx context.Context, t *testing.T, l1client client, addresse } func DeployOnTestL1( - t *testing.T, ctx context.Context, l1info info, l1client client, chainConfig *params.ChainConfig, hotshotAddr common.Address, + t *testing.T, ctx context.Context, l1info info, l1client client, chainConfig *params.ChainConfig, wasmModuleRoot common.Hash, hotshotAddr common.Address ) (*chaininfo.RollupAddresses, *arbostypes.ParsedInitMessage) { l1info.GenerateAccount("RollupOwner") l1info.GenerateAccount("Sequencer") + l1info.GenerateAccount("Validator") l1info.GenerateAccount("User") SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{ l1info.PrepareTx("Faucet", "RollupOwner", 30000, 
big.NewInt(9223372036854775807), nil), l1info.PrepareTx("Faucet", "Sequencer", 30000, big.NewInt(9223372036854775807), nil), + l1info.PrepareTx("Faucet", "Validator", 30000, big.NewInt(9223372036854775807), nil), l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(9223372036854775807), nil)}) l1TransactionOpts := l1info.GetDefaultTransactOpts("RollupOwner", ctx) - locator, err := server_common.NewMachineLocator("") - Require(t, err) serializedChainConfig, err := json.Marshal(chainConfig) Require(t, err) @@ -738,7 +751,7 @@ func DeployOnTestL1( []common.Address{l1info.GetAddress("Sequencer")}, l1info.GetAddress("RollupOwner"), 0, - arbnode.GenerateRollupConfig(false, locator.LatestWasmModuleRoot(), l1info.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}), + arbnode.GenerateRollupConfig(false, wasmModuleRoot, l1info.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}), nativeToken, maxDataSize, hotshotAddr, @@ -773,9 +786,12 @@ func createL2BlockChainWithStackConfig( stack, err = node.New(stackConfig) Require(t, err) - chainDb, err := stack.OpenDatabase("l2chaindata", 0, 0, "l2chaindata/", false) + chainData, err := stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")) + Require(t, err) + wasmData, err := stack.OpenDatabaseWithExtraOptions("wasm", 0, 0, "wasm/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("wasm")) Require(t, err) - arbDb, err := stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false) + chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmData, 0) + arbDb, err := stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata")) Require(t, err) initReader := statetransfer.NewMemoryInitDataReader(&l2info.ArbInitData) @@ -812,7 +828,8 @@ func createTestNodeOnL1WithConfigImpl( execConfig *gethexec.Config, chainConfig *params.ChainConfig, l1StackConfig *node.Config, - l2StackConfig *node.Config, + stackConfig *node.Config, + valnodeConfig *valnode.Config, l2info_in info, ) ( l2info info, currentNode *arbnode.Node, l2client *ethclient.Client, l2stack *node.Node, @@ -840,8 +857,10 @@ func createTestNodeOnL1WithConfigImpl( if nodeConfig.BlockValidator.LightClientAddress != "" { lightClientAddr = common.HexToAddress(nodeConfig.BlockValidator.LightClientAddress) } - addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig, lightClientAddr) - _, l2stack, l2chainDb, l2arbDb, l2blockchain = createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, initMessage, l2StackConfig, &execConfig.Caching) + locator, err := server_common.NewMachineLocator(valnodeConfig.Wasm.RootPath) + Require(t, err) + addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig, locator.LatestWasmModuleRoot(), lightClientAddr) + _, l2stack, l2chainDb, l2arbDb, l2blockchain = createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, initMessage, stackConfig, &execConfig.Caching) var sequencerTxOptsPtr *bind.TransactOpts var dataSigner signature.DataSignerFunc if isSequencer { @@ -857,7 +876,13 @@ func createTestNodeOnL1WithConfigImpl( execConfig.Sequencer.Enable = false } - AddDefaultValNode(t, ctx, nodeConfig, true, "") + var validatorTxOptsPtr *bind.TransactOpts + if nodeConfig.Staker.Enable { + validatorTxOpts := l1info.GetDefaultTransactOpts("Validator", ctx) + validatorTxOptsPtr = &validatorTxOpts + } + + 
AddDefaultValNode(t, ctx, nodeConfig, true, "", valnodeConfig.Wasm.RootPath) Require(t, execConfig.Validate()) execConfigFetcher := func() *gethexec.Config { return execConfig } @@ -865,7 +890,7 @@ func createTestNodeOnL1WithConfigImpl( Require(t, err) currentNode, err = arbnode.CreateNode( ctx, l2stack, execNode, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, - addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, big.NewInt(1337), nil, + addresses, validatorTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, big.NewInt(1337), nil, ) Require(t, err) @@ -881,7 +906,7 @@ func createTestNodeOnL1WithConfigImpl( // L2 -Only. Enough for tests that needs no interface to L1 // Requires precompiles.AllowDebugPrecompiles = true func createTestNode( - t *testing.T, ctx context.Context, l2Info *BlockchainTestInfo, nodeConfig *arbnode.Config, execConfig *gethexec.Config, chainConfig *params.ChainConfig, takeOwnership bool, + t *testing.T, ctx context.Context, l2Info *BlockchainTestInfo, nodeConfig *arbnode.Config, execConfig *gethexec.Config, chainConfig *params.ChainConfig, valnodeConfig *valnode.Config, takeOwnership bool, ) (*BlockchainTestInfo, *arbnode.Node, *ethclient.Client) { if nodeConfig == nil { nodeConfig = arbnode.ConfigDefaultL2Test() @@ -892,7 +917,7 @@ func createTestNode( feedErrChan := make(chan error, 10) - AddDefaultValNode(t, ctx, nodeConfig, true, "") + AddDefaultValNode(t, ctx, nodeConfig, true, "", valnodeConfig.Wasm.RootPath) l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, l2Info, "", chainConfig, &execConfig.Caching) @@ -964,6 +989,7 @@ func Create2ndNodeWithConfig( nodeConfig *arbnode.Config, execConfig *gethexec.Config, stackConfig *node.Config, + valnodeConfig *valnode.Config, ) (*ethclient.Client, *arbnode.Node) { if nodeConfig == nil { nodeConfig = arbnode.ConfigDefaultL1NonSequencerTest() @@ -981,14 +1007,19 @@ func Create2ndNodeWithConfig( l2stack, err := node.New(stackConfig) Require(t, err) - l2chainDb, err := l2stack.OpenDatabase("l2chaindata", 0, 0, "l2chaindata/", false) + l2chainData, err := l2stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")) + Require(t, err) + wasmData, err := l2stack.OpenDatabaseWithExtraOptions("wasm", 0, 0, "wasm/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("wasm")) Require(t, err) - l2arbDb, err := l2stack.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false) + l2chainDb := rawdb.WrapDatabaseWithWasm(l2chainData, wasmData, 0) + + l2arbDb, err := l2stack.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata")) Require(t, err) initReader := statetransfer.NewMemoryInitDataReader(l2InitData) dataSigner := signature.DataSignerFromPrivateKey(l1info.GetInfoWithPrivKey("Sequencer").PrivateKey) - txOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) + sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) + validatorTxOpts := l1info.GetDefaultTransactOpts("Validator", ctx) firstExec := getExecNode(t, first) chainConfig := firstExec.ArbInterface.BlockChain().Config() @@ -998,7 +1029,7 @@ func Create2ndNodeWithConfig( l2blockchain, err := gethexec.WriteOrTestBlockChain(l2chainDb, coreCacheConfig, initReader, chainConfig, initMessage, gethexec.ConfigDefaultTest().TxLookupLimit, 0) Require(t, err) - AddDefaultValNode(t, ctx, nodeConfig, true, "") + AddDefaultValNode(t, 
ctx, nodeConfig, true, "", valnodeConfig.Wasm.RootPath) Require(t, execConfig.Validate()) Require(t, nodeConfig.Validate()) @@ -1006,7 +1037,7 @@ func Create2ndNodeWithConfig( currentExec, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, configFetcher) Require(t, err) - currentNode, err := arbnode.CreateNode(ctx, l2stack, currentExec, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, first.DeployInfo, &txOpts, &txOpts, dataSigner, feedErrChan, big.NewInt(1337), nil) + currentNode, err := arbnode.CreateNode(ctx, l2stack, currentExec, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, first.DeployInfo, &validatorTxOpts, &sequencerTxOpts, dataSigner, feedErrChan, big.NewInt(1337), nil) Require(t, err) err = currentNode.Start(ctx) @@ -1116,23 +1147,23 @@ func setupConfigWithDAS( var daReader das.DataAvailabilityServiceReader var daWriter das.DataAvailabilityServiceWriter var daHealthChecker das.DataAvailabilityServiceHealthChecker + var signatureVerifier *das.SignatureVerifier if dasModeString != "onchain" { - daReader, daWriter, daHealthChecker, lifecycleManager, err = das.CreateDAComponentsForDaserver(ctx, dasConfig, nil, nil) + daReader, daWriter, signatureVerifier, daHealthChecker, lifecycleManager, err = das.CreateDAComponentsForDaserver(ctx, dasConfig, nil, nil) Require(t, err) rpcLis, err := net.Listen("tcp", "localhost:0") Require(t, err) restLis, err := net.Listen("tcp", "localhost:0") Require(t, err) - _, err = das.StartDASRPCServerOnListener(ctx, rpcLis, genericconf.HTTPServerTimeoutConfigDefault, daReader, daWriter, daHealthChecker) + _, err = das.StartDASRPCServerOnListener(ctx, rpcLis, genericconf.HTTPServerTimeoutConfigDefault, genericconf.HTTPServerBodyLimitDefault, daReader, daWriter, daHealthChecker, signatureVerifier) Require(t, err) _, err = das.NewRestfulDasServerOnListener(restLis, genericconf.HTTPServerTimeoutConfigDefault, daReader, daHealthChecker) Require(t, err) beConfigA := das.BackendConfig{ - URL: "http://" + rpcLis.Addr().String(), - PubKeyBase64Encoded: blsPubToBase64(dasSignerKey), - SignerMask: 1, + URL: "http://" + rpcLis.Addr().String(), + Pubkey: blsPubToBase64(dasSignerKey), } l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, beConfigA) l1NodeConfigA.DataAvailability.Enable = true @@ -1274,3 +1305,31 @@ func logParser[T any](t *testing.T, source string, name string) func(*types.Log) return event } } + +func populateMachineDir(t *testing.T, cr *github.ConsensusRelease) string { + baseDir := t.TempDir() + machineDir := baseDir + "/machines" + err := os.Mkdir(machineDir, 0755) + Require(t, err) + err = os.Mkdir(machineDir+"/latest", 0755) + Require(t, err) + mrFile, err := os.Create(machineDir + "/latest/module-root.txt") + Require(t, err) + _, err = mrFile.WriteString(cr.WavmModuleRoot) + Require(t, err) + machResp, err := http.Get(cr.MachineWavmURL.String()) + Require(t, err) + defer machResp.Body.Close() + machineFile, err := os.Create(machineDir + "/latest/machine.wavm.br") + Require(t, err) + _, err = io.Copy(machineFile, machResp.Body) + Require(t, err) + replayResp, err := http.Get(cr.ReplayWasmURL.String()) + Require(t, err) + defer replayResp.Body.Close() + replayFile, err := os.Create(machineDir + "/latest/replay.wasm") + Require(t, err) + _, err = io.Copy(replayFile, replayResp.Body) + Require(t, err) + return machineDir +} diff --git a/system_tests/das_test.go b/system_tests/das_test.go index b47dbad87e..adf38a4d0e 100644 --- 
a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -6,7 +6,6 @@ package arbtest import ( "context" "encoding/base64" - "encoding/json" "io" "math/big" "net" @@ -26,6 +25,7 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/blsSignatures" + "github.com/offchainlabs/nitro/cmd/conf" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/das" "github.com/offchainlabs/nitro/execution/gethexec" @@ -33,6 +33,8 @@ import ( "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/signature" + "github.com/offchainlabs/nitro/validator/server_common" + "github.com/offchainlabs/nitro/validator/valnode" "golang.org/x/exp/slog" ) @@ -60,30 +62,27 @@ func startLocalDASServer( RequestTimeout: 5 * time.Second, } - var syncFromStorageServices []*das.IterableStorageService - var syncToStorageServices []das.StorageService - storageService, lifecycleManager, err := das.CreatePersistentStorageService(ctx, &config, &syncFromStorageServices, &syncToStorageServices) + storageService, lifecycleManager, err := das.CreatePersistentStorageService(ctx, &config) defer lifecycleManager.StopAndWaitUntil(time.Second) Require(t, err) seqInboxCaller, err := bridgegen.NewSequencerInboxCaller(seqInboxAddress, l1client) Require(t, err) - privKey, err := config.Key.BLSPrivKey() + daWriter, err := das.NewSignAfterStoreDASWriter(ctx, config, storageService) Require(t, err) - daWriter, err := das.NewSignAfterStoreDASWriterWithSeqInboxCaller(privKey, seqInboxCaller, storageService, "") + signatureVerifier, err := das.NewSignatureVerifierWithSeqInboxCaller(seqInboxCaller, "") Require(t, err) rpcLis, err := net.Listen("tcp", "localhost:0") Require(t, err) - rpcServer, err := das.StartDASRPCServerOnListener(ctx, rpcLis, genericconf.HTTPServerTimeoutConfigDefault, storageService, daWriter, storageService) + rpcServer, err := das.StartDASRPCServerOnListener(ctx, rpcLis, genericconf.HTTPServerTimeoutConfigDefault, genericconf.HTTPServerBodyLimitDefault, storageService, daWriter, storageService, signatureVerifier) Require(t, err) restLis, err := net.Listen("tcp", "localhost:0") Require(t, err) restServer, err := das.NewRestfulDasServerOnListener(restLis, genericconf.HTTPServerTimeoutConfigDefault, storageService, storageService) Require(t, err) beConfig := das.BackendConfig{ - URL: "http://" + rpcLis.Addr().String(), - PubKeyBase64Encoded: blsPubToBase64(pubkey), - SignerMask: 1, + URL: "http://" + rpcLis.Addr().String(), + Pubkey: blsPubToBase64(pubkey), } return rpcServer, pubkey, beConfig, restServer, "http://" + restLis.Addr().String() } @@ -96,12 +95,11 @@ func blsPubToBase64(pubkey *blsSignatures.PublicKey) string { } func aggConfigForBackend(t *testing.T, backendConfig das.BackendConfig) das.AggregatorConfig { - backendsJsonByte, err := json.Marshal([]das.BackendConfig{backendConfig}) - Require(t, err) return das.AggregatorConfig{ - Enable: true, - AssumedHonest: 1, - Backends: string(backendsJsonByte), + Enable: true, + AssumedHonest: 1, + Backends: das.BackendConfigList{backendConfig}, + MaxStoreChunkBodySize: 512 * 1024, } } @@ -114,7 +112,9 @@ func TestDASRekey(t *testing.T) { l1info, l1client, _, l1stack := createTestL1BlockChain(t, nil) defer requireClose(t, l1stack) feedErrChan := make(chan error, 10) - addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig, common.Address{}) + locator, err := 
server_common.NewMachineLocator("") + Require(t, err) + addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig, locator.LatestWasmModuleRoot(), common.Address{}) // Setup DAS servers dasDataDir := t.TempDir() @@ -156,13 +156,13 @@ func TestDASRekey(t *testing.T) { l1NodeConfigB.DataAvailability.ParentChainNodeURL = "none" - l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil) + l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil, &valnode.TestValidationConfig) checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(1e12), l2clientB) nodeA.StopAndWait() nodeB.StopAndWait() } - err := dasRpcServerA.Shutdown(ctx) + err = dasRpcServerA.Shutdown(ctx) Require(t, err) dasRpcServerB, pubkeyB, backendConfigB, _, _ := startLocalDASServer(t, ctx, dasDataDir, l1client, addresses.SequencerInbox) defer func() { @@ -177,10 +177,10 @@ func TestDASRekey(t *testing.T) { l2stackA, err := node.New(stackConfig) Require(t, err) - l2chainDb, err := l2stackA.OpenDatabase("l2chaindata", 0, 0, "l2chaindata/", false) + l2chainDb, err := l2stackA.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")) Require(t, err) - l2arbDb, err := l2stackA.OpenDatabase("arbitrumdata", 0, 0, "arbitrumdata/", false) + l2arbDb, err := l2stackA.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata")) Require(t, err) l2blockchain, err := gethexec.GetBlockChain(l2chainDb, nil, chainConfig, gethexec.ConfigDefaultTest().TxLookupLimit) @@ -195,7 +195,7 @@ func TestDASRekey(t *testing.T) { Require(t, nodeA.Start(ctx)) l2clientA := ClientForStack(t, l2stackA) - l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil) + l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil, &valnode.TestValidationConfig) checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(2e12), l2clientB) nodeA.StopAndWait() @@ -248,8 +248,11 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { Require(t, err) l1Reader.Start(ctx) defer l1Reader.StopAndWait() + feedErrChan := make(chan error, 10) - addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig, common.Address{}) + locator, err := server_common.NewMachineLocator("") + Require(t, err) + addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig, locator.LatestWasmModuleRoot(), common.Address{}) keyDir, fileDataDir, dbDataDir := t.TempDir(), t.TempDir(), t.TempDir() pubkey, _, err := das.GenerateAndStoreKeys(keyDir) @@ -278,12 +281,12 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { // L1NodeURL: normally we would have to set this but we are passing in the already constructed client and addresses to the factory } - daReader, daWriter, daHealthChecker, lifecycleManager, err := das.CreateDAComponentsForDaserver(ctx, &serverConfig, l1Reader, &addresses.SequencerInbox) + daReader, daWriter, signatureVerifier, daHealthChecker, lifecycleManager, err := das.CreateDAComponentsForDaserver(ctx, &serverConfig, l1Reader, &addresses.SequencerInbox) Require(t, err) defer lifecycleManager.StopAndWaitUntil(time.Second) rpcLis, err := net.Listen("tcp", "localhost:0") Require(t, err) - _, 
err = das.StartDASRPCServerOnListener(ctx, rpcLis, genericconf.HTTPServerTimeoutConfigDefault, daReader, daWriter, daHealthChecker) + _, err = das.StartDASRPCServerOnListener(ctx, rpcLis, genericconf.HTTPServerTimeoutConfigDefault, genericconf.HTTPServerBodyLimitDefault, daReader, daWriter, daHealthChecker, signatureVerifier) Require(t, err) restLis, err := net.Listen("tcp", "localhost:0") Require(t, err) @@ -301,9 +304,8 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { RequestTimeout: 5 * time.Second, } beConfigA := das.BackendConfig{ - URL: "http://" + rpcLis.Addr().String(), - PubKeyBase64Encoded: blsPubToBase64(pubkey), - SignerMask: 1, + URL: "http://" + rpcLis.Addr().String(), + Pubkey: blsPubToBase64(pubkey), } l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, beConfigA) l1NodeConfigA.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig @@ -346,7 +348,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { l1NodeConfigB.DataAvailability.RestAggregator.Enable = true l1NodeConfigB.DataAvailability.RestAggregator.Urls = []string{"http://" + restLis.Addr().String()} l1NodeConfigB.DataAvailability.ParentChainNodeURL = "none" - l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil) + l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil, &valnode.TestValidationConfig) checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(1e12), l2clientB) diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 2fa6967524..56cd055ae8 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -248,7 +248,7 @@ func createL2Nodes(t *testing.T, ctx context.Context, conf *arbnode.Config, chai return consensusNode, execNode } -func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, challengeMsgIdx int64) { +func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, challengeMsgIdx int64, wasmRootDir string) { glogger := log.NewGlogHandler( log.NewTerminalHandler(io.Writer(os.Stderr), false)) glogger.Verbosity(log.LvlInfo) @@ -273,15 +273,19 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall var valStack *node.Node var mockSpawn *mockSpawner + valNodeConfig := &valnode.TestValidationConfig + valNodeConfig.Wasm.RootPath = wasmRootDir if useStubs { - mockSpawn, valStack = createMockValidationNode(t, ctx, &valnode.TestValidationConfig.Arbitrator) + mockSpawn, valStack = createMockValidationNode(t, ctx, &valNodeConfig.Arbitrator) } else { - _, valStack = createTestValidationNode(t, ctx, &valnode.TestValidationConfig) + _, valStack = createTestValidationNode(t, ctx, valNodeConfig) } configByValidationNode(conf, valStack) fatalErrChan := make(chan error, 10) - asserterRollupAddresses, initMessage := DeployOnTestL1(t, ctx, l1Info, l1Backend, chainConfig, common.Address{}) + locator, err := server_common.NewMachineLocator(wasmRootDir) + Require(t, err) + asserterRollupAddresses, initMessage := DeployOnTestL1(t, ctx, l1Info, l1Backend, chainConfig, locator.LatestWasmModuleRoot(), common.Address{}) deployerTxOpts := l1Info.GetDefaultTransactOpts("deployer", ctx) sequencerTxOpts := l1Info.GetDefaultTransactOpts("sequencer", ctx) @@ -295,7 +299,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall 
asserterRollupAddresses.SequencerInbox = asserterSeqInboxAddr asserterL2Info := NewArbTestInfo(t, chainConfig.ChainID) asserterL2, asserterExec := createL2Nodes(t, ctx, conf, chainConfig, l1Backend, asserterL2Info, asserterRollupAddresses, initMessage, nil, nil, fatalErrChan) - err := asserterL2.Start(ctx) + err = asserterL2.Start(ctx) Require(t, err) challengerRollupAddresses := *asserterRollupAddresses @@ -335,10 +339,6 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall } ospEntry := DeployOneStepProofEntry(t, ctx, &deployerTxOpts, l1Backend, common.Address{}) - locator, err := server_common.NewMachineLocator("") - if err != nil { - Fatal(t, err) - } var wasmModuleRoot common.Hash if useStubs { wasmModuleRoot = mockWasmModuleRoots[0] diff --git a/system_tests/full_challenge_mock_test.go b/system_tests/full_challenge_mock_test.go index d32c2b40ab..82f57dd7ad 100644 --- a/system_tests/full_challenge_mock_test.go +++ b/system_tests/full_challenge_mock_test.go @@ -8,14 +8,16 @@ import "testing" func TestMockChallengeManagerAsserterIncorrect(t *testing.T) { t.Parallel() + defaultWasmRootDir := "" for i := int64(1); i <= makeBatch_MsgsPerBatch*3; i++ { - RunChallengeTest(t, false, true, i) + RunChallengeTest(t, false, true, i, defaultWasmRootDir) } } func TestMockChallengeManagerAsserterCorrect(t *testing.T) { t.Parallel() + defaultWasmRootDir := "" for i := int64(1); i <= makeBatch_MsgsPerBatch*3; i++ { - RunChallengeTest(t, true, true, i) + RunChallengeTest(t, true, true, i, defaultWasmRootDir) } } diff --git a/system_tests/full_challenge_test.go b/system_tests/full_challenge_test.go index d15ee83d1d..96c82848d2 100644 --- a/system_tests/full_challenge_test.go +++ b/system_tests/full_challenge_test.go @@ -6,14 +6,37 @@ package arbtest -import "testing" +import ( + "context" + "testing" + + "github.com/offchainlabs/nitro/util/testhelpers/github" +) func TestChallengeManagerFullAsserterIncorrect(t *testing.T) { t.Parallel() - RunChallengeTest(t, false, false, makeBatch_MsgsPerBatch+1) + defaultWasmRootDir := "" + RunChallengeTest(t, false, false, makeBatch_MsgsPerBatch+1, defaultWasmRootDir) +} + +func TestChallengeManagerFullAsserterIncorrectWithPublishedMachine(t *testing.T) { + t.Parallel() + cr, err := github.LatestConsensusRelease(context.Background()) + Require(t, err) + machPath := populateMachineDir(t, cr) + RunChallengeTest(t, false, true, makeBatch_MsgsPerBatch+1, machPath) } func TestChallengeManagerFullAsserterCorrect(t *testing.T) { t.Parallel() - RunChallengeTest(t, true, false, makeBatch_MsgsPerBatch+2) + defaultWasmRootDir := "" + RunChallengeTest(t, true, false, makeBatch_MsgsPerBatch+2, defaultWasmRootDir) +} + +func TestChallengeManagerFullAsserterCorrectWithPublishedMachine(t *testing.T) { + t.Parallel() + cr, err := github.LatestConsensusRelease(context.Background()) + Require(t, err) + machPath := populateMachineDir(t, cr) + RunChallengeTest(t, true, true, makeBatch_MsgsPerBatch+2, machPath) } diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go index 908f5542f5..9c0f4a7158 100644 --- a/system_tests/nodeinterface_test.go +++ b/system_tests/nodeinterface_test.go @@ -19,6 +19,7 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" + "github.com/offchainlabs/nitro/validator/server_common" ) func TestFindBatch(t *testing.T) { @@ -39,7 +40,9 @@ func TestFindBatch(t *testing.T) { chainConfig := 
params.ArbitrumDevTestChainConfig() fatalErrChan := make(chan error, 10) - rollupAddresses, initMsg := DeployOnTestL1(t, ctx, l1Info, l1Backend, chainConfig, common.Address{}) + locator, err := server_common.NewMachineLocator("") + Require(t, err) + rollupAddresses, initMsg := DeployOnTestL1(t, ctx, l1Info, l1Backend, chainConfig, locator.LatestWasmModuleRoot(), common.Address{}) bridgeAddr, seqInbox, seqInboxAddr := setupSequencerInboxStub(ctx, t, l1Info, l1Backend, chainConfig) @@ -49,7 +52,7 @@ func TestFindBatch(t *testing.T) { rollupAddresses.SequencerInbox = seqInboxAddr l2Info := NewArbTestInfo(t, chainConfig.ChainID) consensus, _ := createL2Nodes(t, ctx, conf, chainConfig, l1Backend, l2Info, rollupAddresses, initMsg, nil, nil, fatalErrChan) - err := consensus.Start(ctx) + err = consensus.Start(ctx) Require(t, err) l2Client := ClientForStack(t, consensus.Stack) diff --git a/system_tests/program_recursive_test.go b/system_tests/program_recursive_test.go new file mode 100644 index 0000000000..245d62fc0c --- /dev/null +++ b/system_tests/program_recursive_test.go @@ -0,0 +1,197 @@ +package arbtest + +import ( + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/solgen/go/mocksgen" + "github.com/offchainlabs/nitro/util/testhelpers" +) + +type multiCallRecurse struct { + Name string + opcode vm.OpCode +} + +func printRecurse(recurse []multiCallRecurse) string { + result := "" + for _, contract := range recurse { + result = result + "(" + contract.Name + "," + contract.opcode.String() + ")" + } + return result +} + +func testProgramRecursiveCall(t *testing.T, builder *NodeBuilder, slotVals map[string]common.Hash, rander *testhelpers.PseudoRandomDataSource, recurse []multiCallRecurse) uint64 { + ctx := builder.ctx + slot := common.HexToHash("0x11223344556677889900aabbccddeeff") + val := common.Hash{} + args := []byte{} + if recurse[0].opcode == vm.SSTORE { + // send event from storage on sstore + val = rander.GetHash() + args = append([]byte{0x1, 0, 0, 0, 65, 0x18}, slot[:]...) + args = append(args, val[:]...) + } else if recurse[0].opcode == vm.SLOAD { + args = append([]byte{0x1, 0, 0, 0, 33, 0x11}, slot[:]...) 
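+ // Note on the encoding above (inferred from how argsForMulticall assembles calls; treat the exact
+ // flag values as an assumption rather than a spec): the leading 0x1 is the call count, the next four
+ // bytes are the big-endian payload length (65 = flag byte + 32-byte slot + 32-byte value for SSTORE,
+ // 33 = flag byte + 32-byte slot for SLOAD), and the 0x18/0x11 byte selects the storage action and its
+ // event flags, which is why args[5] is OR'd with 0x8 further below for the SLOAD case.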
+ } else { + t.Fatal("first level must be sload or sstore") + } + shouldSucceed := true + delegateChangesStorageDest := true + storageDest := recurse[0].Name + for i := 1; i < len(recurse); i++ { + call := recurse[i] + prev := recurse[i-1] + args = argsForMulticall(call.opcode, builder.L2Info.GetAddress(prev.Name), nil, args) + if call.opcode == vm.STATICCALL && recurse[0].opcode == vm.SSTORE { + shouldSucceed = false + } + if delegateChangesStorageDest && call.opcode == vm.DELEGATECALL { + storageDest = call.Name + } else { + delegateChangesStorageDest = false + } + } + if recurse[0].opcode == vm.SLOAD { + // send event from caller on sload + args[5] = args[5] | 0x8 + } + multiCaller, err := mocksgen.NewMultiCallTest(builder.L2Info.GetAddress(recurse[len(recurse)-1].Name), builder.L2.Client) + Require(t, err) + ownerTransact := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + ownerTransact.GasLimit = 10000000 + tx, err := multiCaller.Fallback(&ownerTransact, args) + Require(t, err) + receipt, err := WaitForTx(ctx, builder.L2.Client, tx.Hash(), time.Second*3) + Require(t, err) + + if shouldSucceed { + if receipt.Status != types.ReceiptStatusSuccessful { + log.Error("error when shouldn't", "case", printRecurse(recurse)) + Fatal(t, arbutil.DetailTxError(ctx, builder.L2.Client, tx, receipt)) + } + if len(receipt.Logs) != 1 { + Fatal(t, "incorrect number of logs: ", len(receipt.Logs)) + } + if recurse[0].opcode == vm.SSTORE { + slotVals[storageDest] = val + storageEvt, err := multiCaller.ParseStorage(*receipt.Logs[0]) + Require(t, err) + gotData := common.BytesToHash(storageEvt.Data[:]) + gotSlot := common.BytesToHash(storageEvt.Slot[:]) + if gotData != val || gotSlot != slot || storageEvt.Write != (recurse[0].opcode == vm.SSTORE) { + Fatal(t, "unexpected event", gotData, val, gotSlot, slot, storageEvt.Write, recurse[0].opcode) + } + } else { + calledEvt, err := multiCaller.ParseCalled(*receipt.Logs[0]) + Require(t, err) + gotData := common.BytesToHash(calledEvt.ReturnData) + if gotData != slotVals[storageDest] { + Fatal(t, "unexpected event", gotData, val, slotVals[storageDest]) + } + } + } else if receipt.Status == types.ReceiptStatusSuccessful { + Fatal(t, "should have failed") + } + for contract, expected := range slotVals { + found, err := builder.L2.Client.StorageAt(ctx, builder.L2Info.GetAddress(contract), slot, receipt.BlockNumber) + Require(t, err) + foundHash := common.BytesToHash(found) + if expected != foundHash { + Fatal(t, "contract", contract, "expected", expected, "found", foundHash) + } + } + return receipt.BlockNumber.Uint64() +} + +func testProgramResursiveCalls(t *testing.T, tests [][]multiCallRecurse, jit bool) { + builder, auth, cleanup := setupProgramTest(t, jit) + ctx := builder.ctx + l2client := builder.L2.Client + defer cleanup() + + // set-up contracts + callsAddr := deployWasm(t, ctx, auth, l2client, rustFile("multicall")) + builder.L2Info.SetContract("multicall-rust", callsAddr) + multiCallWasm, _ := readWasmFile(t, rustFile("multicall")) + auth.GasLimit = 32000000 // skip gas estimation + multicallB := deployContract(t, ctx, auth, l2client, multiCallWasm) + builder.L2Info.SetContract("multicall-rust-b", multicallB) + multiAddr, tx, _, err := mocksgen.DeployMultiCallTest(&auth, builder.L2.Client) + builder.L2Info.SetContract("multicall-evm", multiAddr) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, builder.L2.Client, tx) + Require(t, err) + slotVals := make(map[string]common.Hash) + rander := testhelpers.NewPseudoRandomDataSource(t, 0) + + // set-up 
validator + validatorConfig := arbnode.ConfigDefaultL1NonSequencerTest() + validatorConfig.BlockValidator.Enable = true + emptyRedisURL := "" + defaultWasmRootPath := "" + AddDefaultValNode(t, ctx, validatorConfig, jit, emptyRedisURL, defaultWasmRootPath) + valClient, valCleanup := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: validatorConfig}) + defer valCleanup() + + // store initial values + for _, contract := range []string{"multicall-rust", "multicall-rust-b", "multicall-evm"} { + storeRecure := []multiCallRecurse{ + { + Name: contract, + opcode: vm.SSTORE, + }, + } + testProgramRecursiveCall(t, builder, slotVals, rander, storeRecure) + } + + // execute transactions + blockNum := uint64(0) + for { + item := int(rander.GetUint64()/4) % len(tests) + blockNum = testProgramRecursiveCall(t, builder, slotVals, rander, tests[item]) + tests[item] = tests[len(tests)-1] + tests = tests[:len(tests)-1] + if len(tests)%100 == 0 { + log.Error("running transactions..", "blockNum", blockNum, "remaining", len(tests)) + } + if len(tests) == 0 { + break + } + } + + // wait for validation + for { + got := valClient.ConsensusNode.BlockValidator.WaitForPos(t, ctx, arbutil.MessageIndex(blockNum), time.Second*10) + if got { + break + } + log.Error("validating blocks..", "waiting for", blockNum, "validated", valClient.ConsensusNode.BlockValidator.GetValidated()) + } +} + +func TestProgramCallSimple(t *testing.T) { + tests := [][]multiCallRecurse{ + { + { + Name: "multicall-rust", + opcode: vm.SLOAD, + }, + { + Name: "multicall-rust", + opcode: vm.STATICCALL, + }, + { + Name: "multicall-rust", + opcode: vm.DELEGATECALL, + }, + }, + } + testProgramResursiveCalls(t, tests, true) +} diff --git a/system_tests/program_test.go b/system_tests/program_test.go index b20efe0740..3b4909b639 100644 --- a/system_tests/program_test.go +++ b/system_tests/program_test.go @@ -8,6 +8,7 @@ import ( "context" "encoding/binary" "fmt" + "math" "math/big" "os" "path/filepath" @@ -24,10 +25,14 @@ import ( "github.com/ethereum/go-ethereum/crypto" _ "github.com/ethereum/go-ethereum/eth/tracers/js" "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbcompress" "github.com/offchainlabs/nitro/arbos/programs" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/mocksgen" pgen "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/arbmath" @@ -221,6 +226,102 @@ func testActivateTwice(t *testing.T, jit bool) { validateBlocks(t, 7, jit, builder) } +func TestStylusUpgrade(t *testing.T) { + t.Parallel() + testStylusUpgrade(t, true) +} + +func testStylusUpgrade(t *testing.T, jit bool) { + builder, auth, cleanup := setupProgramTest(t, false, func(b *NodeBuilder) { b.WithArbOSVersion(params.ArbosVersion_Stylus) }) + defer cleanup() + + ctx := builder.ctx + + l2info := builder.L2Info + l2client := builder.L2.Client + + ensure := func(tx *types.Transaction, err error) *types.Receipt { + t.Helper() + Require(t, err) + receipt, err := EnsureTxSucceeded(ctx, l2client, tx) + Require(t, err) + return receipt + } + + arbOwner, err := pgen.NewArbOwner(types.ArbOwnerAddress, l2client) + Require(t, err) + ensure(arbOwner.SetInkPrice(&auth, 1)) + + wasm, _ := readWasmFile(t, rustFile("keccak")) + keccakAddr := deployContract(t, ctx, auth, 
l2client, wasm) + + colors.PrintBlue("keccak program deployed to ", keccakAddr) + + preimage := []byte("hello, you fool") + + keccakArgs := []byte{0x01} // keccak the preimage once + keccakArgs = append(keccakArgs, preimage...) + + checkFailWith := func(errMessage string) uint64 { + msg := ethereum.CallMsg{ + To: &keccakAddr, + Data: keccakArgs, + } + _, err = l2client.CallContract(ctx, msg, nil) + if err == nil || !strings.Contains(err.Error(), errMessage) { + Fatal(t, "call should have failed with "+errMessage, " got: "+err.Error()) + } + + // execute onchain for proving's sake + tx := l2info.PrepareTxTo("Owner", &keccakAddr, 1e9, nil, keccakArgs) + Require(t, l2client.SendTransaction(ctx, tx)) + return EnsureTxFailed(t, ctx, l2client, tx).BlockNumber.Uint64() + } + + checkSucceeds := func() uint64 { + msg := ethereum.CallMsg{ + To: &keccakAddr, + Data: keccakArgs, + } + _, err = l2client.CallContract(ctx, msg, nil) + if err != nil { + Fatal(t, err) + } + + // execute onchain for proving's sake + tx := l2info.PrepareTxTo("Owner", &keccakAddr, 1e9, nil, keccakArgs) + Require(t, l2client.SendTransaction(ctx, tx)) + receipt, err := EnsureTxSucceeded(ctx, l2client, tx) + if err != nil { + Fatal(t, err) + } + return receipt.BlockNumber.Uint64() + } + + // Calling the contract pre-activation should fail. + blockFail1 := checkFailWith("ProgramNotActivated") + + activateWasm(t, ctx, auth, l2client, keccakAddr, "keccak") + + blockSuccess1 := checkSucceeds() + + tx, err := arbOwner.ScheduleArbOSUpgrade(&auth, 31, 0) + Require(t, err) + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + + // generate traffic to perform the upgrade + TransferBalance(t, "Owner", "Owner", big.NewInt(1), builder.L2Info, builder.L2.Client, ctx) + + blockFail2 := checkFailWith("ProgramNeedsUpgrade") + + activateWasm(t, ctx, auth, l2client, keccakAddr, "keccak") + + blockSuccess2 := checkSucceeds() + + validateBlockRange(t, []uint64{blockFail1, blockSuccess1, blockFail2, blockSuccess2}, jit, builder) +} + func TestProgramErrors(t *testing.T) { t.Parallel() errorTest(t, true) @@ -1248,7 +1349,7 @@ func TestProgramCacheManager(t *testing.T) { // check ownership assert(arbOwner.IsChainOwner(nil, ownerAuth.From)) ensure(arbWasmCache.EvictCodehash(&ownerAuth, codehash)) - ensure(arbWasmCache.CacheCodehash(&ownerAuth, codehash)) + ensure(arbWasmCache.CacheProgram(&ownerAuth, program)) // de-authorize manager ensure(arbOwner.RemoveWasmCacheManager(&ownerAuth, manager)) @@ -1258,13 +1359,82 @@ func TestProgramCacheManager(t *testing.T) { assert(len(all) == 0, err) } -func setupProgramTest(t *testing.T, jit bool) ( +func testReturnDataCost(t *testing.T, arbosVersion uint64) { + builder, auth, cleanup := setupProgramTest(t, false, func(b *NodeBuilder) { b.WithArbOSVersion(arbosVersion) }) + ctx := builder.ctx + l2client := builder.L2.Client + defer cleanup() + + // use a consistent ink price + arbOwner, err := pgen.NewArbOwner(types.ArbOwnerAddress, l2client) + Require(t, err) + tx, err := arbOwner.SetInkPrice(&auth, 10000) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, builder.L2.Client, tx) + Require(t, err) + + returnSize := big.NewInt(1024 * 1024) // 1MiB + returnSizeBytes := arbmath.U256Bytes(returnSize) + + testCall := func(to common.Address) uint64 { + msg := ethereum.CallMsg{ + To: &to, + Data: returnSizeBytes, + SkipL1Charging: true, + } + ret, err := l2client.CallContract(ctx, msg, nil) + Require(t, err) + + if !arbmath.BigEquals(big.NewInt(int64(len(ret))), returnSize) { + Fatal(t, "unexpected return 
length", len(ret), "expected", returnSize) + } + + gas, err := l2client.EstimateGas(ctx, msg) + Require(t, err) + + return gas + } + + stylusReturnSizeAddr := deployWasm(t, ctx, auth, l2client, watFile("return-size")) + + stylusGas := testCall(stylusReturnSizeAddr) + + // PUSH32 [returnSizeBytes] + evmBytecode := append([]byte{0x7F}, returnSizeBytes...) + // PUSH0 RETURN + evmBytecode = append(evmBytecode, 0x5F, 0xF3) + evmReturnSizeAddr := deployContract(t, ctx, auth, l2client, evmBytecode) + + evmGas := testCall(evmReturnSizeAddr) + + colors.PrintGrey(fmt.Sprintf("arbosVersion=%v stylusGas=%v evmGas=%v", arbosVersion, stylusGas, evmGas)) + // a bit of gas difference is expected due to EVM PUSH32 and PUSH0 cost (in practice this is 5 gas) + similarGas := math.Abs(float64(stylusGas)-float64(evmGas)) <= 100 + if arbosVersion >= params.ArbosVersion_StylusFixes { + if !similarGas { + Fatal(t, "unexpected gas difference for return data: stylus", stylusGas, ", evm", evmGas) + } + } else if similarGas { + Fatal(t, "gas unexpectedly similar for return data: stylus", stylusGas, ", evm", evmGas) + } +} + +func TestReturnDataCost(t *testing.T) { + testReturnDataCost(t, params.ArbosVersion_Stylus) + testReturnDataCost(t, params.ArbosVersion_StylusFixes) +} + +func setupProgramTest(t *testing.T, jit bool, builderOpts ...func(*NodeBuilder)) ( *NodeBuilder, bind.TransactOpts, func(), ) { ctx, cancel := context.WithCancel(context.Background()) builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + for _, opt := range builderOpts { + opt(builder) + } + builder.nodeConfig.BlockValidator.Enable = false builder.nodeConfig.Staker.Enable = true builder.nodeConfig.BatchPoster.Enable = true @@ -1458,3 +1628,198 @@ func formatTime(duration time.Duration) string { } return fmt.Sprintf("%.2f%s", span, units[unit]) } + +func TestWasmRecreate(t *testing.T) { + builder, auth, cleanup := setupProgramTest(t, true) + ctx := builder.ctx + l2info := builder.L2Info + l2client := builder.L2.Client + defer cleanup() + + storage := deployWasm(t, ctx, auth, l2client, rustFile("storage")) + + zero := common.Hash{} + val := common.HexToHash("0x121233445566") + + // do an onchain call - store value + storeTx := l2info.PrepareTxTo("Owner", &storage, l2info.TransferGas, nil, argsForStorageWrite(zero, val)) + Require(t, l2client.SendTransaction(ctx, storeTx)) + _, err := EnsureTxSucceeded(ctx, l2client, storeTx) + Require(t, err) + + testDir := t.TempDir() + nodeBStack := createStackConfigForTest(testDir) + nodeB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{stackConfig: nodeBStack}) + + _, err = EnsureTxSucceeded(ctx, nodeB.Client, storeTx) + Require(t, err) + + // make sure reading 2nd value succeeds from 2nd node + loadTx := l2info.PrepareTxTo("Owner", &storage, l2info.TransferGas, nil, argsForStorageRead(zero)) + result, err := arbutil.SendTxAsCall(ctx, nodeB.Client, loadTx, l2info.GetAddress("Owner"), nil, true) + Require(t, err) + if common.BytesToHash(result) != val { + Fatal(t, "got wrong value") + } + // close nodeB + cleanupB() + + // delete wasm dir of nodeB + + wasmPath := filepath.Join(testDir, "system_tests.test", "wasm") + dirContents, err := os.ReadDir(wasmPath) + Require(t, err) + if len(dirContents) == 0 { + Fatal(t, "not contents found before delete") + } + os.RemoveAll(wasmPath) + + // recreate nodeB - using same source dir (wasm deleted) + nodeB, cleanupB = builder.Build2ndNode(t, &SecondNodeParams{stackConfig: nodeBStack}) + + // test nodeB - sees existing transaction + _, err = EnsureTxSucceeded(ctx, 
nodeB.Client, storeTx) + Require(t, err) + + // test nodeB - answers eth_call (requires reloading wasm) + result, err = arbutil.SendTxAsCall(ctx, nodeB.Client, loadTx, l2info.GetAddress("Owner"), nil, true) + Require(t, err) + if common.BytesToHash(result) != val { + Fatal(t, "got wrong value") + } + + // send new tx (requires wasm) and check nodeB sees it as well + Require(t, l2client.SendTransaction(ctx, loadTx)) + + _, err = EnsureTxSucceeded(ctx, l2client, loadTx) + Require(t, err) + + _, err = EnsureTxSucceeded(ctx, nodeB.Client, loadTx) + Require(t, err) + + cleanupB() + dirContents, err = os.ReadDir(wasmPath) + Require(t, err) + if len(dirContents) == 0 { + Fatal(t, "not contents found before delete") + } + os.RemoveAll(wasmPath) + +} + +// createMapFromDb is used in verifying if wasm store rebuilding works +func createMapFromDb(db ethdb.KeyValueStore) (map[string][]byte, error) { + iter := db.NewIterator(nil, nil) + defer iter.Release() + + dataMap := make(map[string][]byte) + + for iter.Next() { + key := iter.Key() + value := iter.Value() + + dataMap[string(key)] = value + } + + if err := iter.Error(); err != nil { + return nil, fmt.Errorf("iterator error: %w", err) + } + + return dataMap, nil +} + +func TestWasmStoreRebuilding(t *testing.T) { + builder, auth, cleanup := setupProgramTest(t, true) + ctx := builder.ctx + l2info := builder.L2Info + l2client := builder.L2.Client + defer cleanup() + + storage := deployWasm(t, ctx, auth, l2client, rustFile("storage")) + + zero := common.Hash{} + val := common.HexToHash("0x121233445566") + + // do an onchain call - store value + storeTx := l2info.PrepareTxTo("Owner", &storage, l2info.TransferGas, nil, argsForStorageWrite(zero, val)) + Require(t, l2client.SendTransaction(ctx, storeTx)) + _, err := EnsureTxSucceeded(ctx, l2client, storeTx) + Require(t, err) + + testDir := t.TempDir() + nodeBStack := createStackConfigForTest(testDir) + nodeB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{stackConfig: nodeBStack}) + + _, err = EnsureTxSucceeded(ctx, nodeB.Client, storeTx) + Require(t, err) + + // make sure reading 2nd value succeeds from 2nd node + loadTx := l2info.PrepareTxTo("Owner", &storage, l2info.TransferGas, nil, argsForStorageRead(zero)) + result, err := arbutil.SendTxAsCall(ctx, nodeB.Client, loadTx, l2info.GetAddress("Owner"), nil, true) + Require(t, err) + if common.BytesToHash(result) != val { + Fatal(t, "got wrong value") + } + + wasmDb := nodeB.ExecNode.Backend.ArbInterface().BlockChain().StateCache().WasmStore() + + storeMap, err := createMapFromDb(wasmDb) + Require(t, err) + + // close nodeB + cleanupB() + + // delete wasm dir of nodeB + wasmPath := filepath.Join(testDir, "system_tests.test", "wasm") + dirContents, err := os.ReadDir(wasmPath) + Require(t, err) + if len(dirContents) == 0 { + Fatal(t, "not contents found before delete") + } + os.RemoveAll(wasmPath) + + // recreate nodeB - using same source dir (wasm deleted) + nodeB, cleanupB = builder.Build2ndNode(t, &SecondNodeParams{stackConfig: nodeBStack}) + bc := nodeB.ExecNode.Backend.ArbInterface().BlockChain() + + wasmDbAfterDelete := nodeB.ExecNode.Backend.ArbInterface().BlockChain().StateCache().WasmStore() + storeMapAfterDelete, err := createMapFromDb(wasmDbAfterDelete) + Require(t, err) + if len(storeMapAfterDelete) != 0 { + Fatal(t, "non-empty wasm store after it was previously deleted") + } + + // Start rebuilding and wait for it to finish + log.Info("starting rebuilding of wasm store") + Require(t, gethexec.RebuildWasmStore(ctx, wasmDbAfterDelete, 
nodeB.ExecNode.ChainDB, nodeB.ExecNode.ConfigFetcher().RPC.MaxRecreateStateDepth, bc, common.Hash{}, bc.CurrentBlock().Hash())) + + wasmDbAfterRebuild := nodeB.ExecNode.Backend.ArbInterface().BlockChain().StateCache().WasmStore() + + // Before comparing, check if rebuilding was set to done and then delete the keys that are used to track rebuilding status + status, err := gethexec.ReadFromKeyValueStore[common.Hash](wasmDbAfterRebuild, gethexec.RebuildingPositionKey) + Require(t, err) + if status != gethexec.RebuildingDone { + Fatal(t, "rebuilding was not set to done after successful completion") + } + Require(t, wasmDbAfterRebuild.Delete(gethexec.RebuildingPositionKey)) + Require(t, wasmDbAfterRebuild.Delete(gethexec.RebuildingStartBlockHashKey)) + + rebuiltStoreMap, err := createMapFromDb(wasmDbAfterRebuild) + Require(t, err) + + // Check if rebuilding worked + if len(storeMap) != len(rebuiltStoreMap) { + Fatal(t, "size mismatch while rebuilding wasm store:", "want", len(storeMap), "got", len(rebuiltStoreMap)) + } + for key, value1 := range storeMap { + value2, exists := rebuiltStoreMap[key] + if !exists { + Fatal(t, "rebuilt wasm store doesn't have key from original") + } + if !bytes.Equal(value1, value2) { + Fatal(t, "rebuilt wasm store has incorrect value from original") + } + } + + cleanupB() +} diff --git a/system_tests/pruning_test.go b/system_tests/pruning_test.go index 8efc8653e6..041781ac48 100644 --- a/system_tests/pruning_test.go +++ b/system_tests/pruning_test.go @@ -65,7 +65,7 @@ func TestPruning(t *testing.T) { stack, err := node.New(builder.l2StackConfig) Require(t, err) defer stack.Close() - chainDb, err := stack.OpenDatabase("l2chaindata", 0, 0, "l2chaindata/", false) + chainDb, err := stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")) Require(t, err) defer chainDb.Close() chainDbEntriesBeforePruning := countStateEntries(chainDb) @@ -89,7 +89,8 @@ func TestPruning(t *testing.T) { initConfig := conf.InitConfigDefault initConfig.Prune = "full" coreCacheConfig := gethexec.DefaultCacheConfigFor(stack, &builder.execConfig.Caching) - err = pruning.PruneChainDb(ctx, chainDb, stack, &initConfig, coreCacheConfig, builder.L1.Client, *builder.L2.ConsensusNode.DeployInfo, false) + persistentConfig := conf.PersistentConfigDefault + err = pruning.PruneChainDb(ctx, chainDb, stack, &initConfig, coreCacheConfig, &persistentConfig, builder.L1.Client, *builder.L2.ConsensusNode.DeployInfo, false) Require(t, err) for _, key := range testKeys { diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index 777ed17961..bf321808de 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -449,7 +449,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig } func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { - cacheConfig := gethexec.DefaultCachingConfig + cacheConfig := gethexec.TestCachingConfig cacheConfig.Archive = true cacheConfig.SnapshotCache = 0 // disable snapshots cacheConfig.BlockAge = 0 // use only Caching.BlockCount to keep only last N blocks in dirties cache, no matter how new they are diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index 886a0528c7..43d55f40c9 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -8,12 +8,14 @@ import ( "errors" "fmt" "math/big" + "net" "testing" "time" 
"github.com/go-redis/redis/v8" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/arbostypes" @@ -21,6 +23,7 @@ import ( "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util/redisutil" + "github.com/offchainlabs/nitro/util/testhelpers" ) func initRedisForTest(t *testing.T, ctx context.Context, redisUrl string, nodeNames []string) { @@ -270,6 +273,8 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { } func testCoordinatorMessageSync(t *testing.T, successCase bool) { + logHandler := testhelpers.InitTestLog(t, log.LvlTrace) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -304,16 +309,25 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { nodeConfigDup := *builder.nodeConfig builder.nodeConfig = &nodeConfigDup - + builder.nodeConfig.Feed.Output = *newBroadcasterConfigTest() builder.nodeConfig.SeqCoordinator.MyUrl = nodeNames[1] if !successCase { builder.nodeConfig.SeqCoordinator.Signer.ECDSA.AcceptSequencer = false builder.nodeConfig.SeqCoordinator.Signer.ECDSA.AllowedAddresses = []string{builder.L2Info.GetAddress("User2").Hex()} } - testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: builder.nodeConfig}) defer cleanupB() + // Build nodeBOutputFeedReader. + // nodeB doesn't sequence transactions, but adds messages related to them to its output feed. + // nodeBOutputFeedReader reads those messages from this feed and processes them. + // nodeBOutputFeedReader doesn't read messages from L1 since none of the nodes posts to L1. + nodeBPort := testClientB.ConsensusNode.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port + nodeConfigNodeBOutputFeedReader := arbnode.ConfigDefaultL1NonSequencerTest() + nodeConfigNodeBOutputFeedReader.Feed.Input = *newBroadcastClientConfigTest(nodeBPort) + testClientNodeBOutputFeedReader, cleanupNodeBOutputFeedReader := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: nodeConfigNodeBOutputFeedReader}) + defer cleanupNodeBOutputFeedReader() + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) err = builder.L2.Client.SendTransaction(ctx, tx) @@ -330,6 +344,19 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { if l2balance.Cmp(big.NewInt(1e12)) != 0 { t.Fatal("Unexpected balance:", l2balance) } + + // check that nodeBOutputFeedReader also processed the transaction + _, err = WaitForTx(ctx, testClientNodeBOutputFeedReader.Client, tx.Hash(), time.Second*5) + Require(t, err) + l2balance, err = testClientNodeBOutputFeedReader.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil) + Require(t, err) + if l2balance.Cmp(big.NewInt(1e12)) != 0 { + t.Fatal("Unexpected balance:", l2balance) + } + + if logHandler.WasLogged(arbnode.BlockHashMismatchLogMsg) { + t.Fatal("BlockHashMismatchLogMsg was logged unexpectedly") + } } else { _, err = WaitForTx(ctx, testClientB.Client, tx.Hash(), time.Second) if err == nil { diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index 749a91e3b1..ab30598b60 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -11,10 +11,19 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbos/arbostypes" 
+ "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/broadcastclient" + "github.com/offchainlabs/nitro/broadcaster/backlog" + "github.com/offchainlabs/nitro/broadcaster/message" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/relay" "github.com/offchainlabs/nitro/util/signature" + "github.com/offchainlabs/nitro/util/testhelpers" "github.com/offchainlabs/nitro/wsbroadcastserver" ) @@ -38,7 +47,8 @@ func newBroadcastClientConfigTest(port int) *broadcastclient.Config { } func TestSequencerFeed(t *testing.T) { - t.Parallel() + logHandler := testhelpers.InitTestLog(t, log.LvlTrace) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -73,6 +83,10 @@ func TestSequencerFeed(t *testing.T) { if l2balance.Cmp(big.NewInt(1e12)) != 0 { t.Fatal("Unexpected balance:", l2balance) } + + if logHandler.WasLogged(arbnode.BlockHashMismatchLogMsg) { + t.Fatal("BlockHashMismatchLogMsg was logged unexpectedly") + } } func TestRelayedSequencerFeed(t *testing.T) { @@ -250,3 +264,101 @@ func TestLyingSequencer(t *testing.T) { func TestLyingSequencerLocalDAS(t *testing.T) { testLyingSequencer(t, "files") } + +func testBlockHashComparison(t *testing.T, blockHash *common.Hash, mustMismatch bool) { + logHandler := testhelpers.InitTestLog(t, log.LvlTrace) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + backlogConfiFetcher := func() *backlog.Config { + return &backlog.DefaultTestConfig + } + bklg := backlog.NewBacklog(backlogConfiFetcher) + + wsBroadcastServer := wsbroadcastserver.NewWSBroadcastServer( + newBroadcasterConfigTest, + bklg, + 412346, + nil, + ) + err := wsBroadcastServer.Initialize() + if err != nil { + t.Fatal("error initializing wsBroadcastServer:", err) + } + err = wsBroadcastServer.Start(ctx) + if err != nil { + t.Fatal("error starting wsBroadcastServer:", err) + } + defer wsBroadcastServer.StopAndWait() + + port := wsBroadcastServer.ListenerAddr().(*net.TCPAddr).Port + + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.nodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) + cleanup := builder.Build(t) + defer cleanup() + testClient := builder.L2 + + userAccount := "User2" + builder.L2Info.GenerateAccount(userAccount) + tx := builder.L2Info.PrepareTx("Owner", userAccount, builder.L2Info.TransferGas, big.NewInt(1e12), nil) + l1IncomingMsgHeader := arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_L2Message, + Poster: l1pricing.BatchPosterAddress, + BlockNumber: 29, + Timestamp: 1715295980, + RequestId: nil, + L1BaseFee: nil, + } + l1IncomingMsg, err := gethexec.MessageFromTxes( + &l1IncomingMsgHeader, + types.Transactions{tx}, + []error{nil}, + ) + Require(t, err) + + broadcastMessage := message.BroadcastMessage{ + Version: 1, + Messages: []*message.BroadcastFeedMessage{ + { + SequenceNumber: 1, + Message: arbostypes.MessageWithMetadata{ + Message: l1IncomingMsg, + DelayedMessagesRead: 1, + }, + BlockHash: blockHash, + }, + }, + } + wsBroadcastServer.Broadcast(&broadcastMessage) + + // By now, even though block hash mismatch, the transaction should still be processed + _, err = WaitForTx(ctx, testClient.Client, tx.Hash(), time.Second*15) + if err != nil { + t.Fatal("error waiting for tx:", err) + } + l2balance, err := testClient.Client.BalanceAt(ctx, builder.L2Info.GetAddress(userAccount), nil) + if err != nil { + t.Fatal("error getting balance:", err) + } + if l2balance.Cmp(big.NewInt(1e12)) != 0 { + t.Fatal("Unexpected balance:", l2balance) + } 
+ + mismatched := logHandler.WasLogged(arbnode.BlockHashMismatchLogMsg) + if mustMismatch && !mismatched { + t.Fatal("Failed to log BlockHashMismatchLogMsg") + } else if !mustMismatch && mismatched { + t.Fatal("BlockHashMismatchLogMsg was logged unexpectedly") + } +} + +func TestBlockHashFeedMismatch(t *testing.T) { + blockHash := common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") + testBlockHashComparison(t, &blockHash, true) +} + +func TestBlockHashFeedNil(t *testing.T) { + testBlockHashComparison(t, nil, false) +} diff --git a/system_tests/snap_sync_test.go b/system_tests/snap_sync_test.go new file mode 100644 index 0000000000..dd22bb027c --- /dev/null +++ b/system_tests/snap_sync_test.go @@ -0,0 +1,189 @@ +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbtest + +import ( + "context" + "math/big" + "os" + "testing" + "time" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/params" + + "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbos/l2pricing" + "github.com/offchainlabs/nitro/util" +) + +func TestSnapSync(t *testing.T) { + ctx, cancelCtx := context.WithCancel(context.Background()) + defer cancelCtx() + + var transferGas = util.NormalizeL2GasForL1GasInitial(800_000, params.GWei) // include room for aggregator L1 costs + + // 1st node with sequencer, stays up all the time. + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.L2Info = NewBlockChainTestInfo( + t, + types.NewArbitrumSigner(types.NewLondonSigner(builder.chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), + transferGas, + ) + cleanup := builder.Build(t) + defer cleanup() + + // 2nd node without sequencer, syncs up to the first node. + // This node will be stopped in middle and arbitrumdata will be deleted. + testDir := t.TempDir() + nodeBStack := createStackConfigForTest(testDir) + nodeB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{stackConfig: nodeBStack}) + + builder.BridgeBalance(t, "Faucet", big.NewInt(1).Mul(big.NewInt(params.Ether), big.NewInt(10000))) + + builder.L2Info.GenerateAccount("BackgroundUser") + + // Create transactions till batch count is 10 + createTransactionTillBatchCount(ctx, t, builder, 10) + // Wait for nodeB to sync up to the first node + waitForBlocksToCatchup(ctx, t, builder.L2.Client, nodeB.Client) + + // Create a config with snap sync enabled and same database directory as the 2nd node + nodeConfig := createNodeConfigWithSnapSync(t, builder) + // Cleanup the message data of 2nd node, but keep the block state data. + // This is to simulate a snap sync environment where we’ve just gotten the block state but don’t have any messages. + err := os.RemoveAll(nodeB.ConsensusNode.Stack.ResolvePath("arbitrumdata")) + Require(t, err) + + // Cleanup the 2nd node to release the database lock + cleanupB() + // New node with snap sync enabled, and the same database directory as the 2nd node but with no message data. 
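+ // Reusing nodeBStack means nodeC inherits nodeB's block state (the chain database) on disk, while the
+ // wiped arbitrumdata is repopulated through the SnapSyncTest settings created below from existing
+ // batch metadata, mimicking a node that received a state snapshot but has no message history.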
+ nodeC, cleanupC := builder.Build2ndNode(t, &SecondNodeParams{stackConfig: nodeBStack, nodeConfig: nodeConfig}) + defer cleanupC() + + // Create transactions till batch count is 20 + createTransactionTillBatchCount(ctx, t, builder, 20) + // Wait for nodeB to sync up to the first node + waitForBatchCountToCatchup(ctx, t, builder.L2.ConsensusNode.InboxTracker, nodeC.ConsensusNode.InboxTracker) + // Once the node is synced up, check if the batch metadata is the same for the last batch + // This is to ensure that the snap sync worked correctly + count, err := builder.L2.ConsensusNode.InboxTracker.GetBatchCount() + Require(t, err) + metadata, err := builder.L2.ConsensusNode.InboxTracker.GetBatchMetadata(count - 1) + Require(t, err) + metadataNodeC, err := nodeC.ConsensusNode.InboxTracker.GetBatchMetadata(count - 1) + Require(t, err) + if metadata != metadataNodeC { + t.Error("Batch metadata mismatch") + } + finalMessageCount := uint64(metadata.MessageCount) + waitForBlockToCatchupToMessageCount(ctx, t, builder.L2.Client, finalMessageCount) + waitForBlockToCatchupToMessageCount(ctx, t, nodeC.Client, finalMessageCount) + // Fetching message count - 1 instead on the latest block number as the latest block number might not be + // present in the snap sync node since it does not have the sequencer feed. + header, err := builder.L2.Client.HeaderByNumber(ctx, big.NewInt(int64(finalMessageCount)-1)) + Require(t, err) + headerNodeC, err := nodeC.Client.HeaderByNumber(ctx, big.NewInt(int64(finalMessageCount)-1)) + Require(t, err) + // Once the node is synced up, check if the block hash is the same for the last block + // This is to ensure that the snap sync worked correctly + if header.Hash().Cmp(headerNodeC.Hash()) != 0 { + t.Error("Block hash mismatch") + } + // This to ensure that the node did a snap sync and did not sync the batch before the snap sync batch. 
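+ // createNodeConfigWithSnapSync anchors the sync at the second-to-last batch (BatchCount - 2), so any
+ // batch older than that, such as BatchCount - 3 here, should be unknown to nodeC's inbox tracker.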
+ _, err = nodeC.ConsensusNode.InboxTracker.GetBatchMetadata(nodeConfig.SnapSyncTest.BatchCount - 3) + if err == nil { + t.Error("Batch metadata should not be present for the batch before the snap sync batch") + } +} + +func waitForBlockToCatchupToMessageCount( + ctx context.Context, + t *testing.T, + client *ethclient.Client, + finalMessageCount uint64, +) { + for { + select { + case <-ctx.Done(): + return + case <-time.After(10 * time.Millisecond): + latestHeaderNodeC, err := client.HeaderByNumber(ctx, nil) + Require(t, err) + if latestHeaderNodeC.Number.Uint64() >= uint64(finalMessageCount)-1 { + return + } + } + } +} + +func waitForBlocksToCatchup(ctx context.Context, t *testing.T, clientA *ethclient.Client, clientB *ethclient.Client) { + for { + select { + case <-ctx.Done(): + return + case <-time.After(10 * time.Millisecond): + headerA, err := clientA.HeaderByNumber(ctx, nil) + Require(t, err) + headerB, err := clientB.HeaderByNumber(ctx, nil) + Require(t, err) + if headerA.Number.Cmp(headerB.Number) == 0 { + return + } + } + } +} + +func waitForBatchCountToCatchup(ctx context.Context, t *testing.T, inboxTrackerA *arbnode.InboxTracker, inboxTrackerB *arbnode.InboxTracker) { + for { + select { + case <-ctx.Done(): + return + case <-time.After(10 * time.Millisecond): + countA, err := inboxTrackerA.GetBatchCount() + Require(t, err) + countB, err := inboxTrackerB.GetBatchCount() + Require(t, err) + if countA == countB { + return + } + } + + } +} + +func createTransactionTillBatchCount(ctx context.Context, t *testing.T, builder *NodeBuilder, finalCount uint64) { + for { + Require(t, ctx.Err()) + tx := builder.L2Info.PrepareTx("Faucet", "BackgroundUser", builder.L2Info.TransferGas, big.NewInt(1), nil) + err := builder.L2.Client.SendTransaction(ctx, tx) + Require(t, err) + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + count, err := builder.L2.ConsensusNode.InboxTracker.GetBatchCount() + Require(t, err) + if count > finalCount { + break + } + } +} + +func createNodeConfigWithSnapSync(t *testing.T, builder *NodeBuilder) *arbnode.Config { + batchCount, err := builder.L2.ConsensusNode.InboxTracker.GetBatchCount() + Require(t, err) + // Last batch is batchCount - 1, so prev batch is batchCount - 2 + prevBatchMetaData, err := builder.L2.ConsensusNode.InboxTracker.GetBatchMetadata(batchCount - 2) + Require(t, err) + prevMessage, err := builder.L2.ConsensusNode.TxStreamer.GetMessage(prevBatchMetaData.MessageCount - 1) + Require(t, err) + // Create a config with snap sync enabled and same database directory as the 2nd node + nodeConfig := builder.nodeConfig + nodeConfig.SnapSyncTest.Enabled = true + nodeConfig.SnapSyncTest.BatchCount = batchCount + nodeConfig.SnapSyncTest.DelayedCount = prevBatchMetaData.DelayedMessageCount - 1 + nodeConfig.SnapSyncTest.PrevDelayedRead = prevMessage.DelayedMessagesRead + nodeConfig.SnapSyncTest.PrevBatchMessageCount = uint64(prevBatchMetaData.MessageCount) + return nodeConfig +} diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index e1315fc220..d63185e638 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -12,7 +12,6 @@ import ( "errors" "fmt" "math/big" - "net/http" "strings" "testing" "time" @@ -61,20 +60,10 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) t.Parallel() ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() - httpSrv, srv := externalsignertest.NewServer(t) - cp, err := externalsignertest.CertPaths() - if err != nil { - 
t.Fatalf("Error getting cert paths: %v", err) - } - t.Cleanup(func() { - if err := httpSrv.Shutdown(ctx); err != nil { - t.Fatalf("Error shutting down http server: %v", err) - } - }) + srv := externalsignertest.NewServer(t) go func() { - log.Debug("Server is listening on port 1234...") - if err := httpSrv.ListenAndServeTLS(cp.ServerCert, cp.ServerKey); err != nil && err != http.ErrServerClosed { - log.Debug("ListenAndServeTLS() failed", "error", err) + if err := srv.Start(); err != nil { + log.Error("Failed to start external signer server:", err) return } }() @@ -235,7 +224,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) } Require(t, err) cfg := arbnode.ConfigDefaultL1NonSequencerTest() - signerCfg, err := externalSignerTestCfg(srv.Address) + signerCfg, err := externalSignerTestCfg(srv.Address, srv.URL()) if err != nil { t.Fatalf("Error getting external signer config: %v", err) } diff --git a/system_tests/staterecovery_test.go b/system_tests/staterecovery_test.go index 632e748da8..1dd4be2444 100644 --- a/system_tests/staterecovery_test.go +++ b/system_tests/staterecovery_test.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/trie" + "github.com/offchainlabs/nitro/cmd/conf" "github.com/offchainlabs/nitro/cmd/staterecovery" "github.com/offchainlabs/nitro/execution/gethexec" ) @@ -49,10 +50,10 @@ func TestRectreateMissingStates(t *testing.T) { stack, err := node.New(builder.l2StackConfig) Require(t, err) defer stack.Close() - chainDb, err := stack.OpenDatabase("l2chaindata", 0, 0, "l2chaindata/", false) + chainDb, err := stack.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")) Require(t, err) defer chainDb.Close() - cacheConfig := gethexec.DefaultCacheConfigFor(stack, &gethexec.DefaultCachingConfig) + cacheConfig := gethexec.DefaultCacheConfigFor(stack, &gethexec.TestCachingConfig) bc, err := gethexec.GetBlockChain(chainDb, cacheConfig, builder.chainConfig, builder.execConfig.TxLookupLimit) Require(t, err) err = staterecovery.RecreateMissingStates(chainDb, bc, cacheConfig, 1) diff --git a/system_tests/stylus_test.go b/system_tests/stylus_test.go index 46a9103b04..21f54a763c 100644 --- a/system_tests/stylus_test.go +++ b/system_tests/stylus_test.go @@ -8,6 +8,8 @@ package arbtest import ( "testing" + + "github.com/ethereum/go-ethereum/core/vm" ) func TestProgramArbitratorKeccak(t *testing.T) { @@ -67,3 +69,46 @@ func TestProgramArbitratorActivateFails(t *testing.T) { func TestProgramArbitratorEarlyExit(t *testing.T) { testEarlyExit(t, false) } + +func fullRecurseTest() [][]multiCallRecurse { + result := make([][]multiCallRecurse, 0) + for _, op0 := range []vm.OpCode{vm.SSTORE, vm.SLOAD} { + for _, contract0 := range []string{"multicall-rust", "multicall-evm"} { + for _, op1 := range []vm.OpCode{vm.CALL, vm.STATICCALL, vm.DELEGATECALL} { + for _, contract1 := range []string{"multicall-rust", "multicall-rust-b", "multicall-evm"} { + for _, op2 := range []vm.OpCode{vm.CALL, vm.STATICCALL, vm.DELEGATECALL} { + for _, contract2 := range []string{"multicall-rust", "multicall-rust-b", "multicall-evm"} { + for _, op3 := range []vm.OpCode{vm.CALL, vm.STATICCALL, vm.DELEGATECALL} { + for _, contract3 := range []string{"multicall-rust", "multicall-rust-b", "multicall-evm"} { + recurse := make([]multiCallRecurse, 4) + recurse[0].opcode = op0 + recurse[0].Name = contract0 + recurse[1].opcode 
= op1 + recurse[1].Name = contract1 + recurse[2].opcode = op2 + recurse[2].Name = contract2 + recurse[3].opcode = op3 + recurse[3].Name = contract3 + result = append(result, recurse) + } + } + } + } + } + } + } + } + return result +} + +func TestProgramLongCall(t *testing.T) { + testProgramResursiveCalls(t, fullRecurseTest(), true) +} + +func TestProgramLongArbitratorCall(t *testing.T) { + testProgramResursiveCalls(t, fullRecurseTest(), false) +} + +func TestProgramArbitratorStylusUpgrade(t *testing.T) { + testStylusUpgrade(t, false) +} diff --git a/util/arbmath/math.go b/util/arbmath/math.go index 1c11c6ad58..7413955409 100644 --- a/util/arbmath/math.go +++ b/util/arbmath/math.go @@ -74,14 +74,6 @@ func MaxInt[T Number](values ...T) T { return max } -// AbsValue the absolute value of a number -func AbsValue[T Number](value T) T { - if value < 0 { - return -value // never happens for unsigned types - } - return value -} - // Checks if two ints are sufficiently close to one another func Within[T Unsigned](a, b, bound T) bool { min := MinInt(a, b) @@ -267,14 +259,22 @@ func BigFloatMulByUint(multiplicand *big.Float, multiplier uint64) *big.Float { return new(big.Float).Mul(multiplicand, UintToBigFloat(multiplier)) } +func MaxSignedValue[T Signed]() T { + return T((uint64(1) << (8*unsafe.Sizeof(T(0)) - 1)) - 1) +} + +func MinSignedValue[T Signed]() T { + return T(uint64(1) << ((8 * unsafe.Sizeof(T(0))) - 1)) +} + // SaturatingAdd add two integers without overflow func SaturatingAdd[T Signed](a, b T) T { sum := a + b if b > 0 && sum < a { - sum = ^T(0) >> 1 + sum = MaxSignedValue[T]() } if b < 0 && sum > a { - sum = (^T(0) >> 1) + 1 + sum = MinSignedValue[T]() } return sum } @@ -290,7 +290,11 @@ func SaturatingUAdd[T Unsigned](a, b T) T { // SaturatingSub subtract an int64 from another without overflow func SaturatingSub(minuend, subtrahend int64) int64 { - return SaturatingAdd(minuend, -subtrahend) + if subtrahend == math.MinInt64 { + // The absolute value of MinInt64 is one greater than MaxInt64 + return SaturatingAdd(SaturatingAdd(minuend, math.MaxInt64), 1) + } + return SaturatingAdd(minuend, SaturatingNeg(subtrahend)) } // SaturatingUSub subtract an integer from another without underflow @@ -301,7 +305,7 @@ func SaturatingUSub[T Unsigned](a, b T) T { return a - b } -// SaturatingMul multiply two integers without over/underflow +// SaturatingUMul multiply two integers without over/underflow func SaturatingUMul[T Unsigned](a, b T) T { product := a * b if b != 0 && product/b != a { @@ -315,9 +319,9 @@ func SaturatingMul[T Signed](a, b T) T { product := a * b if b != 0 && product/b != a { if (a > 0 && b > 0) || (a < 0 && b < 0) { - product = ^T(0) >> 1 + product = MaxSignedValue[T]() } else { - product = (^T(0) >> 1) + 1 + product = MinSignedValue[T]() } } return product @@ -367,8 +371,8 @@ func SaturatingCastToUint(value *big.Int) uint64 { // Negates an int without underflow func SaturatingNeg[T Signed](value T) T { - if value == ^T(0) { - return (^T(0)) >> 1 + if value < 0 && value == MinSignedValue[T]() { + return MaxSignedValue[T]() } return -value } diff --git a/util/arbmath/math_fuzz_test.go b/util/arbmath/math_fuzz_test.go new file mode 100644 index 0000000000..591d699de0 --- /dev/null +++ b/util/arbmath/math_fuzz_test.go @@ -0,0 +1,112 @@ +// Copyright 2024, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbmath + +import ( + "math/big" + "testing" +) + +func toBig[T Signed](a T) *big.Int { + return big.NewInt(int64(a)) +} + +func saturatingBigToInt[T Signed](a *big.Int) T { + // MinSignedValue and MaxSignedValue are already separately tested + if a.Cmp(toBig(MaxSignedValue[T]())) > 0 { + return MaxSignedValue[T]() + } + if a.Cmp(toBig(MinSignedValue[T]())) < 0 { + return MinSignedValue[T]() + } + return T(a.Int64()) +} + +func fuzzSaturatingAdd[T Signed](f *testing.F) { + f.Fuzz(func(t *testing.T, a, b T) { + got := SaturatingAdd(a, b) + expected := saturatingBigToInt[T](new(big.Int).Add(toBig(a), toBig(b))) + if got != expected { + t.Errorf("SaturatingAdd(%v, %v) = %v, expected %v", a, b, got, expected) + } + }) +} + +func fuzzSaturatingMul[T Signed](f *testing.F) { + f.Fuzz(func(t *testing.T, a, b T) { + got := SaturatingMul(a, b) + expected := saturatingBigToInt[T](new(big.Int).Mul(toBig(a), toBig(b))) + if got != expected { + t.Errorf("SaturatingMul(%v, %v) = %v, expected %v", a, b, got, expected) + } + }) +} + +func fuzzSaturatingNeg[T Signed](f *testing.F) { + f.Fuzz(func(t *testing.T, a T) { + got := SaturatingNeg(a) + expected := saturatingBigToInt[T](new(big.Int).Neg(toBig(a))) + if got != expected { + t.Errorf("SaturatingNeg(%v) = %v, expected %v", a, got, expected) + } + }) +} + +func FuzzSaturatingAddInt8(f *testing.F) { + fuzzSaturatingAdd[int8](f) +} + +func FuzzSaturatingAddInt16(f *testing.F) { + fuzzSaturatingAdd[int16](f) +} + +func FuzzSaturatingAddInt32(f *testing.F) { + fuzzSaturatingAdd[int32](f) +} + +func FuzzSaturatingAddInt64(f *testing.F) { + fuzzSaturatingAdd[int64](f) +} + +func FuzzSaturatingSub(f *testing.F) { + f.Fuzz(func(t *testing.T, a, b int64) { + got := SaturatingSub(a, b) + expected := saturatingBigToInt[int64](new(big.Int).Sub(toBig(a), toBig(b))) + if got != expected { + t.Errorf("SaturatingSub(%v, %v) = %v, expected %v", a, b, got, expected) + } + }) +} + +func FuzzSaturatingMulInt8(f *testing.F) { + fuzzSaturatingMul[int8](f) +} + +func FuzzSaturatingMulInt16(f *testing.F) { + fuzzSaturatingMul[int16](f) +} + +func FuzzSaturatingMulInt32(f *testing.F) { + fuzzSaturatingMul[int32](f) +} + +func FuzzSaturatingMulInt64(f *testing.F) { + fuzzSaturatingMul[int64](f) +} + +func FuzzSaturatingNegInt8(f *testing.F) { + fuzzSaturatingNeg[int8](f) +} + +func FuzzSaturatingNegInt16(f *testing.F) { + fuzzSaturatingNeg[int16](f) +} + +func FuzzSaturatingNegInt32(f *testing.F) { + fuzzSaturatingNeg[int32](f) +} + +func FuzzSaturatingNegInt64(f *testing.F) { + fuzzSaturatingNeg[int64](f) +} diff --git a/util/arbmath/math_test.go b/util/arbmath/math_test.go index 2e2f14795a..1be60dc58b 100644 --- a/util/arbmath/math_test.go +++ b/util/arbmath/math_test.go @@ -5,6 +5,7 @@ package arbmath import ( "bytes" + "fmt" "math" "math/rand" "testing" @@ -120,6 +121,110 @@ func TestSlices(t *testing.T) { assert_eq(SliceWithRunoff(data, 7, 8), []uint8{}) } +func testMinMaxSignedValues[T Signed](t *testing.T, min T, max T) { + gotMin := MinSignedValue[T]() + if gotMin != min { + Fail(t, "expected min", min, "but got", gotMin) + } + gotMax := MaxSignedValue[T]() + if gotMax != max { + Fail(t, "expected max", max, "but got", gotMax) + } +} + +func TestMinMaxSignedValues(t *testing.T) { + testMinMaxSignedValues[int8](t, math.MinInt8, math.MaxInt8) + testMinMaxSignedValues[int16](t, math.MinInt16, math.MaxInt16) + testMinMaxSignedValues[int32](t, math.MinInt32, math.MaxInt32) + 
testMinMaxSignedValues[int64](t, math.MinInt64, math.MaxInt64) +} + +func TestSaturatingAdd(t *testing.T) { + tests := []struct { + a, b, expected int64 + }{ + {2, 3, 5}, + {-1, -2, -3}, + {math.MaxInt64, 1, math.MaxInt64}, + {math.MaxInt64, math.MaxInt64, math.MaxInt64}, + {math.MinInt64, -1, math.MinInt64}, + {math.MinInt64, math.MinInt64, math.MinInt64}, + } + + for _, tc := range tests { + t.Run(fmt.Sprintf("%v + %v = %v", tc.a, tc.b, tc.expected), func(t *testing.T) { + sum := SaturatingAdd(int64(tc.a), int64(tc.b)) + if sum != tc.expected { + t.Errorf("SaturatingAdd(%v, %v) = %v; want %v", tc.a, tc.b, sum, tc.expected) + } + }) + } +} + +func TestSaturatingSub(t *testing.T) { + tests := []struct { + a, b, expected int64 + }{ + {5, 3, 2}, + {-3, -2, -1}, + {math.MinInt64, 1, math.MinInt64}, + {math.MinInt64, -1, math.MinInt64 + 1}, + {math.MinInt64, math.MinInt64, 0}, + {0, math.MinInt64, math.MaxInt64}, + } + + for _, tc := range tests { + t.Run("", func(t *testing.T) { + sum := SaturatingSub(int64(tc.a), int64(tc.b)) + if sum != tc.expected { + t.Errorf("SaturatingSub(%v, %v) = %v; want %v", tc.a, tc.b, sum, tc.expected) + } + }) + } +} + +func TestSaturatingMul(t *testing.T) { + tests := []struct { + a, b, expected int64 + }{ + {5, 3, 15}, + {-3, -2, 6}, + {math.MaxInt64, 2, math.MaxInt64}, + {math.MinInt64, 2, math.MinInt64}, + } + + for _, tc := range tests { + t.Run(fmt.Sprintf("%v - %v = %v", tc.a, tc.b, tc.expected), func(t *testing.T) { + sum := SaturatingMul(int64(tc.a), int64(tc.b)) + if sum != tc.expected { + t.Errorf("SaturatingMul(%v, %v) = %v; want %v", tc.a, tc.b, sum, tc.expected) + } + }) + } +} + +func TestSaturatingNeg(t *testing.T) { + tests := []struct { + value int64 + expected int64 + }{ + {0, 0}, + {5, -5}, + {-5, 5}, + {math.MinInt64, math.MaxInt64}, + {math.MaxInt64, math.MinInt64 + 1}, + } + + for _, tc := range tests { + t.Run(fmt.Sprintf("-%v = %v", tc.value, tc.expected), func(t *testing.T) { + result := SaturatingNeg(tc.value) + if result != tc.expected { + t.Errorf("SaturatingNeg(%v) = %v: expected %v", tc.value, result, tc.expected) + } + }) + } +} + func Fail(t *testing.T, printables ...interface{}) { t.Helper() testhelpers.FailImpl(t, printables...) 
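// Worked example for the SaturatingSub edge case covered above (subtrahend == math.MinInt64):
// negating MinInt64 would overflow, so the updated implementation computes
// SaturatingAdd(SaturatingAdd(minuend, math.MaxInt64), 1) instead. With minuend = 0 the inner add
// yields MaxInt64 and the final +1 saturates, so SaturatingSub(0, math.MinInt64) == math.MaxInt64,
// matching the {0, math.MinInt64, math.MaxInt64} entry in the test table.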
diff --git a/util/headerreader/blob_client.go b/util/headerreader/blob_client.go index 664dbb5e30..2b47a940c3 100644 --- a/util/headerreader/blob_client.go +++ b/util/headerreader/blob_client.go @@ -13,6 +13,7 @@ import ( "net/url" "os" "path" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -188,8 +189,14 @@ const trailingCharsOfResponse = 25 func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHashes []common.Hash) ([]kzg4844.Blob, error) { rawData, err := beaconRequest[json.RawMessage](b, ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot)) - if err != nil { - return nil, fmt.Errorf("error calling beacon client in blobSidecars: %w", err) + if err != nil || len(rawData) == 0 { + // blobs are pruned after 4096 epochs (1 epoch = 32 slots), we determine if the requested slot were to be pruned by a non-archive endpoint + roughAgeOfSlot := uint64(time.Now().Unix()) - (b.genesisTime + slot*b.secondsPerSlot) + if roughAgeOfSlot > b.secondsPerSlot*32*4096 { + return nil, fmt.Errorf("beacon client in blobSidecars got error or empty response fetching older blobs in slot: %d, an archive endpoint is required, please refer to https://docs.arbitrum.io/run-arbitrum-node/l1-ethereum-beacon-chain-rpc-providers, err: %w", slot, err) + } else { + return nil, fmt.Errorf("beacon client in blobSidecars got error or empty response fetching non-expired blobs in slot: %d, if using a prysm endpoint, try --enable-experimental-backfill flag, err: %w", slot, err) + } } var response []blobResponseItem if err := json.Unmarshal(rawData, &response); err != nil { @@ -222,10 +229,11 @@ func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHas var found bool for outputIdx = range versionedHashes { if versionedHashes[outputIdx] == versionedHash { - found = true if outputsFound[outputIdx] { - return nil, fmt.Errorf("found blob with versioned hash %v twice", versionedHash) + // Duplicate, skip this one + break } + found = true outputsFound[outputIdx] = true break } diff --git a/util/iostat/iostat.go b/util/iostat/iostat.go new file mode 100644 index 0000000000..9bc5ff800c --- /dev/null +++ b/util/iostat/iostat.go @@ -0,0 +1,114 @@ +package iostat + +import ( + "bufio" + "context" + "fmt" + "os/exec" + "runtime" + "strconv" + "strings" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" +) + +func RegisterAndPopulateMetrics(ctx context.Context, spawnInterval, maxDeviceCount int) { + if runtime.GOOS != "linux" { + log.Warn("Iostat command not supported disabling corresponding metrics") + return + } + deviceMetrics := make(map[string]map[string]metrics.GaugeFloat64) + statReceiver := make(chan DeviceStats) + go Run(ctx, spawnInterval, statReceiver) + for { + stat, ok := <-statReceiver + if !ok { + log.Info("Iostat statReceiver channel was closed due to error or command being completed") + return + } + if _, ok := deviceMetrics[stat.DeviceName]; !ok { + // Register metrics for a maximum of maxDeviceCount (fail safe incase iostat command returns incorrect names indefinitely) + if len(deviceMetrics) < maxDeviceCount { + baseMetricName := fmt.Sprintf("isotat/%s/", stat.DeviceName) + deviceMetrics[stat.DeviceName] = make(map[string]metrics.GaugeFloat64) + deviceMetrics[stat.DeviceName]["readspersecond"] = metrics.NewRegisteredGaugeFloat64(baseMetricName+"readspersecond", nil) + deviceMetrics[stat.DeviceName]["writespersecond"] = metrics.NewRegisteredGaugeFloat64(baseMetricName+"writespersecond", 
nil) + deviceMetrics[stat.DeviceName]["await"] = metrics.NewRegisteredGaugeFloat64(baseMetricName+"await", nil) + } else { + continue + } + } + deviceMetrics[stat.DeviceName]["readspersecond"].Update(stat.ReadsPerSecond) + deviceMetrics[stat.DeviceName]["writespersecond"].Update(stat.WritesPerSecond) + deviceMetrics[stat.DeviceName]["await"].Update(stat.Await) + } +} + +type DeviceStats struct { + DeviceName string + ReadsPerSecond float64 + WritesPerSecond float64 + Await float64 +} + +func Run(ctx context.Context, interval int, receiver chan DeviceStats) { + defer close(receiver) + // #nosec G204 + cmd := exec.CommandContext(ctx, "iostat", "-dNxy", strconv.Itoa(interval)) + stdout, err := cmd.StdoutPipe() + if err != nil { + log.Error("Failed to get stdout", "err", err) + return + } + if err := cmd.Start(); err != nil { + log.Error("Failed to start iostat command", "err", err) + return + } + var fields []string + scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, "Device") { + fields = strings.Fields(line) + continue + } + data := strings.Fields(line) + if len(data) == 0 { + continue + } + stat := DeviceStats{} + var err error + for i, field := range fields { + switch field { + case "Device", "Device:": + stat.DeviceName = data[i] + case "r/s": + stat.ReadsPerSecond, err = strconv.ParseFloat(data[i], 64) + case "w/s": + stat.WritesPerSecond, err = strconv.ParseFloat(data[i], 64) + case "await": + stat.Await, err = strconv.ParseFloat(data[i], 64) + } + if err != nil { + log.Error("Error parsing command result from iostat", "err", err) + continue + } + } + if stat.DeviceName == "" { + continue + } + receiver <- stat + } + if scanner.Err() != nil { + log.Error("Iostat scanner error", err, scanner.Err()) + } + if err := cmd.Process.Kill(); err != nil { + log.Error("Failed to kill iostat process", "err", err) + } + if err := cmd.Wait(); err != nil { + log.Error("Error waiting for iostat to exit", "err", err) + } + stdout.Close() + log.Info("Iostat command terminated") +} diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go index 02b41cf15d..56aebef396 100644 --- a/util/rpcclient/rpcclient.go +++ b/util/rpcclient/rpcclient.go @@ -21,14 +21,15 @@ import ( ) type ClientConfig struct { - URL string `json:"url,omitempty" koanf:"url"` - JWTSecret string `json:"jwtsecret,omitempty" koanf:"jwtsecret"` - Timeout time.Duration `json:"timeout,omitempty" koanf:"timeout" reload:"hot"` - Retries uint `json:"retries,omitempty" koanf:"retries" reload:"hot"` - ConnectionWait time.Duration `json:"connection-wait,omitempty" koanf:"connection-wait"` - ArgLogLimit uint `json:"arg-log-limit,omitempty" koanf:"arg-log-limit" reload:"hot"` - RetryErrors string `json:"retry-errors,omitempty" koanf:"retry-errors" reload:"hot"` - RetryDelay time.Duration `json:"retry-delay,omitempty" koanf:"retry-delay"` + URL string `json:"url,omitempty" koanf:"url"` + JWTSecret string `json:"jwtsecret,omitempty" koanf:"jwtsecret"` + Timeout time.Duration `json:"timeout,omitempty" koanf:"timeout" reload:"hot"` + Retries uint `json:"retries,omitempty" koanf:"retries" reload:"hot"` + ConnectionWait time.Duration `json:"connection-wait,omitempty" koanf:"connection-wait"` + ArgLogLimit uint `json:"arg-log-limit,omitempty" koanf:"arg-log-limit" reload:"hot"` + RetryErrors string `json:"retry-errors,omitempty" koanf:"retry-errors" reload:"hot"` + RetryDelay time.Duration `json:"retry-delay,omitempty" koanf:"retry-delay"` + 
WebsocketMessageSizeLimit int64 `json:"websocket-message-size-limit,omitempty" koanf:"websocket-message-size-limit"` retryErrors *regexp.Regexp } @@ -46,16 +47,18 @@ func (c *ClientConfig) Validate() error { type ClientConfigFetcher func() *ClientConfig var TestClientConfig = ClientConfig{ - URL: "self", - JWTSecret: "", + URL: "self", + JWTSecret: "", + WebsocketMessageSizeLimit: 256 * 1024 * 1024, } var DefaultClientConfig = ClientConfig{ - URL: "self-auth", - JWTSecret: "", - Retries: 3, - RetryErrors: "websocket: close.*|dial tcp .*|.*i/o timeout|.*connection reset by peer|.*connection refused", - ArgLogLimit: 2048, + URL: "self-auth", + JWTSecret: "", + Retries: 3, + RetryErrors: "websocket: close.*|dial tcp .*|.*i/o timeout|.*connection reset by peer|.*connection refused", + ArgLogLimit: 2048, + WebsocketMessageSizeLimit: 256 * 1024 * 1024, } func RPCClientAddOptions(prefix string, f *flag.FlagSet, defaultConfig *ClientConfig) { @@ -67,6 +70,7 @@ func RPCClientAddOptions(prefix string, f *flag.FlagSet, defaultConfig *ClientCo f.Uint(prefix+".retries", defaultConfig.Retries, "number of retries in case of failure(0 mean one attempt)") f.String(prefix+".retry-errors", defaultConfig.RetryErrors, "Errors matching this regular expression are automatically retried") f.Duration(prefix+".retry-delay", defaultConfig.RetryDelay, "delay between retries") + f.Int64(prefix+".websocket-message-size-limit", defaultConfig.WebsocketMessageSizeLimit, "websocket message size limit used by the RPC client. 0 means no limit") } type RpcClient struct { @@ -256,9 +260,9 @@ func (c *RpcClient) Start(ctx_in context.Context) error { var err error var client *rpc.Client if jwt == nil { - client, err = rpc.DialContext(ctx, url) + client, err = rpc.DialOptions(ctx, url, rpc.WithWebsocketMessageSizeLimit(c.config().WebsocketMessageSizeLimit)) } else { - client, err = rpc.DialOptions(ctx, url, rpc.WithHTTPAuth(node.NewJWTAuth([32]byte(*jwt)))) + client, err = rpc.DialOptions(ctx, url, rpc.WithHTTPAuth(node.NewJWTAuth([32]byte(*jwt))), rpc.WithWebsocketMessageSizeLimit(c.config().WebsocketMessageSizeLimit)) } cancelCtx() if err == nil { diff --git a/util/testhelpers/github/releases.go b/util/testhelpers/github/releases.go new file mode 100644 index 0000000000..59f591d92c --- /dev/null +++ b/util/testhelpers/github/releases.go @@ -0,0 +1,83 @@ +package github + +import ( + "context" + "fmt" + "net/url" + "regexp" + "strings" + + "github.com/google/go-github/v62/github" +) + +var wasmRootExp = regexp.MustCompile(`\*\*WAVM Module Root\*\*: (0x[a-f0-9]{64})`) + +type ConsensusRelease struct { + WavmModuleRoot string + MachineWavmURL url.URL + ReplayWasmURL url.URL +} + +// NitroReleases returns the most recent 50 releases of the Nitro repository. +func NitroReleases(ctx context.Context) ([]*github.RepositoryRelease, error) { + client := github.NewClient(nil) + opts := &github.ListOptions{ + PerPage: 50, + } + releases, _, err := client.Repositories.ListReleases(ctx, "OffchainLabs", "nitro", opts) + return releases, err +} + +// LatestConsensusRelease returns data about the latest consensus release. 
+func LatestConsensusRelease(ctx context.Context) (*ConsensusRelease, error) { + releases, err := NitroReleases(ctx) + if err != nil { + return nil, err + } + var found *ConsensusRelease + for _, release := range releases { + if strings.HasPrefix(release.GetTagName(), "consensus") { + if found, err = fromRelease(release); err != nil { + return nil, err + } + break + } + } + if found == nil { + return nil, fmt.Errorf("no consensus release found") + } + return found, nil +} + +func fromRelease(release *github.RepositoryRelease) (*ConsensusRelease, error) { + // TODO(eljobe): Consider making the module-root.txt a release asset. + // This is currently brittle because it relies on the release body format. + matches := wasmRootExp.FindStringSubmatch(release.GetBody()) + if len(matches) != 2 { + return nil, fmt.Errorf("no WAVM module root found in release body") + } + wavmModuleRoot := matches[1] + var machineWavmURL url.URL + var replayWasmURL url.URL + for _, asset := range release.Assets { + if asset.GetName() == "machine.wavm.br" { + wURL, err := url.Parse(asset.GetBrowserDownloadURL()) + if err != nil { + return nil, err + } + machineWavmURL = *wURL + } + if asset.GetName() == "replay.wasm" { + rURL, err := url.Parse(asset.GetBrowserDownloadURL()) + if err != nil { + return nil, err + } + replayWasmURL = *rURL + } + } + return &ConsensusRelease{ + WavmModuleRoot: wavmModuleRoot, + MachineWavmURL: machineWavmURL, + ReplayWasmURL: replayWasmURL, + }, nil +} diff --git a/util/testhelpers/github/releases_test.go b/util/testhelpers/github/releases_test.go new file mode 100644 index 0000000000..a25d68c543 --- /dev/null +++ b/util/testhelpers/github/releases_test.go @@ -0,0 +1,38 @@ +package github + +import ( + "context" + "testing" +) + +func TestReleases(t *testing.T) { + rels, err := NitroReleases(context.Background()) + if err != nil { + t.Error(err) + } + if len(rels) == 0 { + t.Error("No releases found") + } + if len(rels) != 50 { + t.Errorf("Expected 50 releases, got %d", len(rels)) + } +} + +func TestLatestConsensusRelease(t *testing.T) { + rel, err := LatestConsensusRelease(context.Background()) + if err != nil { + t.Fatal(err) + } + if rel == nil { + t.Fatal("No consensus release found") + } + if rel.WavmModuleRoot == "" { + t.Error("Unexpected empty WAVM module root.") + } + if rel.MachineWavmURL.String() == "" { + t.Error("Unexpected empty machine WAVM URL.") + } + if rel.ReplayWasmURL.String() == "" { + t.Error("Unexpected empty replay WASM URL.") + } +} diff --git a/util/testhelpers/port.go b/util/testhelpers/port.go new file mode 100644 index 0000000000..d31fa41cdc --- /dev/null +++ b/util/testhelpers/port.go @@ -0,0 +1,17 @@ +package testhelpers + +import ( + "net" +) + +// FreeTCPPortListener returns a listener listening on an unused local port. +// +// This is useful for tests that need to bind to a port without risking a conflict. +func FreeTCPPortListener() (net.Listener, error) { + // This works because the kernel will assign an unused port when ":0" is opened. 
+	l, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		return nil, err
+	}
+	return l, nil
+}
diff --git a/util/testhelpers/port_test.go b/util/testhelpers/port_test.go
new file mode 100644
index 0000000000..ef9bb18537
--- /dev/null
+++ b/util/testhelpers/port_test.go
@@ -0,0 +1,23 @@
+package testhelpers
+
+import (
+	"net"
+	"testing"
+)
+
+func TestFreeTCPPortListener(t *testing.T) {
+	aListener, err := FreeTCPPortListener()
+	if err != nil {
+		t.Fatal(err)
+	}
+	bListener, err := FreeTCPPortListener()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if aListener.Addr().(*net.TCPAddr).Port == bListener.Addr().(*net.TCPAddr).Port {
+		t.Errorf("FreeTCPPortListener() got same port: %v, %v", aListener, bListener)
+	}
+	if aListener.Addr().(*net.TCPAddr).Port == 0 || bListener.Addr().(*net.TCPAddr).Port == 0 {
+		t.Errorf("FreeTCPPortListener() got port 0")
+	}
+}
diff --git a/validator/client/redis/producer.go b/validator/client/redis/producer.go
index 1055d93968..4aa4031350 100644
--- a/validator/client/redis/producer.go
+++ b/validator/client/redis/producer.go
@@ -23,6 +23,7 @@ type ValidationClientConfig struct {
 	Room           int32                 `koanf:"room"`
 	RedisURL       string                `koanf:"redis-url"`
 	ProducerConfig pubsub.ProducerConfig `koanf:"producer-config"`
+	CreateStreams  bool                  `koanf:"create-streams"`
 }
 
 func (c ValidationClientConfig) Enabled() bool {
@@ -34,6 +35,7 @@ var DefaultValidationClientConfig = ValidationClientConfig{
 	Room:           2,
 	RedisURL:       "",
 	ProducerConfig: pubsub.DefaultProducerConfig,
+	CreateStreams:  true,
 }
 
 var TestValidationClientConfig = ValidationClientConfig{
@@ -41,12 +43,15 @@ var TestValidationClientConfig = ValidationClientConfig{
 	Room:           2,
 	RedisURL:       "",
 	ProducerConfig: pubsub.TestProducerConfig,
+	CreateStreams:  false,
 }
 
 func ValidationClientConfigAddOptions(prefix string, f *pflag.FlagSet) {
 	f.String(prefix+".name", DefaultValidationClientConfig.Name, "validation client name")
 	f.Int32(prefix+".room", DefaultValidationClientConfig.Room, "validation client room")
+	f.String(prefix+".redis-url", DefaultValidationClientConfig.RedisURL, "redis url")
 	pubsub.ProducerAddConfigAddOptions(prefix+".producer-config", f)
+	f.Bool(prefix+".create-streams", DefaultValidationClientConfig.CreateStreams, "create redis streams if they do not exist")
 }
 
 // ValidationClient implements validation client through redis streams.
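A usage sketch for the FreeTCPPortListener helper introduced above. The value of returning the bound listener, rather than a bare port number, is that the caller can serve on it directly, so no other process can grab the port between discovery and use; net/http is only a stand-in consumer here, and the helper body is copied in so the snippet is self-contained.

package main

import (
	"fmt"
	"net"
	"net/http"
)

// freeTCPPortListener mirrors the new testhelpers helper: listening on
// "127.0.0.1:0" lets the kernel pick an unused port.
func freeTCPPortListener() (net.Listener, error) {
	return net.Listen("tcp", "127.0.0.1:0")
}

func main() {
	l, err := freeTCPPortListener()
	if err != nil {
		panic(err)
	}
	defer l.Close()

	// The chosen port is only known after the listener is bound.
	fmt.Println("bound to port", l.Addr().(*net.TCPAddr).Port)

	// Serve on the already-bound listener instead of re-binding by port number,
	// which avoids the window where another process could take the port.
	srv := &http.Server{Handler: http.NotFoundHandler()}
	_ = srv.Serve(l) // blocks; shown only to illustrate handing off the listener
}

TestFreeTCPPortListener above checks exactly the two properties this pattern relies on: distinct listeners get distinct ports, and a bound listener never reports port 0.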
@@ -59,6 +64,7 @@ type ValidationClient struct { producerConfig pubsub.ProducerConfig redisClient redis.UniversalClient moduleRoots []common.Hash + createStreams bool } func NewValidationClient(cfg *ValidationClientConfig) (*ValidationClient, error) { @@ -75,11 +81,17 @@ func NewValidationClient(cfg *ValidationClientConfig) (*ValidationClient, error) producers: make(map[common.Hash]*pubsub.Producer[*validator.ValidationInput, validator.GoGlobalState]), producerConfig: cfg.ProducerConfig, redisClient: redisClient, + createStreams: cfg.CreateStreams, }, nil } -func (c *ValidationClient) Initialize(moduleRoots []common.Hash) error { +func (c *ValidationClient) Initialize(ctx context.Context, moduleRoots []common.Hash) error { for _, mr := range moduleRoots { + if c.createStreams { + if err := pubsub.CreateStream(ctx, server_api.RedisStreamForRoot(mr), c.redisClient); err != nil { + return fmt.Errorf("creating redis stream: %w", err) + } + } if _, exists := c.producers[mr]; exists { log.Warn("Producer already existsw for module root", "hash", mr) continue diff --git a/validator/client/validation_client.go b/validator/client/validation_client.go index e7f7627d7f..fa6b9000f2 100644 --- a/validator/client/validation_client.go +++ b/validator/client/validation_client.go @@ -11,11 +11,9 @@ import ( "sync/atomic" "time" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/validator" "github.com/offchainlabs/nitro/util/containers" - "github.com/offchainlabs/nitro/util/jsonapi" "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -44,7 +42,7 @@ func NewValidationClient(config rpcclient.ClientConfigFetcher, stack *node.Node) func (c *ValidationClient) Launch(entry *validator.ValidationInput, moduleRoot common.Hash) validator.ValidationRun { atomic.AddInt32(&c.room, -1) promise := stopwaiter.LaunchPromiseThread[validator.GoGlobalState](c, func(ctx context.Context) (validator.GoGlobalState, error) { - input := ValidationInputToJson(entry) + input := server_api.ValidationInputToJson(entry) var res validator.GoGlobalState err := c.client.CallContext(ctx, &res, server_api.Namespace+"_validate", input, moduleRoot) atomic.AddInt32(&c.room, 1) @@ -104,10 +102,7 @@ func (c *ValidationClient) Stop() { } func (c *ValidationClient) Name() string { - if c.Started() { - return c.name - } - return "(not started)" + return c.name } func (c *ValidationClient) Room() int { @@ -131,7 +126,7 @@ func NewExecutionClient(config rpcclient.ClientConfigFetcher, stack *node.Node) func (c *ExecutionClient) CreateExecutionRun(wasmModuleRoot common.Hash, input *validator.ValidationInput) containers.PromiseInterface[validator.ExecutionRun] { return stopwaiter.LaunchPromiseThread[validator.ExecutionRun](c, func(ctx context.Context) (validator.ExecutionRun, error) { var res uint64 - err := c.client.CallContext(ctx, &res, server_api.Namespace+"_createExecutionRun", wasmModuleRoot, ValidationInputToJson(input)) + err := c.client.CallContext(ctx, &res, server_api.Namespace+"_createExecutionRun", wasmModuleRoot, server_api.ValidationInputToJson(input)) if err != nil { return nil, err } @@ -162,7 +157,12 @@ func (c *ExecutionClient) LatestWasmModuleRoot() containers.PromiseInterface[com } func (c *ExecutionClient) WriteToFile(input *validator.ValidationInput, expOut validator.GoGlobalState, moduleRoot common.Hash) containers.PromiseInterface[struct{}] { - jsonInput := ValidationInputToJson(input) + jsonInput := server_api.ValidationInputToJson(input) + if err := 
jsonInput.WriteToFile(); err != nil { + return stopwaiter.LaunchPromiseThread[struct{}](c, func(ctx context.Context) (struct{}, error) { + return struct{}{}, err + }) + } return stopwaiter.LaunchPromiseThread[struct{}](c, func(ctx context.Context) (struct{}, error) { err := c.client.CallContext(ctx, nil, server_api.Namespace+"_writeToFile", jsonInput, expOut, moduleRoot) return struct{}{}, err @@ -231,36 +231,3 @@ func (r *ExecutionClientRun) Close() { } }) } - -func ValidationInputToJson(entry *validator.ValidationInput) *server_api.InputJSON { - jsonPreimagesMap := make(map[arbutil.PreimageType]*jsonapi.PreimagesMapJson) - for ty, preimages := range entry.Preimages { - jsonPreimagesMap[ty] = jsonapi.NewPreimagesMapJson(preimages) - } - res := &server_api.InputJSON{ - Id: entry.Id, - HasDelayedMsg: entry.HasDelayedMsg, - DelayedMsgNr: entry.DelayedMsgNr, - DelayedMsgB64: base64.StdEncoding.EncodeToString(entry.DelayedMsg), - StartState: entry.StartState, - PreimagesB64: jsonPreimagesMap, - UserWasms: make(map[common.Hash]server_api.UserWasmJson), - DebugChain: entry.DebugChain, - - L1BlockHeight: entry.BlockHeight, - HotShotCommitment: entry.HotShotCommitment, - HotShotLiveness: entry.HotShotLiveness, - } - for _, binfo := range entry.BatchInfo { - encData := base64.StdEncoding.EncodeToString(binfo.Data) - res.BatchInfo = append(res.BatchInfo, server_api.BatchInfoJson{Number: binfo.Number, DataB64: encData}) - } - for moduleHash, info := range entry.UserWasms { - encWasm := server_api.UserWasmJson{ - Asm: base64.StdEncoding.EncodeToString(info.Asm), - Module: base64.StdEncoding.EncodeToString(info.Module), - } - res.UserWasms[moduleHash] = encWasm - } - return res -} diff --git a/validator/server_api/json.go b/validator/server_api/json.go index 5f963a5402..641b3f19f0 100644 --- a/validator/server_api/json.go +++ b/validator/server_api/json.go @@ -4,10 +4,14 @@ package server_api import ( + "encoding/base64" + "encoding/json" "fmt" + "os" espressoTypes "github.com/EspressoSystems/espresso-sequencer-go/types" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/jsonapi" @@ -67,6 +71,17 @@ type InputJSON struct { HotShotCommitment espressoTypes.Commitment } +func (i *InputJSON) WriteToFile() error { + contents, err := json.MarshalIndent(i, "", " ") + if err != nil { + return err + } + if err = os.WriteFile(fmt.Sprintf("block_inputs_%d.json", i.Id), contents, 0600); err != nil { + return err + } + return nil +} + type UserWasmJson struct { Module string Asm string @@ -76,3 +91,80 @@ type BatchInfoJson struct { Number uint64 DataB64 string } + +func ValidationInputToJson(entry *validator.ValidationInput) *InputJSON { + jsonPreimagesMap := make(map[arbutil.PreimageType]*jsonapi.PreimagesMapJson) + for ty, preimages := range entry.Preimages { + jsonPreimagesMap[ty] = jsonapi.NewPreimagesMapJson(preimages) + } + res := &InputJSON{ + Id: entry.Id, + HasDelayedMsg: entry.HasDelayedMsg, + DelayedMsgNr: entry.DelayedMsgNr, + DelayedMsgB64: base64.StdEncoding.EncodeToString(entry.DelayedMsg), + StartState: entry.StartState, + PreimagesB64: jsonPreimagesMap, + UserWasms: make(map[common.Hash]UserWasmJson), + DebugChain: entry.DebugChain, + } + for _, binfo := range entry.BatchInfo { + encData := base64.StdEncoding.EncodeToString(binfo.Data) + res.BatchInfo = append(res.BatchInfo, BatchInfoJson{Number: binfo.Number, DataB64: encData}) + } + for moduleHash, info := range entry.UserWasms { + 
encWasm := UserWasmJson{ + Asm: base64.StdEncoding.EncodeToString(info.Asm), + Module: base64.StdEncoding.EncodeToString(info.Module), + } + res.UserWasms[moduleHash] = encWasm + } + return res +} + +func ValidationInputFromJson(entry *InputJSON) (*validator.ValidationInput, error) { + preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte) + for ty, jsonPreimages := range entry.PreimagesB64 { + preimages[ty] = jsonPreimages.Map + } + valInput := &validator.ValidationInput{ + Id: entry.Id, + HasDelayedMsg: entry.HasDelayedMsg, + DelayedMsgNr: entry.DelayedMsgNr, + StartState: entry.StartState, + Preimages: preimages, + UserWasms: make(state.UserWasms), + DebugChain: entry.DebugChain, + } + delayed, err := base64.StdEncoding.DecodeString(entry.DelayedMsgB64) + if err != nil { + return nil, err + } + valInput.DelayedMsg = delayed + for _, binfo := range entry.BatchInfo { + data, err := base64.StdEncoding.DecodeString(binfo.DataB64) + if err != nil { + return nil, err + } + decInfo := validator.BatchInfo{ + Number: binfo.Number, + Data: data, + } + valInput.BatchInfo = append(valInput.BatchInfo, decInfo) + } + for moduleHash, info := range entry.UserWasms { + asm, err := base64.StdEncoding.DecodeString(info.Asm) + if err != nil { + return nil, err + } + module, err := base64.StdEncoding.DecodeString(info.Module) + if err != nil { + return nil, err + } + decInfo := state.ActivatedWasm{ + Asm: asm, + Module: module, + } + valInput.UserWasms[moduleHash] = decInfo + } + return valInput, nil +} diff --git a/validator/server_common/machine_locator.go b/validator/server_common/machine_locator.go index 28093c30f0..71f6af60b6 100644 --- a/validator/server_common/machine_locator.go +++ b/validator/server_common/machine_locator.go @@ -58,7 +58,7 @@ func NewMachineLocator(rootPath string) (*MachineLocator, error) { for _, dir := range dirs { fInfo, err := os.Stat(dir) if err != nil { - log.Warn("Getting file info", "error", err) + log.Warn("Getting file info", "dir", dir, "error", err) continue } if !fInfo.IsDir() { diff --git a/validator/valnode/redis/consumer.go b/validator/valnode/redis/consumer.go index 1cadaf7c9a..016f30bd61 100644 --- a/validator/valnode/redis/consumer.go +++ b/validator/valnode/redis/consumer.go @@ -22,7 +22,8 @@ type ValidationServer struct { spawner validator.ValidationSpawner // consumers stores moduleRoot to consumer mapping. - consumers map[common.Hash]*pubsub.Consumer[*validator.ValidationInput, validator.GoGlobalState] + consumers map[common.Hash]*pubsub.Consumer[*validator.ValidationInput, validator.GoGlobalState] + streamTimeout time.Duration } func NewValidationServer(cfg *ValidationServerConfig, spawner validator.ValidationSpawner) (*ValidationServer, error) { @@ -43,39 +44,83 @@ func NewValidationServer(cfg *ValidationServerConfig, spawner validator.Validati consumers[mr] = c } return &ValidationServer{ - consumers: consumers, - spawner: spawner, + consumers: consumers, + spawner: spawner, + streamTimeout: cfg.StreamTimeout, }, nil } func (s *ValidationServer) Start(ctx_in context.Context) { s.StopWaiter.Start(ctx_in, s) + // Channel that all consumers use to indicate their readiness. 
+ readyStreams := make(chan struct{}, len(s.consumers)) for moduleRoot, c := range s.consumers { c := c + moduleRoot := moduleRoot c.Start(ctx_in) - s.StopWaiter.CallIteratively(func(ctx context.Context) time.Duration { - req, err := c.Consume(ctx) - if err != nil { - log.Error("Consuming request", "error", err) - return 0 + // Channel for single consumer, once readiness is indicated in this, + // consumer will start consuming iteratively. + ready := make(chan struct{}, 1) + s.StopWaiter.LaunchThread(func(ctx context.Context) { + for { + if pubsub.StreamExists(ctx, c.StreamName(), c.RedisClient()) { + ready <- struct{}{} + readyStreams <- struct{}{} + return + } + select { + case <-ctx.Done(): + log.Info("Context done while checking redis stream existance", "error", ctx.Err().Error()) + return + case <-time.After(time.Millisecond * 100): + } } - if req == nil { - // There's nothing in the queue. - return time.Second - } - valRun := s.spawner.Launch(req.Value, moduleRoot) - res, err := valRun.Await(ctx) - if err != nil { - log.Error("Error validating", "request value", req.Value, "error", err) - return 0 - } - if err := c.SetResult(ctx, req.ID, res); err != nil { - log.Error("Error setting result for request", "id", req.ID, "result", res, "error", err) - return 0 + }) + s.StopWaiter.LaunchThread(func(ctx context.Context) { + select { + case <-ctx.Done(): + log.Info("Context done while waiting a redis stream to be ready", "error", ctx.Err().Error()) + return + case <-ready: // Wait until the stream exists and start consuming iteratively. } - return time.Second + s.StopWaiter.CallIteratively(func(ctx context.Context) time.Duration { + req, err := c.Consume(ctx) + if err != nil { + log.Error("Consuming request", "error", err) + return 0 + } + if req == nil { + // There's nothing in the queue. + return time.Second + } + valRun := s.spawner.Launch(req.Value, moduleRoot) + res, err := valRun.Await(ctx) + if err != nil { + log.Error("Error validating", "request value", req.Value, "error", err) + return 0 + } + if err := c.SetResult(ctx, req.ID, res); err != nil { + log.Error("Error setting result for request", "id", req.ID, "result", res, "error", err) + return 0 + } + return time.Second + }) }) } + s.StopWaiter.LaunchThread(func(ctx context.Context) { + for { + select { + case <-readyStreams: + log.Trace("At least one stream is ready") + return // Don't block Start if at least one of the stream is ready. + case <-time.After(s.streamTimeout): + log.Error("Waiting for redis streams timed out") + case <-ctx.Done(): + log.Info("Context done while waiting redis streams to be ready, failed to start") + return + } + } + }) } type ValidationServerConfig struct { @@ -83,23 +128,28 @@ type ValidationServerConfig struct { ConsumerConfig pubsub.ConsumerConfig `koanf:"consumer-config"` // Supported wasm module roots. ModuleRoots []string `koanf:"module-roots"` + // Timeout on polling for existence of each redis stream. 
+ StreamTimeout time.Duration `koanf:"stream-timeout"` } var DefaultValidationServerConfig = ValidationServerConfig{ RedisURL: "", ConsumerConfig: pubsub.DefaultConsumerConfig, ModuleRoots: []string{}, + StreamTimeout: 10 * time.Minute, } var TestValidationServerConfig = ValidationServerConfig{ RedisURL: "", ConsumerConfig: pubsub.TestConsumerConfig, ModuleRoots: []string{}, + StreamTimeout: time.Minute, } func ValidationServerConfigAddOptions(prefix string, f *pflag.FlagSet) { pubsub.ConsumerConfigAddOptions(prefix+".consumer-config", f) f.StringSlice(prefix+".module-roots", nil, "Supported module root hashes") + f.Duration(prefix+".stream-timeout", DefaultValidationServerConfig.StreamTimeout, "Timeout on polling for existence of redis streams") } func (cfg *ValidationServerConfig) Enabled() bool { diff --git a/validator/valnode/redis/consumer_test.go b/validator/valnode/redis/consumer_test.go new file mode 100644 index 0000000000..0ebd697f16 --- /dev/null +++ b/validator/valnode/redis/consumer_test.go @@ -0,0 +1,30 @@ +package redis + +import ( + "context" + "testing" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/util/redisutil" + "github.com/offchainlabs/nitro/util/testhelpers" +) + +func TestTimeout(t *testing.T) { + handler := testhelpers.InitTestLog(t, log.LevelInfo) + ctx, cancel := context.WithCancel(context.Background()) + redisURL := redisutil.CreateTestRedis(ctx, t) + TestValidationServerConfig.RedisURL = redisURL + TestValidationServerConfig.ModuleRoots = []string{"0x123"} + TestValidationServerConfig.StreamTimeout = 100 * time.Millisecond + vs, err := NewValidationServer(&TestValidationServerConfig, nil) + if err != nil { + t.Fatalf("NewValidationSever() unexpected error: %v", err) + } + vs.Start(ctx) + time.Sleep(time.Second) + if !handler.WasLogged("Waiting for redis streams timed out") { + t.Error("Expected message about stream time-outs was not logged") + } + cancel() +} diff --git a/validator/valnode/validation_api.go b/validator/valnode/validation_api.go index bc22059da3..a67299b1a1 100644 --- a/validator/valnode/validation_api.go +++ b/validator/valnode/validation_api.go @@ -12,9 +12,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/state" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/stopwaiter" "github.com/offchainlabs/nitro/validator" "github.com/offchainlabs/nitro/validator/server_api" @@ -34,7 +32,7 @@ func (a *ValidationServerAPI) Room() int { } func (a *ValidationServerAPI) Validate(ctx context.Context, entry *server_api.InputJSON, moduleRoot common.Hash) (validator.GoGlobalState, error) { - valInput, err := ValidationInputFromJson(entry) + valInput, err := server_api.ValidationInputFromJson(entry) if err != nil { return validator.GoGlobalState{}, err } @@ -78,7 +76,7 @@ func NewExecutionServerAPI(valSpawner validator.ValidationSpawner, execution val } func (a *ExecServerAPI) CreateExecutionRun(ctx context.Context, wasmModuleRoot common.Hash, jsonInput *server_api.InputJSON) (uint64, error) { - input, err := ValidationInputFromJson(jsonInput) + input, err := server_api.ValidationInputFromJson(jsonInput) if err != nil { return 0, err } @@ -116,7 +114,7 @@ func (a *ExecServerAPI) Start(ctx_in context.Context) { } func (a *ExecServerAPI) WriteToFile(ctx context.Context, jsonInput *server_api.InputJSON, expOut validator.GoGlobalState, moduleRoot common.Hash) error { - input, err := ValidationInputFromJson(jsonInput) + input, err := 
server_api.ValidationInputFromJson(jsonInput) if err != nil { return err } @@ -190,55 +188,3 @@ func (a *ExecServerAPI) CloseExec(execid uint64) { run.run.Close() delete(a.runs, execid) } - -func ValidationInputFromJson(entry *server_api.InputJSON) (*validator.ValidationInput, error) { - preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte) - for ty, jsonPreimages := range entry.PreimagesB64 { - preimages[ty] = jsonPreimages.Map - } - valInput := &validator.ValidationInput{ - Id: entry.Id, - HasDelayedMsg: entry.HasDelayedMsg, - DelayedMsgNr: entry.DelayedMsgNr, - StartState: entry.StartState, - Preimages: preimages, - UserWasms: make(state.UserWasms), - DebugChain: entry.DebugChain, - - BlockHeight: entry.L1BlockHeight, - HotShotLiveness: entry.HotShotLiveness, - HotShotCommitment: entry.HotShotCommitment, - } - delayed, err := base64.StdEncoding.DecodeString(entry.DelayedMsgB64) - if err != nil { - return nil, err - } - valInput.DelayedMsg = delayed - for _, binfo := range entry.BatchInfo { - data, err := base64.StdEncoding.DecodeString(binfo.DataB64) - if err != nil { - return nil, err - } - decInfo := validator.BatchInfo{ - Number: binfo.Number, - Data: data, - } - valInput.BatchInfo = append(valInput.BatchInfo, decInfo) - } - for moduleHash, info := range entry.UserWasms { - asm, err := base64.StdEncoding.DecodeString(info.Asm) - if err != nil { - return nil, err - } - module, err := base64.StdEncoding.DecodeString(info.Module) - if err != nil { - return nil, err - } - decInfo := state.ActivatedWasm{ - Asm: asm, - Module: module, - } - valInput.UserWasms[moduleHash] = decInfo - } - return valInput, nil -} diff --git a/validator/valnode/valnode.go b/validator/valnode/valnode.go index 93a5b37238..972e11189d 100644 --- a/validator/valnode/valnode.go +++ b/validator/valnode/valnode.go @@ -25,7 +25,7 @@ type WasmConfig struct { func WasmConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".root-path", DefaultWasmConfig.RootPath, "path to machine folders, each containing wasm files (machine.wavm.br, replay.wasm)") f.Bool(prefix+".enable-wasmroots-check", DefaultWasmConfig.EnableWasmrootsCheck, "enable check for compatibility of on-chain WASM module root with node") - f.StringSlice(prefix+".allowed-wasm-module-roots", DefaultWasmConfig.AllowedWasmModuleRoots, "list of WASM module roots to check if the on-chain WASM module root belongs to on node startup") + f.StringSlice(prefix+".allowed-wasm-module-roots", DefaultWasmConfig.AllowedWasmModuleRoots, "list of WASM module roots or mahcine base paths to match against on-chain WasmModuleRoot") } var DefaultWasmConfig = WasmConfig{ diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index 6f5bf54e4d..ba70756c98 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -302,7 +302,7 @@ func (cc *ClientConnection) Receive(ctx context.Context, timeout time.Duration) return msg, op, err } -// readRequests reads json-rpc request from connection. +// readRequest reads json-rpc request from connection. func (cc *ClientConnection) readRequest(ctx context.Context, timeout time.Duration) ([]byte, ws.OpCode, error) { cc.ioMutex.Lock() defer cc.ioMutex.Unlock()
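Two of the pieces above lend themselves to small illustrations. First, the reworked redis validation consumer start-up: each consumer now polls until its stream exists, signals readiness, and only then enters its consume loop, while a watchdog logs an error if nothing becomes ready before stream-timeout. The sketch below reproduces that wait-then-start shape with plain goroutines and channels; streamExists and the 300ms "stream creation" are stand-ins for pubsub.StreamExists and real redis, and the StopWaiter plumbing is omitted.

package main

import (
	"context"
	"fmt"
	"time"
)

// streamCreatedAt makes the sketch self-contained: the stream "appears"
// 300ms after start instead of being created by a producer in redis.
var streamCreatedAt = time.Now().Add(300 * time.Millisecond)

// streamExists is a stand-in for pubsub.StreamExists.
func streamExists(ctx context.Context, name string) bool {
	return time.Now().After(streamCreatedAt)
}

// waitForStream polls until the stream exists or the context is cancelled,
// mirroring the 100ms polling loop added to the consumer start-up.
func waitForStream(ctx context.Context, name string, ready chan<- struct{}) {
	for {
		if streamExists(ctx, name) {
			ready <- struct{}{}
			return
		}
		select {
		case <-ctx.Done():
			return
		case <-time.After(100 * time.Millisecond):
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ready := make(chan struct{}, 1)
	go waitForStream(ctx, "stream:0x123", ready)

	// The watchdog: complain if nothing becomes ready before the timeout,
	// roughly what the stream-timeout option controls.
	select {
	case <-ready:
		fmt.Println("stream ready, consume loop would start here")
	case <-time.After(2 * time.Second):
		fmt.Println("waiting for redis streams timed out")
	}
}

In the real server the consume loop itself is unchanged; only the start-up gained the existence check, the per-consumer ready channel, and the timeout warning.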
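Second, the JSON plumbing consolidated into validator/server_api: ValidationInputToJson and ValidationInputFromJson carry binary payloads (delayed messages, batch data, user wasms) as base64 strings, which is also what lands in the block_inputs_<id>.json files written by WriteToFile. Below is a small round-trip sketch of that convention using simplified stand-in types, not the package's own definitions.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// Stand-ins for the server_api shapes: raw bytes on the Go side,
// base64 text on the JSON side.
type batchInfo struct {
	Number uint64
	Data   []byte
}

type batchInfoJSON struct {
	Number  uint64
	DataB64 string
}

func toJSON(b batchInfo) batchInfoJSON {
	return batchInfoJSON{Number: b.Number, DataB64: base64.StdEncoding.EncodeToString(b.Data)}
}

func fromJSON(j batchInfoJSON) (batchInfo, error) {
	data, err := base64.StdEncoding.DecodeString(j.DataB64)
	if err != nil {
		return batchInfo{}, err
	}
	return batchInfo{Number: j.Number, Data: data}, nil
}

func main() {
	in := batchInfo{Number: 7, Data: []byte{0xde, 0xad, 0xbe, 0xef}}
	enc := toJSON(in)

	// Numbers stay numbers, byte slices become base64 strings on disk.
	text, _ := json.MarshalIndent(enc, "", "  ")
	fmt.Println(string(text))

	// Decoding reverses it, with an error path for malformed base64.
	out, err := fromJSON(enc)
	fmt.Println(out.Number, out.Data, err)
}

The real converters apply the same idea field by field, with preimage maps handled through jsonapi.PreimagesMapJson.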