diff --git a/arbcompress/compress_common.go b/arbcompress/compress_common.go index a61dd9a171..997232e7cc 100644 --- a/arbcompress/compress_common.go +++ b/arbcompress/compress_common.go @@ -17,6 +17,8 @@ func compressedBufferSizeFor(length int) int { return length + (length>>10)*8 + 64 // actual limit is: length + (length >> 14) * 4 + 6 } -func CompressLevel(input []byte, level int) ([]byte, error) { +func CompressLevel(input []byte, level uint64) ([]byte, error) { + // level is trusted and shouldn't be anything crazy + // #nosec G115 return Compress(input, uint32(level), EmptyDictionary) } diff --git a/arbitrator/prover/src/programs/config.rs b/arbitrator/prover/src/programs/config.rs index 1a37294b04..0353589358 100644 --- a/arbitrator/prover/src/programs/config.rs +++ b/arbitrator/prover/src/programs/config.rs @@ -17,7 +17,7 @@ use { meter::Meter, start::StartMover, MiddlewareWrapper, }, std::sync::Arc, - wasmer::{Cranelift, CraneliftOptLevel, Engine, Store}, + wasmer::{Cranelift, CraneliftOptLevel, Engine, Store, Target}, wasmer_compiler_singlepass::Singlepass, }; @@ -180,17 +180,19 @@ impl CompileConfig { } #[cfg(feature = "native")] - pub fn store(&self) -> Store { - let mut compiler: Box<dyn CompilerConfig> = match self.debug.cranelift { + pub fn engine(&self, target: Target) -> Engine { + use wasmer::sys::EngineBuilder; + + let mut wasmer_config: Box<dyn CompilerConfig> = match self.debug.cranelift { true => { - let mut compiler = Cranelift::new(); - compiler.opt_level(CraneliftOptLevel::Speed); - Box::new(compiler) + let mut wasmer_config = Cranelift::new(); + wasmer_config.opt_level(CraneliftOptLevel::Speed); + Box::new(wasmer_config) } false => Box::new(Singlepass::new()), }; - compiler.canonicalize_nans(true); - compiler.enable_verifier(); + wasmer_config.canonicalize_nans(true); + wasmer_config.enable_verifier(); let start = MiddlewareWrapper::new(StartMover::new(self.debug.debug_info)); let meter = MiddlewareWrapper::new(Meter::new(&self.pricing)); @@ -200,22 +202,24 @@ impl CompileConfig { // add the instrumentation in the order of application // note: this must be consistent with the prover - compiler.push_middleware(Arc::new(start)); - compiler.push_middleware(Arc::new(meter)); - compiler.push_middleware(Arc::new(dygas)); - compiler.push_middleware(Arc::new(depth)); - compiler.push_middleware(Arc::new(bound)); + wasmer_config.push_middleware(Arc::new(start)); + wasmer_config.push_middleware(Arc::new(meter)); + wasmer_config.push_middleware(Arc::new(dygas)); + wasmer_config.push_middleware(Arc::new(depth)); + wasmer_config.push_middleware(Arc::new(bound)); if self.debug.count_ops { let counter = Counter::new(); - compiler.push_middleware(Arc::new(MiddlewareWrapper::new(counter))); + wasmer_config.push_middleware(Arc::new(MiddlewareWrapper::new(counter))); } - Store::new(compiler) + EngineBuilder::new(wasmer_config) + .set_target(Some(target)) + .into() } #[cfg(feature = "native")] - pub fn engine(&self) -> Engine { - self.store().engine().clone() + pub fn store(&self, target: Target) -> Store { + Store::new(self.engine(target)) } } diff --git a/arbitrator/stylus/src/cache.rs b/arbitrator/stylus/src/cache.rs index 06739f2219..fa38d45419 100644 --- a/arbitrator/stylus/src/cache.rs +++ b/arbitrator/stylus/src/cache.rs @@ -10,6 +10,8 @@ use prover::programs::config::CompileConfig; use std::{collections::HashMap, num::NonZeroUsize}; use wasmer::{Engine, Module, Store}; +use crate::target_cache::target_native; + lazy_static!
{ static ref INIT_CACHE: Mutex<InitCache> = Mutex::new(InitCache::new(256)); } @@ -120,7 +122,7 @@ impl InitCache { } drop(cache); - let engine = CompileConfig::version(version, debug).engine(); + let engine = CompileConfig::version(version, debug).engine(target_native()); let module = unsafe { Module::deserialize_unchecked(&engine, module)? }; let item = CacheItem::new(module, engine); diff --git a/arbitrator/stylus/src/lib.rs b/arbitrator/stylus/src/lib.rs index 3c53359f8b..a252b60a01 100644 --- a/arbitrator/stylus/src/lib.rs +++ b/arbitrator/stylus/src/lib.rs @@ -18,6 +18,7 @@ use native::NativeInstance; use prover::programs::{prelude::*, StylusData}; use run::RunProgram; use std::{marker::PhantomData, mem, ptr}; +use target_cache::{target_cache_get, target_cache_set}; pub use brotli; pub use prover; @@ -29,6 +30,7 @@ pub mod run; mod cache; mod evm_api; +mod target_cache; mod util; #[cfg(test)] @@ -122,9 +124,9 @@ impl RustBytes { } } -/// Instruments and "activates" a user wasm. +/// "activates" a user wasm. /// -/// The `output` is either the serialized asm & module pair or an error string. +/// The `output` is either the module or an error string. /// Returns consensus info such as the module hash and footprint on success. /// /// Note that this operation costs gas and is limited by the amount supplied via the `gas` pointer. @@ -140,7 +142,6 @@ pub unsafe extern "C" fn stylus_activate( version: u16, debug: bool, output: *mut RustBytes, - asm_len: *mut usize, codehash: *const Bytes32, module_hash: *mut Bytes32, stylus_data: *mut StylusData, @@ -152,18 +153,97 @@ pub unsafe extern "C" fn stylus_activate( let codehash = &*codehash; let gas = &mut *gas; - let (asm, module, info) = match native::activate(wasm, codehash, version, page_limit, debug, gas) { Ok(val) => val, Err(err) => return output.write_err(err), }; - *asm_len = asm.len(); + let (module, info) = match native::activate(wasm, codehash, version, page_limit, debug, gas) { Ok(val) => val, Err(err) => return output.write_err(err), }; + *module_hash = module.hash(); *stylus_data = info; - let mut data = asm; - data.extend(&*module.into_bytes()); - output.write(data); + output.write(module.into_bytes()); + UserOutcomeKind::Success +} + +/// "compiles" a user wasm. +/// +/// The `output` is either the asm or an error string. +/// The `name` selects a compilation target previously registered via `stylus_target_set`; an empty name selects the native target. +/// +/// # Safety +/// +/// `output` must not be null. +#[no_mangle] +pub unsafe extern "C" fn stylus_compile( + wasm: GoSliceData, + version: u16, + debug: bool, + name: GoSliceData, + output: *mut RustBytes, +) -> UserOutcomeKind { + let wasm = wasm.slice(); + let output = &mut *output; + let name = match String::from_utf8(name.slice().to_vec()) { + Ok(val) => val, + Err(err) => return output.write_err(err.into()), + }; + let target = match target_cache_get(&name) { + Ok(val) => val, + Err(err) => return output.write_err(err), + }; + + let asm = match native::compile(wasm, version, debug, target) { + Ok(val) => val, + Err(err) => return output.write_err(err), + }; + + output.write(asm); + UserOutcomeKind::Success +} + +#[no_mangle] +/// # Safety +/// +/// `output` must not be null.
+pub unsafe extern "C" fn wat_to_wasm(wat: GoSliceData, output: *mut RustBytes) -> UserOutcomeKind { + let output = &mut *output; + let wasm = match wasmer::wat2wasm(wat.slice()) { + Ok(val) => val, + Err(err) => return output.write_err(err.into()), + }; + output.write(wasm.into_owned()); + UserOutcomeKind::Success +} + +/// Sets the compilation target stored under `name` from its string description. +/// +/// The description format is: Triple+CpuFeature+CpuFeature... +/// +/// # Safety +/// +/// `output` must not be null. +#[no_mangle] +pub unsafe extern "C" fn stylus_target_set( + name: GoSliceData, + description: GoSliceData, + output: *mut RustBytes, + native: bool, +) -> UserOutcomeKind { + let output = &mut *output; + let name = match String::from_utf8(name.slice().to_vec()) { + Ok(val) => val, + Err(err) => return output.write_err(err.into()), + }; + + let desc_str = match String::from_utf8(description.slice().to_vec()) { + Ok(val) => val, + Err(err) => return output.write_err(err.into()), + }; + + if let Err(err) = target_cache_set(name, desc_str, native) { + return output.write_err(err); + }; + UserOutcomeKind::Success } diff --git a/arbitrator/stylus/src/native.rs b/arbitrator/stylus/src/native.rs index a7b996edf0..cc1d191fe2 100644 --- a/arbitrator/stylus/src/native.rs +++ b/arbitrator/stylus/src/native.rs @@ -4,7 +4,7 @@ use crate::{ cache::InitCache, env::{MeterData, WasmEnv}, - host, util, + host, }; use arbutil::{ evm::{ @@ -33,11 +33,13 @@ use std::{ ops::{Deref, DerefMut}, }; use wasmer::{ - imports, AsStoreMut, Function, FunctionEnv, Instance, Memory, Module, Pages, Store, + imports, AsStoreMut, Function, FunctionEnv, Instance, Memory, Module, Pages, Store, Target, TypedFunction, Value, WasmTypeList, }; use wasmer_vm::VMExtern; + +use crate::target_cache::target_native; + #[derive(Debug)] pub struct NativeInstance<D: DataReader, E: EvmApi<D>> { pub instance: Instance, @@ -98,7 +100,7 @@ impl<D: DataReader, E: EvmApi<D>> NativeInstance<D, E> { evm_data: EvmData, ) -> Result<Self> { let env = WasmEnv::new(compile, None, evm, evm_data); - let store = env.compile.store(); + let store = env.compile.store(target_native()); let module = unsafe { Module::deserialize_unchecked(&store, module)? }; Self::from_module(module, store, env) } @@ -137,9 +139,10 @@ impl<D: DataReader, E: EvmApi<D>> NativeInstance<D, E> { evm_data: EvmData, compile: &CompileConfig, config: StylusConfig, + target: Target, ) -> Result<Self> { let env = WasmEnv::new(compile.clone(), Some(config), evm_api, evm_data); - let store = env.compile.store(); + let store = env.compile.store(target); let wat_or_wasm = std::fs::read(path)?; let module = Module::new(&store, wat_or_wasm)?; Self::from_module(module, store, env) } @@ -347,8 +350,8 @@ impl<D: DataReader, E: EvmApi<D>> StartlessMachine for NativeInstance<D, E> { } } -pub fn module(wasm: &[u8], compile: CompileConfig) -> Result<Vec<u8>> { - let mut store = compile.store(); +pub fn module(wasm: &[u8], compile: CompileConfig, target: Target) -> Result<Vec<u8>> { + let mut store = compile.store(target); let module = Module::new(&store, wasm)?; macro_rules!
stub { (u8 <- $($types:tt)+) => { @@ -428,7 +431,6 @@ pub fn module(wasm: &[u8], compile: CompileConfig) -> Result> { imports.define("console", "tee_f64", stub!(f64 <- |_: f64|)); imports.define("debug", "null_host", stub!(||)); } - Instance::new(&mut store, &module, &imports)?; let module = module.serialize()?; Ok(module.to_vec()) @@ -441,14 +443,14 @@ pub fn activate( page_limit: u16, debug: bool, gas: &mut u64, -) -> Result<(Vec, ProverModule, StylusData)> { - let compile = CompileConfig::version(version, debug); +) -> Result<(ProverModule, StylusData)> { let (module, stylus_data) = ProverModule::activate(wasm, codehash, version, page_limit, debug, gas)?; - let asm = match self::module(wasm, compile) { - Ok(asm) => asm, - Err(err) => util::panic_with_wasm(wasm, err), - }; - Ok((asm, module, stylus_data)) + Ok((module, stylus_data)) +} + +pub fn compile(wasm: &[u8], version: u16, debug: bool, target: Target) -> Result> { + let compile = CompileConfig::version(version, debug); + self::module(wasm, compile, target) } diff --git a/arbitrator/stylus/src/target_cache.rs b/arbitrator/stylus/src/target_cache.rs new file mode 100644 index 0000000000..a1d63829d6 --- /dev/null +++ b/arbitrator/stylus/src/target_cache.rs @@ -0,0 +1,81 @@ +// Copyright 2022-2024, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +use eyre::{eyre, OptionExt, Result}; +use lazy_static::lazy_static; +use parking_lot::RwLock; +use std::{collections::HashMap, str::FromStr}; +use wasmer_types::{CpuFeature, Target, Triple}; + +lazy_static! { + static ref TARGET_CACHE: RwLock> = RwLock::new(HashMap::new()); + static ref TARGET_NATIVE: RwLock = RwLock::new(Target::default()); +} + +fn target_from_string(input: String) -> Result { + if input.is_empty() { + return Ok(Target::default()); + } + let mut parts = input.split('+'); + + let Some(triple_string) = parts.next() else { + return Err(eyre!("no architecture")); + }; + + let triple = match Triple::from_str(triple_string) { + Ok(val) => val, + Err(e) => return Err(eyre!(e)), + }; + + let mut features = CpuFeature::set(); + for flag in parts { + features.insert(CpuFeature::from_str(flag)?); + } + + Ok(Target::new(triple, features)) +} + +/// Populates `TARGET_CACHE` inserting target specified by `description` under `name` key. +/// Additionally, if `native` is set it sets `TARGET_NATIVE` to the specified target. 
+pub fn target_cache_set(name: String, description: String, native: bool) -> Result<()> { + let target = target_from_string(description)?; + + if native { + if !target.is_native() { + return Err(eyre!("arch not native")); + } + let flags_not_supported = Target::default() + .cpu_features() + .complement() + .intersection(*target.cpu_features()); + if !flags_not_supported.is_empty() { + let mut err_message = String::new(); + err_message.push_str("cpu flags not supported on local cpu for: "); + for item in flags_not_supported.iter() { + err_message.push('+'); + err_message.push_str(&item.to_string()); + } + return Err(eyre!(err_message)); + } + *TARGET_NATIVE.write() = target.clone(); + } + + TARGET_CACHE.write().insert(name, target); + + Ok(()) +} + +pub fn target_native() -> Target { + TARGET_NATIVE.read().clone() +} + +pub fn target_cache_get(name: &str) -> Result<Target> { + if name.is_empty() { + return Ok(TARGET_NATIVE.read().clone()); + } + TARGET_CACHE + .read() + .get(name) + .cloned() + .ok_or_eyre("arch not set") +} diff --git a/arbitrator/stylus/src/test/api.rs b/arbitrator/stylus/src/test/api.rs index 92d7317918..5d9f625e5e 100644 --- a/arbitrator/stylus/src/test/api.rs +++ b/arbitrator/stylus/src/test/api.rs @@ -14,6 +14,7 @@ use eyre::Result; use parking_lot::Mutex; use prover::programs::{memory::MemoryModel, prelude::*}; use std::{collections::HashMap, sync::Arc}; +use wasmer::Target; use super::TestInstance; @@ -53,7 +54,7 @@ impl TestEvmApi { pub fn deploy(&mut self, address: Bytes20, config: StylusConfig, name: &str) -> Result<()> { let file = format!("tests/{name}/target/wasm32-unknown-unknown/release/{name}.wasm"); let wasm = std::fs::read(file)?; - let module = native::module(&wasm, self.compile.clone())?; + let module = native::module(&wasm, self.compile.clone(), Target::default())?; self.contracts.lock().insert(address, module); self.configs.lock().insert(address, config); Ok(()) } diff --git a/arbitrator/stylus/src/test/misc.rs b/arbitrator/stylus/src/test/misc.rs index ae44a885f0..92c4394ae3 100644 --- a/arbitrator/stylus/src/test/misc.rs +++ b/arbitrator/stylus/src/test/misc.rs @@ -9,12 +9,12 @@ use crate::{ }; use eyre::Result; use prover::programs::{prelude::*, start::StartMover}; -use wasmer::{imports, Function}; +use wasmer::{imports, Function, Target}; #[test] fn test_bulk_memory() -> Result<()> { let (compile, config, ink) = test_configs(); - let mut store = compile.store(); + let mut store = compile.store(Target::default()); let filename = "../prover/test-cases/bulk-memory.wat"; let imports = imports! { "env" => { diff --git a/arbitrator/stylus/src/test/mod.rs b/arbitrator/stylus/src/test/mod.rs index 236e5639e6..00c9c62ae4 100644 --- a/arbitrator/stylus/src/test/mod.rs +++ b/arbitrator/stylus/src/test/mod.rs @@ -16,7 +16,7 @@ use rand::prelude::*; use std::{collections::HashMap, path::Path, sync::Arc}; use wasmer::{ imports, wasmparser::Operator, CompilerConfig, Function, FunctionEnv, Imports, Instance, - Module, Store, + Module, Store, Target, }; use wasmer_compiler_singlepass::Singlepass; @@ -33,7 +33,7 @@ type TestInstance = NativeInstance; impl TestInstance { fn new_test(path: &str, compile: CompileConfig) -> Result<Self> { - let mut store = compile.store(); + let mut store = compile.store(Target::default()); let imports = imports!
{ "test" => { "noop" => Function::new_typed(&mut store, || {}), @@ -86,7 +86,14 @@ impl TestInstance { config: StylusConfig, ) -> Result<(Self, TestEvmApi)> { let (mut evm, evm_data) = TestEvmApi::new(compile.clone()); - let native = Self::from_path(path, evm.clone(), evm_data, compile, config)?; + let native = Self::from_path( + path, + evm.clone(), + evm_data, + compile, + config, + Target::default(), + )?; let footprint = native.memory().ty(&native.store).minimum.0 as u16; evm.set_pages(footprint); Ok((native, evm)) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 71239efdbb..6b4b95f8e0 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -121,7 +121,7 @@ type BatchPoster struct { nextRevertCheckBlock int64 // the last parent block scanned for reverting batches postedFirstBatch bool // indicates if batch poster has posted the first batch - accessList func(SequencerInboxAccs, AfterDelayedMessagesRead int) types.AccessList + accessList func(SequencerInboxAccs, AfterDelayedMessagesRead uint64) types.AccessList } type l1BlockBound int @@ -374,7 +374,7 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e } // Dataposter sender may be external signer address, so we should initialize // access list after initializing dataposter. - b.accessList = func(SequencerInboxAccs, AfterDelayedMessagesRead int) types.AccessList { + b.accessList = func(SequencerInboxAccs, AfterDelayedMessagesRead uint64) types.AccessList { if !b.config().UseAccessLists || opts.L1Reader.IsParentChainArbitrum() { // Access lists cost gas instead of saving gas when posting to L2s, // because data is expensive in comparison to computation. @@ -433,8 +433,8 @@ type AccessListOpts struct { BridgeAddr common.Address DataPosterAddr common.Address GasRefunderAddr common.Address - SequencerInboxAccs int - AfterDelayedMessagesRead int + SequencerInboxAccs uint64 + AfterDelayedMessagesRead uint64 } // AccessList returns access list (contracts, storage slots) for batchposter. 
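Editor's note on the `int` to `uint64` widenings above and the recurring `// #nosec G115` annotations throughout this patch: gosec's G115 rule flags integer conversions that may overflow, such as `int64(x)` where `x` is a `uint64` that could exceed `math.MaxInt64`. The annotations mark sites where the value is known to be in range; where it is not provably in range, the patch widens the type instead. A minimal sketch of the wraparound G115 guards against, with a hypothetical `checkedInt64` helper that is not part of this PR:

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

// checkedInt64 is an illustrative safe cast: it errors instead of
// silently wrapping when a uint64 value exceeds the int64 range.
func checkedInt64(v uint64) (int64, error) {
	if v > math.MaxInt64 {
		return 0, errors.New("value overflows int64")
	}
	return int64(v), nil
}

func main() {
	v := uint64(math.MaxInt64) + 1
	fmt.Println(int64(v)) // wraps to -9223372036854775808; this is what G115 flags
	if _, err := checkedInt64(v); err != nil {
		fmt.Println(err) // value overflows int64
	}
}
```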
@@ -476,12 +476,12 @@ func AccessList(opts *AccessListOpts) types.AccessList { }, } - for _, v := range []struct{ slotIdx, val int }{ + for _, v := range []struct{ slotIdx, val uint64 }{ {7, opts.SequencerInboxAccs - 1}, // - sequencerInboxAccs[sequencerInboxAccs.length - 1]; (keccak256(7, sequencerInboxAccs.length - 1)) {7, opts.SequencerInboxAccs}, // - sequencerInboxAccs.push(...); (keccak256(7, sequencerInboxAccs.length)) {6, opts.AfterDelayedMessagesRead - 1}, // - delayedInboxAccs[afterDelayedMessagesRead - 1]; (keccak256(6, afterDelayedMessagesRead - 1)) } { - sb := arbutil.SumBytes(arbutil.PaddedKeccak256([]byte{byte(v.slotIdx)}), big.NewInt(int64(v.val)).Bytes()) + sb := arbutil.SumBytes(arbutil.PaddedKeccak256([]byte{byte(v.slotIdx)}), new(big.Int).SetUint64(v.val).Bytes()) l[1].StorageKeys = append(l[1].StorageKeys, common.Hash(sb)) } @@ -603,9 +603,12 @@ func (b *BatchPoster) pollForL1PriceData(ctx context.Context) { l1GasPrice = blobFeePerByte.Uint64() / 16 } } + // #nosec G115 blobGasUsedGauge.Update(int64(*h.BlobGasUsed)) } + // #nosec G115 blockGasUsedGauge.Update(int64(h.GasUsed)) + // #nosec G115 blockGasLimitGauge.Update(int64(h.GasLimit)) suggestedTipCap, err := b.l1Reader.Client().SuggestGasTipCap(ctx) if err != nil { @@ -613,6 +616,7 @@ func (b *BatchPoster) pollForL1PriceData(ctx context.Context) { } else { suggestedTipCapGauge.Update(suggestedTipCap.Int64()) } + // #nosec G115 l1GasPriceGauge.Update(int64(l1GasPrice)) case <-ctx.Done(): return @@ -1176,6 +1180,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if err != nil { return false, err } + // #nosec G115 firstMsgTime := time.Unix(int64(firstMsg.Message.Header.Timestamp), 0) lastPotentialMsg, err := b.streamer.GetMessage(msgCount - 1) @@ -1403,7 +1408,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if len(kzgBlobs)*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock { return false, fmt.Errorf("produced %v blobs for batch but a block can only hold %v (compressed batch was %v bytes long)", len(kzgBlobs), params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob, len(sequencerMsg)) } - accessList := b.accessList(int(batchPosition.NextSeqNum), int(b.building.segments.delayedMsg)) + accessList := b.accessList(batchPosition.NextSeqNum, b.building.segments.delayedMsg) // On restart, we may be trying to estimate gas for a batch whose successor has // already made it into pending state, if not latest state. // In that case, we might get a revert with `DelayedBackwards()`. 
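The storage keys in the loop above follow Solidity's layout rule: element `i` of a dynamic array rooted at slot `p` lives at `keccak256(pad32(p)) + i`, which is what the `PaddedKeccak256`/`SumBytes` pair computes. A self-contained sketch of the same derivation, assuming go-ethereum's `crypto` and `common` packages (`dynamicArraySlot` is an illustrative helper, not part of this PR):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// dynamicArraySlot returns the storage key of element `index` of a Solidity
// dynamic array declared at slot `slotIdx`: keccak256(pad32(slotIdx)) + index.
func dynamicArraySlot(slotIdx byte, index uint64) common.Hash {
	base := crypto.Keccak256(common.LeftPadBytes([]byte{slotIdx}, 32))
	key := new(big.Int).Add(new(big.Int).SetBytes(base), new(big.Int).SetUint64(index))
	return common.BigToHash(key) // truncation to 32 bytes matches EVM wraparound
}

func main() {
	// e.g. slot 7 (sequencerInboxAccs) at index sequencerInboxAccs.length-1
	fmt.Println(dynamicArraySlot(7, 41))
}
```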
@@ -1439,7 +1444,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) b.building.muxBackend.delayedInboxStart = batchPosition.DelayedMessageCount b.building.muxBackend.SetPositionWithinMessage(0) simMux := arbstate.NewInboxMultiplexer(b.building.muxBackend, batchPosition.DelayedMessageCount, dapReaders, daprovider.KeysetValidate) - log.Info("Begin checking the correctness of batch against inbox multiplexer", "startMsgSeqNum", batchPosition.MessageCount, "endMsgSeqNum", b.building.msgCount-1) + log.Debug("Begin checking the correctness of batch against inbox multiplexer", "startMsgSeqNum", batchPosition.MessageCount, "endMsgSeqNum", b.building.msgCount-1) for i := batchPosition.MessageCount; i < b.building.msgCount; i++ { msg, err := simMux.Pop(ctx) if err != nil { @@ -1505,6 +1510,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) messagesPerBatch = 1 } backlog := uint64(unpostedMessages) / messagesPerBatch + // #nosec G115 batchPosterEstimatedBatchBacklogGauge.Update(int64(backlog)) if backlog > 10 { logLevel := log.Warn diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 15446fe855..6a483929b2 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -359,6 +359,7 @@ func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64, thi if err != nil { return fmt.Errorf("getting nonce of a dataposter sender: %w", err) } + // #nosec G115 latestUnconfirmedNonceGauge.Update(int64(unconfirmedNonce)) if nextNonce >= cfg.MaxMempoolTransactions+unconfirmedNonce { return fmt.Errorf("%w: transaction nonce: %d, unconfirmed nonce: %d, max mempool size: %d", ErrExceedsMaxMempoolSize, nextNonce, unconfirmedNonce, cfg.MaxMempoolTransactions) @@ -371,6 +372,7 @@ func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64, thi if err != nil { return fmt.Errorf("getting nonce of a dataposter sender: %w", err) } + // #nosec G115 latestUnconfirmedNonceGauge.Update(int64(unconfirmedNonce)) if unconfirmedNonce > nextNonce { return fmt.Errorf("latest on-chain nonce %v is greater than to next nonce %v", unconfirmedNonce, nextNonce) @@ -525,6 +527,7 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u if err != nil { return nil, nil, nil, fmt.Errorf("failed to get latest nonce %v blocks ago (block %v): %w", config.NonceRbfSoftConfs, softConfBlock, err) } + // #nosec G115 latestSoftConfirmedNonceGauge.Update(int64(softConfNonce)) suggestedTip, err := p.client.SuggestGasTipCap(ctx) @@ -1052,6 +1055,7 @@ func (p *DataPoster) updateNonce(ctx context.Context) error { } return nil } + // #nosec G115 latestFinalizedNonceGauge.Update(int64(nonce)) log.Info("Data poster transactions confirmed", "previousNonce", p.nonce, "newNonce", nonce, "previousL1Block", p.lastBlock, "newL1Block", header.Number) if len(p.errorCount) > 0 { @@ -1132,6 +1136,7 @@ func (p *DataPoster) Start(ctxIn context.Context) { log.Warn("Failed to get latest nonce", "err", err) return minWait } + // #nosec G115 latestUnconfirmedNonceGauge.Update(int64(unconfirmedNonce)) // We use unconfirmedNonce here to replace-by-fee transactions that aren't in a block, // excluding those that are in an unconfirmed block. 
If a reorg occurs, we'll continue @@ -1143,7 +1148,7 @@ func (p *DataPoster) Start(ctxIn context.Context) { } latestQueued, err := p.queue.FetchLast(ctx) if err != nil { - log.Error("Failed to fetch lastest queued tx", "err", err) + log.Error("Failed to fetch last queued tx", "err", err) return minWait } var latestCumulativeWeight, latestNonce uint64 @@ -1154,43 +1159,38 @@ func (p *DataPoster) Start(ctxIn context.Context) { confirmedNonce := unconfirmedNonce - 1 confirmedMeta, err := p.queue.Get(ctx, confirmedNonce) if err == nil && confirmedMeta != nil { + // #nosec G115 totalQueueWeightGauge.Update(int64(arbmath.SaturatingUSub(latestCumulativeWeight, confirmedMeta.CumulativeWeight()))) + // #nosec G115 totalQueueLengthGauge.Update(int64(arbmath.SaturatingUSub(latestNonce, confirmedNonce))) } else { - log.Error("Failed to fetch latest confirmed tx from queue", "err", err, "confirmedMeta", confirmedMeta) + log.Error("Failed to fetch latest confirmed tx from queue", "confirmedNonce", confirmedNonce, "err", err, "confirmedMeta", confirmedMeta) } } for _, tx := range queueContents { - previouslyUnsent := !tx.Sent - sendAttempted := false if now.After(tx.NextReplacement) { weightBacklog := arbmath.SaturatingUSub(latestCumulativeWeight, tx.CumulativeWeight()) nonceBacklog := arbmath.SaturatingUSub(latestNonce, tx.FullTx.Nonce()) err := p.replaceTx(ctx, tx, arbmath.MaxInt(nonceBacklog, weightBacklog)) - sendAttempted = true p.maybeLogError(err, tx, "failed to replace-by-fee transaction") + } else { + err := p.sendTx(ctx, tx, tx) + p.maybeLogError(err, tx, "failed to re-send transaction") + } + tx, err = p.queue.Get(ctx, tx.FullTx.Nonce()) + if err != nil { + log.Error("Failed to fetch tx from queue to check updated status", "nonce", tx.FullTx.Nonce(), "err", err) + return minWait } if nextCheck.After(tx.NextReplacement) { nextCheck = tx.NextReplacement } - if !sendAttempted && previouslyUnsent { - err := p.sendTx(ctx, tx, tx) - sendAttempted = true - p.maybeLogError(err, tx, "failed to re-send transaction") - if err != nil { - nextSend := time.Now().Add(time.Minute) - if nextCheck.After(nextSend) { - nextCheck = nextSend - } - } - } - if previouslyUnsent && sendAttempted { - // Don't try to send more than 1 unsent transaction, to play nicely with parent chain mempools. - // Transactions will be unsent if there was some error when originally sending them, - // or if transaction type changes and the prior tx is not yet reorg resistant. 
- break + if !tx.Sent { + // We can't progress any further if we failed to send this tx + // Retry sending this tx soon + return minWait } } wait := time.Until(nextCheck) @@ -1343,7 +1343,7 @@ var DefaultDataPosterConfig = DataPosterConfig{ MaxMempoolWeight: 18, MinTipCapGwei: 0.05, MinBlobTxTipCapGwei: 1, // default geth minimum, and relays aren't likely to accept lower values given propagation time - MaxTipCapGwei: 5, + MaxTipCapGwei: 1.2, MaxBlobTxTipCapGwei: 1, // lower than normal because 4844 rbf is a minimum of a 2x MaxFeeBidMultipleBips: arbmath.OneInBips * 10, NonceRbfSoftConfs: 1, diff --git a/arbnode/dataposter/dbstorage/storage.go b/arbnode/dataposter/dbstorage/storage.go index 97055193a6..37ebfa5099 100644 --- a/arbnode/dataposter/dbstorage/storage.go +++ b/arbnode/dataposter/dbstorage/storage.go @@ -42,7 +42,7 @@ func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResu var res []*storage.QueuedTransaction it := s.db.NewIterator([]byte(""), idxToKey(startingIndex)) defer it.Release() - for i := 0; i < int(maxResults); i++ { + for i := uint64(0); i < maxResults; i++ { if !it.Next() { break } diff --git a/arbnode/dataposter/slice/slicestorage.go b/arbnode/dataposter/slice/slicestorage.go index 69de7564a3..8685ed6f54 100644 --- a/arbnode/dataposter/slice/slicestorage.go +++ b/arbnode/dataposter/slice/slicestorage.go @@ -89,8 +89,8 @@ func (s *Storage) Put(_ context.Context, index uint64, prev, new *storage.Queued } s.queue = append(s.queue, newEnc) } else if index >= s.firstNonce { - queueIdx := int(index - s.firstNonce) - if queueIdx > len(s.queue) { + queueIdx := index - s.firstNonce + if queueIdx > uint64(len(s.queue)) { return fmt.Errorf("attempted to set out-of-bounds index %v in queue starting at %v of length %v", index, s.firstNonce, len(s.queue)) } prevEnc, err := s.encDec().Encode(prev) diff --git a/arbnode/dataposter/storage/time.go b/arbnode/dataposter/storage/time.go index aa15f29170..82f8a3dbf5 100644 --- a/arbnode/dataposter/storage/time.go +++ b/arbnode/dataposter/storage/time.go @@ -34,11 +34,13 @@ func (b *RlpTime) DecodeRLP(s *rlp.Stream) error { if err != nil { return err } + // #nosec G115 *b = RlpTime(time.Unix(int64(enc.Seconds), int64(enc.Nanos))) return nil } func (b RlpTime) EncodeRLP(w io.Writer) error { + // #nosec G115 return rlp.Encode(w, rlpTimeEncoding{ Seconds: uint64(time.Time(b).Unix()), Nanos: uint64(time.Time(b).Nanosecond()), diff --git a/arbnode/dataposter/storage_test.go b/arbnode/dataposter/storage_test.go index e2aa321e0d..8934d92b45 100644 --- a/arbnode/dataposter/storage_test.go +++ b/arbnode/dataposter/storage_test.go @@ -362,6 +362,7 @@ func TestLength(t *testing.T) { if err != nil { t.Fatalf("Length() unexpected error: %v", err) } + // #nosec G115 if want := arbmath.MaxInt(0, 20-int(tc.pruneFrom)); got != want { t.Errorf("Length() = %d want %d", got, want) } diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index 77a0b6e7a2..fd050b5f67 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -437,8 +437,8 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } delayedMessages, err := r.delayedBridge.LookupMessagesInRange(ctx, from, to, func(batchNum uint64) ([]byte, error) { if len(sequencerBatches) > 0 && batchNum >= sequencerBatches[0].SequenceNumber { - idx := int(batchNum - sequencerBatches[0].SequenceNumber) - if idx < len(sequencerBatches) { + idx := batchNum - sequencerBatches[0].SequenceNumber + if idx < uint64(len(sequencerBatches)) { return 
sequencerBatches[idx].Serialize(ctx, r.l1Reader.Client()) } log.Warn("missing mentioned batch in L1 message lookup", "batch", batchNum) diff --git a/arbnode/inbox_test.go b/arbnode/inbox_test.go index 70392598d6..1c46c593b9 100644 --- a/arbnode/inbox_test.go +++ b/arbnode/inbox_test.go @@ -72,7 +72,9 @@ func NewTransactionStreamerForTest(t *testing.T, ownerAddress common.Address) (* if err != nil { Fail(t, err) } - execEngine.Initialize(gethexec.DefaultCachingConfig.StylusLRUCache) + if err := execEngine.Initialize(gethexec.DefaultCachingConfig.StylusLRUCache, &gethexec.DefaultStylusTargetConfig); err != nil { + Fail(t, err) + } execSeq := &execClientWrapper{execEngine, t} inbox, err := NewTransactionStreamer(arbDb, bc.Config(), execSeq, nil, make(chan error, 1), transactionStreamerConfigFetcher, &DefaultSnapSyncConfig) if err != nil { diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index 23b81bde62..fe4149c80e 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -804,6 +804,7 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L if len(messages) > 0 { latestTimestamp = messages[len(messages)-1].Message.Header.Timestamp } + // #nosec G115 log.Info( "InboxTracker", "sequencerBatchCount", pos, @@ -811,7 +812,9 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L "l1Block", latestL1Block, "l1Timestamp", time.Unix(int64(latestTimestamp), 0), ) + // #nosec G115 inboxLatestBatchGauge.Update(int64(pos)) + // #nosec G115 inboxLatestBatchMessageGauge.Update(int64(newMessageCount)) if t.validator != nil { diff --git a/arbnode/node.go b/arbnode/node.go index 0676d6393c..93b58e800f 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -515,6 +515,7 @@ func createNodeImpl( if err != nil { return nil, err } + // #nosec G115 sequencerInbox, err := NewSequencerInbox(l1client, deployInfo.SequencerInbox, int64(deployInfo.DeployedAt)) if err != nil { return nil, err @@ -639,6 +640,7 @@ func createNodeImpl( tmpAddress := common.HexToAddress(config.Staker.ContractWalletAddress) existingWalletAddress = &tmpAddress } + // #nosec G115 wallet, err = validatorwallet.NewContract(dp, existingWalletAddress, deployInfo.ValidatorWalletCreator, deployInfo.Rollup, l1Reader, txOptsValidator, int64(deployInfo.DeployedAt), func(common.Address) {}, getExtraGas) if err != nil { return nil, err @@ -660,7 +662,7 @@ func createNodeImpl( confirmedNotifiers = append(confirmedNotifiers, messagePruner) } - stakerObj, err = staker.NewStaker(l1Reader, wallet, bind.CallOpts{}, config.Staker, blockValidator, statelessBlockValidator, nil, confirmedNotifiers, deployInfo.ValidatorUtils, fatalErrChan) + stakerObj, err = staker.NewStaker(l1Reader, wallet, bind.CallOpts{}, func() *staker.L1ValidatorConfig { return &configFetcher.Get().Staker }, blockValidator, statelessBlockValidator, nil, confirmedNotifiers, deployInfo.ValidatorUtils, fatalErrChan) if err != nil { return nil, err } diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 90e7feddc6..a5bab8342f 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -840,6 +840,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil // Active broadcast reorg and L1 messages at or before start of broadcast messages // Or no active broadcast reorg and broadcast messages start before or immediately after last L1 message if messagesAfterPos >= broadcastStartPos { + // #nosec G115 broadcastSliceIndex := 
int(messagesAfterPos - broadcastStartPos) messagesOldLen := len(messages) if broadcastSliceIndex < len(s.broadcasterQueuedMessages) { diff --git a/arbos/addressSet/addressSet_test.go b/arbos/addressSet/addressSet_test.go index 7d06c74f0b..d32e07a546 100644 --- a/arbos/addressSet/addressSet_test.go +++ b/arbos/addressSet/addressSet_test.go @@ -316,6 +316,7 @@ func checkIfRectifyMappingWorks(t *testing.T, aset *AddressSet, owners []common. Fail(t, "RectifyMapping did not fix the mismatch") } + // #nosec G115 if clearList && int(size(t, aset)) != index+1 { Fail(t, "RectifyMapping did not fix the mismatch") } diff --git a/arbos/addressTable/addressTable.go b/arbos/addressTable/addressTable.go index 3fbb7b3782..566c71b689 100644 --- a/arbos/addressTable/addressTable.go +++ b/arbos/addressTable/addressTable.go @@ -118,6 +118,7 @@ func (atab *AddressTable) Decompress(buf []byte) (common.Address, uint64, error) if !exists { return common.Address{}, 0, errors.New("invalid index in compressed address") } + // #nosec G115 numBytesRead := uint64(rd.Size() - int64(rd.Len())) return addr, numBytesRead, nil } diff --git a/arbos/arbosState/initialization_test.go b/arbos/arbosState/initialization_test.go index 34802392fe..b0fe1d0dac 100644 --- a/arbos/arbosState/initialization_test.go +++ b/arbos/arbosState/initialization_test.go @@ -109,6 +109,7 @@ func pseudorandomAccountInitInfoForTesting(prand *testhelpers.PseudoRandomDataSo } func pseudorandomHashHashMapForTesting(prand *testhelpers.PseudoRandomDataSource, maxItems uint64) map[common.Hash]common.Hash { + // #nosec G115 size := int(prand.GetUint64() % maxItems) ret := make(map[common.Hash]common.Hash) for i := 0; i < size; i++ { diff --git a/arbos/arbosState/initialize.go b/arbos/arbosState/initialize.go index c44febf386..56fa579c15 100644 --- a/arbos/arbosState/initialize.go +++ b/arbos/arbosState/initialize.go @@ -6,10 +6,12 @@ package arbosState import ( "errors" "math/big" + "regexp" "sort" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" @@ -64,6 +66,8 @@ func InitializeArbosInDatabase(db ethdb.Database, cacheConfig *core.CacheConfig, log.Crit("failed to init empty statedb", "error", err) } + noStateTrieChangesToCommitError := regexp.MustCompile("^triedb layer .+ is disk layer$") + // commit avoids keeping the entire state in memory while importing the state. // At some time it was also used to avoid reprocessing the whole import in case of a crash. commit := func() (common.Hash, error) { @@ -73,7 +77,11 @@ func InitializeArbosInDatabase(db ethdb.Database, cacheConfig *core.CacheConfig, } err = stateDatabase.TrieDB().Commit(root, true) if err != nil { - return common.Hash{}, err + // pathdb returns an error when there are no state trie changes to commit and we try to commit. + // This checks if the error is the expected one and ignores it. 
+ if (cacheConfig.StateScheme != rawdb.PathScheme) || !noStateTrieChangesToCommitError.MatchString(err.Error()) { + return common.Hash{}, err + } } statedb, err = state.New(root, stateDatabase, nil) if err != nil { diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 9e00eeb581..392bf36d37 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -509,7 +509,7 @@ func (ps *L1PricingState) getPosterUnitsWithoutCache(tx *types.Transaction, post return 0 } - l1Bytes, err := byteCountAfterBrotliLevel(txBytes, int(brotliCompressionLevel)) + l1Bytes, err := byteCountAfterBrotliLevel(txBytes, brotliCompressionLevel) if err != nil { panic(fmt.Sprintf("failed to compress tx: %v", err)) } @@ -594,7 +594,7 @@ func (ps *L1PricingState) PosterDataCost(message *core.Message, poster common.Ad return am.BigMulByUint(pricePerUnit, units), units } -func byteCountAfterBrotliLevel(input []byte, level int) (uint64, error) { +func byteCountAfterBrotliLevel(input []byte, level uint64) (uint64, error) { compressed, err := arbcompress.CompressLevel(input, level) if err != nil { return 0, err diff --git a/arbos/l1pricing_test.go b/arbos/l1pricing_test.go index 6e2b1b7eec..1cda4b3d82 100644 --- a/arbos/l1pricing_test.go +++ b/arbos/l1pricing_test.go @@ -100,7 +100,7 @@ func expectedResultsForL1Test(input *l1PricingTest) *l1TestExpectedResults { availableFunds = availableFundsCap } } - fundsWantedForRewards := big.NewInt(int64(input.unitReward * input.unitsPerSecond)) + fundsWantedForRewards := new(big.Int).SetUint64(input.unitReward * input.unitsPerSecond) unitsAllocated := arbmath.UintToBig(input.unitsPerSecond) if arbmath.BigLessThan(availableFunds, fundsWantedForRewards) { ret.rewardRecipientBalance = availableFunds @@ -111,7 +111,7 @@ func expectedResultsForL1Test(input *l1PricingTest) *l1TestExpectedResults { uncappedAvailableFunds = arbmath.BigSub(uncappedAvailableFunds, ret.rewardRecipientBalance) ret.unitsRemaining = (3 * input.unitsPerSecond) - unitsAllocated.Uint64() - maxCollectable := big.NewInt(int64(input.fundsSpent)) + maxCollectable := new(big.Int).SetUint64(input.fundsSpent) if arbmath.BigLessThan(availableFunds, maxCollectable) { maxCollectable = availableFunds } @@ -170,7 +170,7 @@ func _testL1PricingFundsDue(t *testing.T, testParams *l1PricingTest, expectedRes Require(t, err) // create some fake collection - balanceAdded := big.NewInt(int64(testParams.fundsCollectedPerSecond * 3)) + balanceAdded := new(big.Int).SetUint64(testParams.fundsCollectedPerSecond * 3) unitsAdded := testParams.unitsPerSecond * 3 evm.StateDB.AddBalance(l1pricing.L1PricerFundsPoolAddress, uint256.MustFromBig(balanceAdded)) err = l1p.SetL1FeesAvailable(balanceAdded) diff --git a/arbos/l2pricing/l2pricing_test.go b/arbos/l2pricing/l2pricing_test.go index 57759d7f82..aa1e785f70 100644 --- a/arbos/l2pricing/l2pricing_test.go +++ b/arbos/l2pricing/l2pricing_test.go @@ -40,6 +40,7 @@ func TestPricingModelExp(t *testing.T) { // show that running at the speed limit with a full pool is a steady-state colors.PrintBlue("full pool & speed limit") for seconds := 0; seconds < 4; seconds++ { + // #nosec G115 fakeBlockUpdate(t, pricing, int64(seconds)*int64(limit), uint64(seconds)) if getPrice(t, pricing) != minPrice { Fail(t, "price changed when it shouldn't have") @@ -50,6 +51,7 @@ func TestPricingModelExp(t *testing.T) { // note that for large enough spans of time the price will rise a miniscule amount due to the pool's avg colors.PrintBlue("pool target & speed limit") for seconds := 0; 
seconds < 4; seconds++ { + // #nosec G115 fakeBlockUpdate(t, pricing, int64(seconds)*int64(limit), uint64(seconds)) if getPrice(t, pricing) != minPrice { Fail(t, "price changed when it shouldn't have") @@ -59,6 +61,7 @@ func TestPricingModelExp(t *testing.T) { // show that running over the speed limit escalates the price before the pool drains colors.PrintBlue("exceeding the speed limit") for { + // #nosec G115 fakeBlockUpdate(t, pricing, 8*int64(limit), 1) newPrice := getPrice(t, pricing) if newPrice < price { diff --git a/arbos/l2pricing/model.go b/arbos/l2pricing/model.go index 131af2c2cf..476effa8aa 100644 --- a/arbos/l2pricing/model.go +++ b/arbos/l2pricing/model.go @@ -30,22 +30,26 @@ func (ps *L2PricingState) AddToGasPool(gas int64) error { return err } // pay off some of the backlog with the added gas, stopping at 0 - backlog = arbmath.SaturatingUCast[uint64](arbmath.SaturatingSub(int64(backlog), gas)) + if gas > 0 { + backlog = arbmath.SaturatingUSub(backlog, uint64(gas)) + } else { + backlog = arbmath.SaturatingUAdd(backlog, uint64(-gas)) + } return ps.SetGasBacklog(backlog) } // UpdatePricingModel updates the pricing model with info from the last block func (ps *L2PricingState) UpdatePricingModel(l2BaseFee *big.Int, timePassed uint64, debug bool) { speedLimit, _ := ps.SpeedLimitPerSecond() - _ = ps.AddToGasPool(int64(timePassed * speedLimit)) + _ = ps.AddToGasPool(arbmath.SaturatingCast[int64](arbmath.SaturatingUMul(timePassed, speedLimit))) inertia, _ := ps.PricingInertia() tolerance, _ := ps.BacklogTolerance() backlog, _ := ps.GasBacklog() minBaseFee, _ := ps.MinBaseFeeWei() baseFee := minBaseFee if backlog > tolerance*speedLimit { - excess := int64(backlog - tolerance*speedLimit) - exponentBips := arbmath.NaturalToBips(excess) / arbmath.Bips(inertia*speedLimit) + excess := arbmath.SaturatingCast[int64](backlog - tolerance*speedLimit) + exponentBips := arbmath.NaturalToBips(excess) / arbmath.SaturatingCast[arbmath.Bips](inertia*speedLimit) baseFee = arbmath.BigMulByBips(minBaseFee, arbmath.ApproxExpBasisPoints(exponentBips, 4)) } _ = ps.SetBaseFeeWei(baseFee) diff --git a/arbos/programs/cgo_test.go b/arbos/programs/cgo_test.go new file mode 100644 index 0000000000..c0e146d98d --- /dev/null +++ b/arbos/programs/cgo_test.go @@ -0,0 +1,44 @@ +// Copyright 2024, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +//go:build !wasm +// +build !wasm + +package programs + +import ( + "fmt" + "os" + "strings" + "testing" +) + +func TestConstants(t *testing.T) { + err := testConstants() + if err != nil { + t.Fatal(err) + } +} + +// normal test will not write anything to disk +// to test cross-compilation: +// * run test with TEST_COMPILE=STORE on one machine +// * copy target/testdata to the other machine +// * run test with TEST_COMPILE=LOAD on the other machine +func TestCompileArch(t *testing.T) { + compile_env := os.Getenv("TEST_COMPILE") + if compile_env == "" { + fmt.Print("use TEST_COMPILE=[STORE|LOAD] to allow store/load in compile test") + } + store := strings.Contains(compile_env, "STORE") + err := testCompileArch(store) + if err != nil { + t.Fatal(err) + } + if store || strings.Contains(compile_env, "LOAD") { + err = testCompileLoad() + if err != nil { + t.Fatal(err) + } + } +} diff --git a/arbos/programs/constant_test.go b/arbos/programs/constant_test.go deleted file mode 100644 index fe29bcf3d9..0000000000 --- a/arbos/programs/constant_test.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2024, Offchain Labs, Inc. 
-// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE - -package programs - -import "testing" - -func TestConstants(t *testing.T) { - err := testConstants() - if err != nil { - t.Fatal(err) - } -} diff --git a/arbos/programs/native.go b/arbos/programs/native.go index a0976afb2f..fd3dec25a0 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -54,11 +54,11 @@ func activateProgram( debug bool, burner burn.Burner, ) (*activationInfo, error) { - info, asm, module, err := activateProgramInternal(db, program, codehash, wasm, page_limit, version, debug, burner.GasLeft()) + info, asmMap, err := activateProgramInternal(db, program, codehash, wasm, page_limit, version, debug, burner.GasLeft()) if err != nil { return nil, err } - db.ActivateWasm(info.moduleHash, asm, module) + db.ActivateWasm(info.moduleHash, asmMap) return info, nil } @@ -71,41 +71,52 @@ func activateProgramInternal( version uint16, debug bool, gasLeft *uint64, -) (*activationInfo, []byte, []byte, error) { +) (*activationInfo, map[rawdb.Target][]byte, error) { output := &rustBytes{} - asmLen := usize(0) moduleHash := &bytes32{} stylusData := &C.StylusData{} codeHash := hashToBytes32(codehash) - status := userStatus(C.stylus_activate( + status_mod := userStatus(C.stylus_activate( goSlice(wasm), u16(page_limit), u16(version), cbool(debug), output, - &asmLen, &codeHash, moduleHash, stylusData, (*u64)(gasLeft), )) - data, msg, err := status.toResult(output.intoBytes(), debug) + module, msg, err := status_mod.toResult(output.intoBytes(), debug) if err != nil { if debug { log.Warn("activation failed", "err", err, "msg", msg, "program", addressForLogging) } if errors.Is(err, vm.ErrExecutionReverted) { - return nil, nil, nil, fmt.Errorf("%w: %s", ErrProgramActivation, msg) + return nil, nil, fmt.Errorf("%w: %s", ErrProgramActivation, msg) } - return nil, nil, nil, err + return nil, nil, err + } + target := rawdb.LocalTarget() + status_asm := C.stylus_compile( + goSlice(wasm), + u16(version), + cbool(debug), + goSlice([]byte(target)), + output, + ) + asm := output.intoBytes() + if status_asm != 0 { + return nil, nil, fmt.Errorf("%w: %s", ErrProgramActivation, string(asm)) + } + asmMap := map[rawdb.Target][]byte{ + rawdb.TargetWavm: module, + target: asm, } hash := moduleHash.toHash() - split := int(asmLen) - asm := data[:split] - module := data[split:] info := &activationInfo{ moduleHash: hash, @@ -114,11 +125,12 @@ func activateProgramInternal( asmEstimate: uint32(stylusData.asm_estimate), footprint: uint16(stylusData.footprint), } - return info, asm, module, err + return info, asmMap, err } func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, addressForLogging common.Address, code []byte, codeHash common.Hash, pagelimit uint16, time uint64, debugMode bool, program Program) ([]byte, error) { - localAsm, err := statedb.TryGetActivatedAsm(moduleHash) + localTarget := rawdb.LocalTarget() + localAsm, err := statedb.TryGetActivatedAsm(localTarget, moduleHash) if err == nil && len(localAsm) > 0 { return localAsm, nil } @@ -132,7 +144,7 @@ func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, addressForLogging c unlimitedGas := uint64(0xffffffffffff) // we know program is activated, so it must be in correct version and not use too much memory - info, asm, module, err := activateProgramInternal(statedb, addressForLogging, codeHash, wasm, pagelimit, program.version, debugMode, &unlimitedGas) + info, asmMap, err := activateProgramInternal(statedb, addressForLogging, codeHash, 
wasm, pagelimit, program.version, debugMode, &unlimitedGas) if err != nil { log.Error("failed to reactivate program", "address", addressForLogging, "expected moduleHash", moduleHash, "err", err) return nil, fmt.Errorf("failed to reactivate program address: %v err: %w", addressForLogging, err) @@ -148,14 +160,23 @@ func getLocalAsm(statedb vm.StateDB, moduleHash common.Hash, addressForLogging c // stylus program is active on-chain, and was activated in the past // so we store it directly to database batch := statedb.Database().WasmStore().NewBatch() - rawdb.WriteActivation(batch, moduleHash, asm, module) + rawdb.WriteActivation(batch, moduleHash, asmMap) if err := batch.Write(); err != nil { log.Error("failed writing re-activation to state", "address", addressForLogging, "err", err) } } else { // program activated recently, possibly in this eth_call // store it to statedb. It will be stored to database if statedb is commited - statedb.ActivateWasm(info.moduleHash, asm, module) + statedb.ActivateWasm(info.moduleHash, asmMap) + } + asm, exists := asmMap[localTarget] + if !exists { + var availableTargets []rawdb.Target + for target := range asmMap { + availableTargets = append(availableTargets, target) + } + log.Error("failed to reactivate program - missing asm for local target", "address", addressForLogging, "local target", localTarget, "available targets", availableTargets) + return nil, fmt.Errorf("failed to reactivate program - missing asm for local target, address: %v, local target: %v, available targets: %v", addressForLogging, localTarget, availableTargets) } return asm, nil } @@ -182,7 +203,11 @@ func callProgram( } if db, ok := db.(*state.StateDB); ok { - db.RecordProgram(moduleHash) + targets := []rawdb.Target{ + rawdb.TargetWavm, + rawdb.LocalTarget(), + } + db.RecordProgram(targets, moduleHash) } evmApi := newApi(interpreter, tracingInfo, scope, memoryModel) @@ -263,6 +288,25 @@ func ResizeWasmLruCache(size uint32) { C.stylus_cache_lru_resize(u32(size)) } +const DefaultTargetDescriptionArm = "arm64-linux-unknown+neon" +const DefaultTargetDescriptionX86 = "x86_64-linux-unknown+sse4.2" + +func SetTarget(name rawdb.Target, description string, native bool) error { + output := &rustBytes{} + status := userStatus(C.stylus_target_set( + goSlice([]byte(name)), + goSlice([]byte(description)), + output, + cbool(native), + )) + if status != userSuccess { + msg := arbutil.ToStringOrHex(output.intoBytes()) + log.Error("failed to set stylus compilation target", "status", status, "msg", msg) + return fmt.Errorf("failed to set stylus compilation target, status %v: %v", status, msg) + } + return nil +} + func (value bytes32) toHash() common.Hash { hash := common.Hash{} for index, b := range value.bytes { diff --git a/arbos/programs/testcompile.go b/arbos/programs/testcompile.go new file mode 100644 index 0000000000..a16bae52c0 --- /dev/null +++ b/arbos/programs/testcompile.go @@ -0,0 +1,246 @@ +// Copyright 2024, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +//go:build !wasm +// +build !wasm + +package programs + +// This file exists because cgo isn't allowed in tests + +/* +#cgo CFLAGS: -g -Wall -I../../target/include/ +#include "arbitrator.h" + +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; +typedef size_t usize; + +void handleReqWrap(usize api, u32 req_type, RustSlice *data, u64 *out_cost, GoSliceData *out_result, GoSliceData *out_raw_data); +*/ +import "C" +import ( + "fmt" + "os" + "runtime" + + "github.com/ethereum/go-ethereum/core/rawdb" +) + +func Wat2Wasm(wat []byte) ([]byte, error) { + output := &rustBytes{} + + status := C.wat_to_wasm(goSlice(wat), output) + + if status != 0 { + return nil, fmt.Errorf("failed reading wat file: %v", string(output.intoBytes())) + } + + return output.intoBytes(), nil +} + +func testCompileArch(store bool) error { + + localTarget := rawdb.LocalTarget() + nativeArm64 := localTarget == rawdb.TargetArm64 + nativeAmd64 := localTarget == rawdb.TargetAmd64 + + arm64CompileName := []byte(rawdb.TargetArm64) + amd64CompileName := []byte(rawdb.TargetAmd64) + + arm64TargetString := []byte(DefaultTargetDescriptionArm) + amd64TargetString := []byte(DefaultTargetDescriptionX86) + + output := &rustBytes{} + + _, err := fmt.Print("starting test.. native arm? ", nativeArm64, " amd? ", nativeAmd64, " GOARCH/GOOS: ", runtime.GOARCH+"/"+runtime.GOOS, "\n") + if err != nil { + return err + } + + status := C.stylus_target_set(goSlice(arm64CompileName), + goSlice(arm64TargetString), + output, + cbool(nativeArm64)) + + if status != 0 { + return fmt.Errorf("failed setting compilation target arm: %v", string(output.intoBytes())) + } + + status = C.stylus_target_set(goSlice(amd64CompileName), + goSlice(amd64TargetString), + output, + cbool(nativeAmd64)) + + if status != 0 { + return fmt.Errorf("failed setting compilation target amd: %v", string(output.intoBytes())) + } + + source, err := os.ReadFile("../../arbitrator/stylus/tests/add.wat") + if err != nil { + return fmt.Errorf("failed reading stylus contract: %w", err) + } + + wasm, err := Wat2Wasm(source) + if err != nil { + return err + } + + if store { + _, err := fmt.Print("storing compiled files to ../../target/testdata/\n") + if err != nil { + return err + } + err = os.MkdirAll("../../target/testdata", 0755) + if err != nil { + return err + } + } + + status = C.stylus_compile( + goSlice(wasm), + u16(1), + cbool(true), + goSlice([]byte("booga")), + output, + ) + if status == 0 { + return fmt.Errorf("succeeded compiling non-existent arch: %v", string(output.intoBytes())) + } + + status = C.stylus_compile( + goSlice(wasm), + u16(1), + cbool(true), + goSlice([]byte{}), + output, + ) + if status != 0 { + return fmt.Errorf("failed compiling native: %v", string(output.intoBytes())) + } + if store && !nativeAmd64 && !nativeArm64 { + _, err := fmt.Printf("writing host file\n") + if err != nil { + return err + } + + err = os.WriteFile("../../target/testdata/host.bin", output.intoBytes(), 0644) + if err != nil { + return err + } + } + + status = C.stylus_compile( + goSlice(wasm), + u16(1), + cbool(true), + goSlice(arm64CompileName), + output, + ) + if status != 0 { + return fmt.Errorf("failed compiling arm: %v", string(output.intoBytes())) + } + if store { + _, err := fmt.Printf("writing arm file\n") + if err != nil { + return err + } + + err = os.WriteFile("../../target/testdata/arm64.bin", output.intoBytes(), 0644) + if err != nil { + return err + } + } + + status = 
C.stylus_compile( + goSlice(wasm), + u16(1), + cbool(true), + goSlice(amd64CompileName), + output, + ) + if status != 0 { + return fmt.Errorf("failed compiling amd: %v", string(output.intoBytes())) + } + if store { + _, err := fmt.Printf("writing amd64 file\n") + if err != nil { + return err + } + + err = os.WriteFile("../../target/testdata/amd64.bin", output.intoBytes(), 0644) + if err != nil { + return err + } + } + + return nil +} + +func testCompileLoad() error { + filePath := "../../target/testdata/host.bin" + localTarget := rawdb.LocalTarget() + if localTarget == rawdb.TargetArm64 { + filePath = "../../target/testdata/arm64.bin" + } + if localTarget == rawdb.TargetAmd64 { + filePath = "../../target/testdata/amd64.bin" + } + + _, err := fmt.Print("starting load test. FilePath: ", filePath, " GOARCH/GOOS: ", runtime.GOARCH+"/"+runtime.GOOS, "\n") + if err != nil { + return err + } + + localAsm, err := os.ReadFile(filePath) + if err != nil { + return err + } + + calldata := []byte{} + + evmData := EvmData{} + progParams := ProgParams{ + MaxDepth: 10000, + InkPrice: 1, + DebugMode: true, + } + reqHandler := C.NativeRequestHandler{ + handle_request_fptr: (*[0]byte)(C.handleReqWrap), + id: 0, + } + + inifiniteGas := u64(0xfffffffffffffff) + + output := &rustBytes{} + + _, err = fmt.Print("launching program..\n") + if err != nil { + return err + } + + status := userStatus(C.stylus_call( + goSlice(localAsm), + goSlice(calldata), + progParams.encode(), + reqHandler, + evmData.encode(), + cbool(true), + output, + &inifiniteGas, + u32(0), + )) + + _, err = fmt.Print("returned: ", status, "\n") + if err != nil { + return err + } + + _, msg, err := status.toResult(output.intoBytes(), true) + if status == userFailure { + err = fmt.Errorf("%w: %v", err, msg) + } + + return err +} diff --git a/arbos/programs/wasmstorehelper.go b/arbos/programs/wasmstorehelper.go index 9e69178694..4f82d80282 100644 --- a/arbos/programs/wasmstorehelper.go +++ b/arbos/programs/wasmstorehelper.go @@ -44,8 +44,8 @@ func (p Programs) SaveActiveProgramToWasmStore(statedb *state.StateDB, codeHash } // If already in wasm store then return early - localAsm, err := statedb.TryGetActivatedAsm(moduleHash) - if err == nil && len(localAsm) > 0 { + _, err = statedb.TryGetActivatedAsmMap([]rawdb.Target{rawdb.TargetWavm, rawdb.LocalTarget()}, moduleHash) + if err == nil { return nil } @@ -58,7 +58,7 @@ func (p Programs) SaveActiveProgramToWasmStore(statedb *state.StateDB, codeHash unlimitedGas := uint64(0xffffffffffff) // We know program is activated, so it must be in correct version and not use too much memory // Empty program address is supplied because we dont have access to this during rebuilding of wasm store - info, asm, module, err := activateProgramInternal(statedb, common.Address{}, codeHash, wasm, params.PageLimit, program.version, debugMode, &unlimitedGas) + info, asmMap, err := activateProgramInternal(statedb, common.Address{}, codeHash, wasm, params.PageLimit, program.version, debugMode, &unlimitedGas) if err != nil { log.Error("failed to reactivate program while rebuilding wasm store", "expected moduleHash", moduleHash, "err", err) return fmt.Errorf("failed to reactivate program while rebuilding wasm store: %w", err) @@ -70,7 +70,7 @@ func (p Programs) SaveActiveProgramToWasmStore(statedb *state.StateDB, codeHash } batch := statedb.Database().WasmStore().NewBatch() - rawdb.WriteActivation(batch, moduleHash, asm, module) + rawdb.WriteActivation(batch, moduleHash, asmMap) if err := batch.Write(); err != nil { 
log.Error("failed writing re-activation to state while rebuilding wasm store", "err", err) return err diff --git a/arbos/storage/storage.go b/arbos/storage/storage.go index 6e6c976644..352726778d 100644 --- a/arbos/storage/storage.go +++ b/arbos/storage/storage.go @@ -156,11 +156,6 @@ func (s *Storage) GetUint64ByUint64(key uint64) (uint64, error) { return s.GetUint64(util.UintToHash(key)) } -func (s *Storage) GetUint32(key common.Hash) (uint32, error) { - value, err := s.Get(key) - return uint32(value.Big().Uint64()), err -} - func (s *Storage) Set(key common.Hash, value common.Hash) error { if s.burner.ReadOnly() { log.Error("Read-only burner attempted to mutate state", "key", key, "value", value) @@ -420,6 +415,7 @@ func (sbu *StorageBackedInt64) Get() (int64, error) { if !raw.Big().IsUint64() { panic("invalid value found in StorageBackedInt64 storage") } + // #nosec G115 return int64(raw.Big().Uint64()), err // see implementation note above } @@ -477,6 +473,7 @@ func (sbu *StorageBackedUint16) Get() (uint16, error) { if !big.IsUint64() || big.Uint64() > math.MaxUint16 { panic("expected uint16 compatible value in storage") } + // #nosec G115 return uint16(big.Uint64()), err } @@ -517,6 +514,7 @@ func (sbu *StorageBackedUint32) Get() (uint32, error) { if !big.IsUint64() || big.Uint64() > math.MaxUint32 { panic("expected uint32 compatible value in storage") } + // #nosec G115 return uint32(big.Uint64()), err } diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 753ca19cd6..b58a7420b7 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -246,7 +246,7 @@ func (r *inboxMultiplexer) IsCachedSegementLast() bool { if r.delayedMessagesRead < seqMsg.afterDelayedMessages { return false } - for segmentNum := int(r.cachedSegmentNum) + 1; segmentNum < len(seqMsg.segments); segmentNum++ { + for segmentNum := r.cachedSegmentNum + 1; segmentNum < uint64(len(seqMsg.segments)); segmentNum++ { segment := seqMsg.segments[segmentNum] if len(segment) == 0 { continue @@ -276,7 +276,7 @@ func (r *inboxMultiplexer) getNextMsg() (*arbostypes.MessageWithMetadata, error) if segmentNum >= uint64(len(seqMsg.segments)) { break } - segment = seqMsg.segments[int(segmentNum)] + segment = seqMsg.segments[segmentNum] if len(segment) == 0 { segmentNum++ continue @@ -322,7 +322,7 @@ func (r *inboxMultiplexer) getNextMsg() (*arbostypes.MessageWithMetadata, error) log.Warn("reading virtual delayed message segment", "delayedMessagesRead", r.delayedMessagesRead, "afterDelayedMessages", seqMsg.afterDelayedMessages) segment = []byte{BatchSegmentKindDelayedMessages} } else { - segment = seqMsg.segments[int(segmentNum)] + segment = seqMsg.segments[segmentNum] } if len(segment) == 0 { log.Error("empty sequencer message segment", "sequence", r.cachedSegmentNum, "segmentNum", segmentNum) diff --git a/arbutil/block_message_relation.go b/arbutil/block_message_relation.go index a69f9079ee..e164cf2619 100644 --- a/arbutil/block_message_relation.go +++ b/arbutil/block_message_relation.go @@ -15,5 +15,6 @@ func SignedBlockNumberToMessageCount(blockNumber int64, genesisBlockNumber uint6 } func MessageCountToBlockNumber(messageCount MessageIndex, genesisBlockNumber uint64) int64 { + // #nosec G115 return int64(uint64(messageCount)+genesisBlockNumber) - 1 } diff --git a/arbutil/correspondingl1blocknumber.go b/arbutil/correspondingl1blocknumber.go index 05323ed183..d654e471e2 100644 --- a/arbutil/correspondingl1blocknumber.go +++ b/arbutil/correspondingl1blocknumber.go @@ -20,6 +20,7 @@ func ParentHeaderToL1BlockNumber(header 
diff --git a/arbutil/correspondingl1blocknumber.go b/arbutil/correspondingl1blocknumber.go
index 05323ed183..d654e471e2 100644
--- a/arbutil/correspondingl1blocknumber.go
+++ b/arbutil/correspondingl1blocknumber.go
@@ -20,6 +20,7 @@ func ParentHeaderToL1BlockNumber(header *types.Header) uint64 {
 }

 func CorrespondingL1BlockNumber(ctx context.Context, client L1Interface, parentBlockNumber uint64) (uint64, error) {
+    // #nosec G115
     header, err := client.HeaderByNumber(ctx, big.NewInt(int64(parentBlockNumber)))
     if err != nil {
         return 0, fmt.Errorf("error getting L1 block number %d header : %w", parentBlockNumber, err)
diff --git a/blocks_reexecutor/blocks_reexecutor.go b/blocks_reexecutor/blocks_reexecutor.go
index 1e4a06fe90..f7cc0d8c72 100644
--- a/blocks_reexecutor/blocks_reexecutor.go
+++ b/blocks_reexecutor/blocks_reexecutor.go
@@ -102,7 +102,8 @@ func New(c *Config, blockchain *core.BlockChain, fatalErrChan chan error) *Block
         if rng > end-start {
             rng = end - start
         }
-        start += uint64(rand.Intn(int(end - start - rng + 1)))
+        // #nosec G115
+        start += uint64(rand.Int63n(int64(end - start - rng + 1)))
         end = start + rng
     }
     // Inclusive of block reexecution [start, end]
diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go
index f6501105c2..b7b935fb7a 100644
--- a/broadcaster/backlog/backlog.go
+++ b/broadcaster/backlog/backlog.go
@@ -97,6 +97,7 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error {
         if err != nil {
             log.Warn("error calculating backlogSizeInBytes", "err", err)
         } else {
+            // #nosec G115
             backlogSizeInBytesGauge.Update(int64(size))
         }
     }
@@ -108,6 +109,7 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error {
             segment = newBacklogSegment()
             b.head.Store(segment)
             b.tail.Store(segment)
+            // #nosec G115
             confirmedSequenceNumberGauge.Update(int64(msg.SequenceNumber))
         }
@@ -143,9 +145,11 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error {
         }
         lookupByIndex.Store(uint64(msg.SequenceNumber), segment)
         b.messageCount.Add(1)
+        // #nosec G115
         backlogSizeInBytesGauge.Inc(int64(msg.Size()))
     }

+    // #nosec G115
     backlogSizeGauge.Update(int64(b.Count()))
     return nil
 }
@@ -174,7 +178,7 @@ func (b *backlog) Get(start, end uint64) (*m.BroadcastMessage, error) {
     }

     bm := &m.BroadcastMessage{Version: 1}
-    required := int(end-start) + 1
+    required := end - start + 1
     for {
         segMsgs, err := segment.Get(arbmath.MaxInt(start, segment.Start()), arbmath.MinInt(end, segment.End()))
         if err != nil {
@@ -183,7 +187,7 @@ func (b *backlog) Get(start, end uint64) (*m.BroadcastMessage, error) {
         }
         bm.Messages = append(bm.Messages, segMsgs...)
        segment = segment.Next()
-        if len(bm.Messages) == required {
+        if uint64(len(bm.Messages)) == required {
             break
         } else if segment == nil {
             return nil, errOutOfBounds
@@ -213,6 +217,7 @@ func (b *backlog) delete(confirmed uint64) {
         return
     }

+    // #nosec G115
     confirmedSequenceNumberGauge.Update(int64(confirmed))

     // find the segment containing the confirmed message
diff --git a/broadcaster/backlog/backlog_test.go b/broadcaster/backlog/backlog_test.go
index ee712de9ed..d74389f692 100644
--- a/broadcaster/backlog/backlog_test.go
+++ b/broadcaster/backlog/backlog_test.go
@@ -33,8 +33,8 @@ func validateBacklog(t *testing.T, b *backlog, count, start, end uint64, lookupK
         }
     }

-    expLen := len(lookupKeys)
-    actualLen := int(b.Count())
+    expLen := uint64(len(lookupKeys))
+    actualLen := b.Count()
     if expLen != actualLen {
         t.Errorf("expected length of lookupByIndex map (%d) does not equal actual length (%d)", expLen, actualLen)
     }
diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go
index ba95f2d8af..397698635a 100644
--- a/broadcaster/broadcaster.go
+++ b/broadcaster/broadcaster.go
@@ -145,6 +145,7 @@ func (b *Broadcaster) ListenerAddr() net.Addr {
 }

 func (b *Broadcaster) GetCachedMessageCount() int {
+    // #nosec G115
     return int(b.backlog.Count())
 }
diff --git a/cmd/conf/init.go b/cmd/conf/init.go
index d88bcdd241..f360691693 100644
--- a/cmd/conf/init.go
+++ b/cmd/conf/init.go
@@ -30,7 +30,7 @@ type InitConfig struct {
     PruneThreads             int    `koanf:"prune-threads"`
     PruneTrieCleanCache      int    `koanf:"prune-trie-clean-cache"`
     RecreateMissingStateFrom uint64 `koanf:"recreate-missing-state-from"`
-    RebuildLocalWasm         bool   `koanf:"rebuild-local-wasm"`
+    RebuildLocalWasm         string `koanf:"rebuild-local-wasm"`
     ReorgToBatch             int64  `koanf:"reorg-to-batch"`
     ReorgToMessageBatch      int64  `koanf:"reorg-to-message-batch"`
     ReorgToBlockBatch        int64  `koanf:"reorg-to-block-batch"`
@@ -56,7 +56,7 @@ var InitConfigDefault = InitConfig{
     PruneThreads:             runtime.NumCPU(),
     PruneTrieCleanCache:      600,
     RecreateMissingStateFrom: 0, // 0 = disabled
-    RebuildLocalWasm:         true,
+    RebuildLocalWasm:         "auto",
     ReorgToBatch:             -1,
     ReorgToMessageBatch:      -1,
     ReorgToBlockBatch:        -1,
@@ -82,10 +82,14 @@ func InitConfigAddOptions(prefix string, f *pflag.FlagSet) {
     f.Int(prefix+".prune-threads", InitConfigDefault.PruneThreads, "the number of threads to use when pruning")
     f.Int(prefix+".prune-trie-clean-cache", InitConfigDefault.PruneTrieCleanCache, "amount of memory in megabytes to cache unchanged state trie nodes with when traversing state database during pruning")
     f.Uint64(prefix+".recreate-missing-state-from", InitConfigDefault.RecreateMissingStateFrom, "block number to start recreating missing states from (0 = disabled)")
-    f.Bool(prefix+".rebuild-local-wasm", InitConfigDefault.RebuildLocalWasm, "rebuild local wasm database on boot if needed (otherwise-will be done lazily)")
     f.Int64(prefix+".reorg-to-batch", InitConfigDefault.ReorgToBatch, "rolls back the blockchain to a specified batch number")
     f.Int64(prefix+".reorg-to-message-batch", InitConfigDefault.ReorgToMessageBatch, "rolls back the blockchain to the first batch at or before a given message index")
     f.Int64(prefix+".reorg-to-block-batch", InitConfigDefault.ReorgToBlockBatch, "rolls back the blockchain to the first batch at or before a given block number")
+    f.String(prefix+".rebuild-local-wasm", InitConfigDefault.RebuildLocalWasm, "rebuild local wasm database on boot if needed (otherwise it will be done lazily). Three modes are supported:\n"+
+        "\"auto\"- (enabled by default) rebuilding is skipped if any previous attempt succeeded, otherwise it continues to rebuild,\n"+
+        "\"force\"- rebuild on startup regardless of the status of previous attempts,\n"+
+        "\"false\"- do not rebuild on startup",
+    )
 }

 func (c *InitConfig) Validate() error {
@@ -110,6 +114,10 @@
             }
         }
     }
+    c.RebuildLocalWasm = strings.ToLower(c.RebuildLocalWasm)
+    if c.RebuildLocalWasm != "auto" && c.RebuildLocalWasm != "force" && c.RebuildLocalWasm != "false" {
+        return fmt.Errorf("invalid value of rebuild-local-wasm, want: auto or force or false, got: %s", c.RebuildLocalWasm)
+    }
     return nil
 }
diff --git a/cmd/dbconv/dbconv/config.go b/cmd/dbconv/dbconv/config.go
index 74623bc264..917f34261d 100644
--- a/cmd/dbconv/dbconv/config.go
+++ b/cmd/dbconv/dbconv/config.go
@@ -71,7 +71,7 @@ var DefaultDBConvConfig = DBConvConfig{
 func DBConvConfigAddOptions(f *flag.FlagSet) {
     DBConfigAddOptions("src", f, &DefaultDBConvConfig.Src)
     DBConfigAddOptions("dst", f, &DefaultDBConvConfig.Dst)
-    f.Int("ideal-batch-size", DefaultDBConvConfig.IdealBatchSize, "ideal write batch size")
+    f.Int("ideal-batch-size", DefaultDBConvConfig.IdealBatchSize, "ideal write batch size in bytes")
     f.Bool("convert", DefaultDBConvConfig.Convert, "enables conversion step")
     f.Bool("compact", DefaultDBConvConfig.Compact, "enables compaction step")
     f.String("verify", DefaultDBConvConfig.Verify, "enables verification step (\"\" = disabled, \"keys\" = only keys, \"full\" = keys and values)")
diff --git a/cmd/dbconv/dbconv/dbconv_test.go b/cmd/dbconv/dbconv/dbconv_test.go
index f31dd68618..31aa0c3917 100644
--- a/cmd/dbconv/dbconv/dbconv_test.go
+++ b/cmd/dbconv/dbconv/dbconv_test.go
@@ -4,12 +4,10 @@ import (
     "context"
     "testing"

-    "github.com/ethereum/go-ethereum/log"
     "github.com/offchainlabs/nitro/util/testhelpers"
 )

 func TestConversion(t *testing.T) {
-    _ = testhelpers.InitTestLog(t, log.LvlTrace)
     oldDBConfig := DBConfigDefaultSrc
     oldDBConfig.Data = t.TempDir()
diff --git a/cmd/dbconv/main.go b/cmd/dbconv/main.go
index c0b5c8f8e4..2d61c96552 100644
--- a/cmd/dbconv/main.go
+++ b/cmd/dbconv/main.go
@@ -85,7 +85,7 @@ func main() {
             os.Exit(1)
         }
         stats := conv.Stats()
-        log.Info("Conversion finished.", "entries", stats.Entries(), "MB", stats.Bytes()/1024/1024, "avg Ke/s", stats.AverageEntriesPerSecond()/1000, "avg MB/s", stats.AverageBytesPerSecond()/1024/1024, "elapsed", stats.Elapsed())
+        log.Info("Conversion finished.", "entries", stats.Entries(), "MB", stats.Bytes()/1024/1024, "avg entries/s", fmt.Sprintf("%.3e", stats.AverageEntriesPerSecond()), "avg MB/s", stats.AverageBytesPerSecond()/1024/1024, "elapsed", stats.Elapsed())
     }

     if config.Compact {
diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go
index c364da5932..fc3439a56d 100644
--- a/cmd/nitro/init.go
+++ b/cmd/nitro/init.go
@@ -301,6 +301,7 @@ func setLatestSnapshotUrl(ctx context.Context, initConfig *conf.InitConfig, chai
         return fmt.Errorf("failed to parse latest mirror \"%s\": %w", initConfig.LatestBase, err)
     }
     latestFileUrl := baseUrl.JoinPath(chain, "latest-"+initConfig.Latest+".txt").String()
+    latestFileUrl = strings.ToLower(latestFileUrl)
     latestFileBytes, err := httpGet(ctx, latestFileUrl)
     if err != nil {
         return fmt.Errorf("failed to get latest file at \"%s\": %w", latestFileUrl, err)
@@ -312,6 +313,7 @@ func setLatestSnapshotUrl(ctx context.Context, initConfig *conf.InitConfig, chai
     } else {
         initConfig.Url = baseUrl.JoinPath(latestFile).String()
    }
+    initConfig.Url = strings.ToLower(initConfig.Url)
     log.Info("Set latest snapshot url", "url", initConfig.Url)
     return nil
 }
@@ -397,6 +399,82 @@ func checkEmptyDatabaseDir(dir string, force bool) error {
     return nil
 }

+func databaseIsEmpty(db ethdb.Database) bool {
+    it := db.NewIterator(nil, nil)
+    defer it.Release()
+    return !it.Next()
+}
+
+// removes all entries whose keys have one of the deprecated prefixes and the key length used in the initial (version 0) wasm store schema
+func purgeVersion0WasmStoreEntries(db ethdb.Database) error {
+    prefixes, keyLength := rawdb.DeprecatedPrefixesV0()
+    batch := db.NewBatch()
+    notMatchingLengthKeyLogged := false
+    for _, prefix := range prefixes {
+        it := db.NewIterator(prefix, nil)
+        defer it.Release()
+        for it.Next() {
+            key := it.Key()
+            if len(key) != keyLength {
+                if !notMatchingLengthKeyLogged {
+                    log.Warn("Found key with deprecated prefix but not matching length, skipping removal. (this warning is logged only once)", "key", key)
+                    notMatchingLengthKeyLogged = true
+                }
+                continue
+            }
+            if err := batch.Delete(key); err != nil {
+                return fmt.Errorf("Failed to remove key %v: %w", key, err)
+            }
+
+            // Recreate the iterator after every batch commit in order
+            // to allow the underlying compactor to delete the entries.
+            if batch.ValueSize() >= ethdb.IdealBatchSize {
+                if err := batch.Write(); err != nil {
+                    return fmt.Errorf("Failed to write batch: %w", err)
+                }
+                batch.Reset()
+                it.Release()
+                it = db.NewIterator(prefix, key)
+            }
+        }
+    }
+    if batch.ValueSize() > 0 {
+        if err := batch.Write(); err != nil {
+            return fmt.Errorf("Failed to write batch: %w", err)
+        }
+        batch.Reset()
+    }
+    return nil
+}
+
+// if db is not empty, validates if wasm database schema version matches current version
+// otherwise persists current version
+func validateOrUpgradeWasmStoreSchemaVersion(db ethdb.Database) error {
+    if !databaseIsEmpty(db) {
+        version, err := rawdb.ReadWasmSchemaVersion(db)
+        if err != nil {
+            if dbutil.IsErrNotFound(err) {
+                version = []byte{0}
+            } else {
+                return fmt.Errorf("Failed to retrieve wasm schema version: %w", err)
+            }
+        }
+        if len(version) != 1 || version[0] > rawdb.WasmSchemaVersion {
+            return fmt.Errorf("Unsupported wasm database schema version, current version: %v, read from wasm database: %v", rawdb.WasmSchemaVersion, version)
+        }
+        // special step for upgrading from version 0 - remove all entries added in version 0
+        if version[0] == 0 {
+            log.Warn("Detected wasm store schema version 0 - removing all old wasm store entries")
+            if err := purgeVersion0WasmStoreEntries(db); err != nil {
+                return fmt.Errorf("Failed to purge wasm store version 0 entries: %w", err)
+            }
+            log.Info("Wasm store schema version 0 entries successfully removed.")
+        }
+    }
+    rawdb.WriteWasmSchemaVersion(db)
+    return nil
+}
+
 func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeConfig, chainId *big.Int, cacheConfig *core.CacheConfig, persistentConfig *conf.PersistentConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses) (ethdb.Database, *core.BlockChain, error) {
     if !config.Init.Force {
         if readOnlyDb, err := stack.OpenDatabaseWithFreezerWithExtraOptions("l2chaindata", 0, 0, config.Persistent.Ancient, "l2chaindata/", true, persistentConfig.Pebble.ExtraOptions("l2chaindata")); err == nil {
@@ -416,6 +494,9 @@
             if err != nil {
                 return nil, nil, err
             }
+            if err := validateOrUpgradeWasmStoreSchemaVersion(wasmDb); err != nil {
+                return nil, nil, err
+            }
             if err := dbutil.UnfinishedConversionCheck(wasmDb); err != nil {
                 return nil, nil, fmt.Errorf("wasm unfinished database conversion check error: %w", err)
             }
@@ -451,13 +532,21 @@
             if err = gethexec.WriteToKeyValueStore(wasmDb, gethexec.RebuildingPositionKey, gethexec.RebuildingDone); err != nil {
                 return nil, nil, fmt.Errorf("unable to set rebuilding status of wasm store to done: %w", err)
             }
-        } else if config.Init.RebuildLocalWasm {
-            position, err := gethexec.ReadFromKeyValueStore[common.Hash](wasmDb, gethexec.RebuildingPositionKey)
-            if err != nil {
-                log.Info("Unable to get codehash position in rebuilding of wasm store, its possible it isnt initialized yet, so initializing it and starting rebuilding", "err", err)
+        } else if config.Init.RebuildLocalWasm != "false" {
+            var position common.Hash
+            if config.Init.RebuildLocalWasm == "force" {
+                log.Info("Commencing force rebuilding of wasm store by setting codehash position in rebuilding to beginning")
                 if err := gethexec.WriteToKeyValueStore(wasmDb, gethexec.RebuildingPositionKey, common.Hash{}); err != nil {
                     return nil, nil, fmt.Errorf("unable to initialize codehash position in rebuilding of wasm store to beginning: %w", err)
                 }
+            } else {
+                position, err = gethexec.ReadFromKeyValueStore[common.Hash](wasmDb, gethexec.RebuildingPositionKey)
+                if err != nil {
+                    log.Info("Unable to get codehash position in rebuilding of wasm store, it's possible it isn't initialized yet, so initializing it and starting rebuilding", "err", err)
+                    if err := gethexec.WriteToKeyValueStore(wasmDb, gethexec.RebuildingPositionKey, common.Hash{}); err != nil {
+                        return nil, nil, fmt.Errorf("unable to initialize codehash position in rebuilding of wasm store to beginning: %w", err)
+                    }
+                }
             }
             if position != gethexec.RebuildingDone {
                 startBlockHash, err := gethexec.ReadFromKeyValueStore[common.Hash](wasmDb, gethexec.RebuildingStartBlockHashKey)
@@ -533,6 +622,9 @@
     if err != nil {
         return nil, nil, err
     }
+    if err := validateOrUpgradeWasmStoreSchemaVersion(wasmDb); err != nil {
+        return nil, nil, err
+    }
     chainDb := rawdb.WrapDatabaseWithWasm(chainData, wasmDb, 1)
     _, err = rawdb.ParseStateScheme(cacheConfig.StateScheme, chainDb)
     if err != nil {
diff --git a/cmd/nitro/init_test.go b/cmd/nitro/init_test.go
index 95a4b208d4..b2773ed861 100644
--- a/cmd/nitro/init_test.go
+++ b/cmd/nitro/init_test.go
@@ -22,12 +22,14 @@ import (
     "github.com/ethereum/go-ethereum/core/rawdb"
     "github.com/ethereum/go-ethereum/ethclient"
+    "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/node"
     "github.com/offchainlabs/nitro/arbnode"
     "github.com/offchainlabs/nitro/cmd/chaininfo"
     "github.com/offchainlabs/nitro/cmd/conf"
     "github.com/offchainlabs/nitro/execution/gethexec"
     "github.com/offchainlabs/nitro/util/testhelpers"
+    "github.com/offchainlabs/nitro/util/testhelpers/env"
 )

 const (
@@ -207,6 +209,7 @@ func TestSetLatestSnapshotUrl(t *testing.T) {
     testCases := []struct {
         name           string
+        chain          string
         latestContents string
         wantUrl        func(string) string
     }{
@@ -230,6 +233,12 @@ func TestSetLatestSnapshotUrl(t *testing.T) {
             latestContents: "https://some.domain.com/arb1/2024/21/archive.tar.gz",
             wantUrl:        func(serverAddr string) string { return "https://some.domain.com/arb1/2024/21/archive.tar.gz" },
         },
+        {
+            name:           "chain and contents with upper case",
+            chain:          "ARB1",
+            latestContents: "ARB1/2024/21/ARCHIVE.TAR.GZ",
"ARB1/2024/21/ARCHIVE.TAR.GZ", + wantUrl: func(serverAddr string) string { return serverAddr + "/arb1/2024/21/archive.tar.gz" }, + }, } for _, testCase := range testCases { @@ -237,6 +246,7 @@ func TestSetLatestSnapshotUrl(t *testing.T) { // Create latest file serverDir := t.TempDir() + err := os.Mkdir(filepath.Join(serverDir, chain), dirPerm) Require(t, err) err = os.WriteFile(filepath.Join(serverDir, chain, latestFile), []byte(testCase.latestContents), filePerm) @@ -251,7 +261,11 @@ func TestSetLatestSnapshotUrl(t *testing.T) { initConfig := conf.InitConfigDefault initConfig.Latest = snapshotKind initConfig.LatestBase = addr - err = setLatestSnapshotUrl(ctx, &initConfig, chain) + configChain := testCase.chain + if configChain == "" { + configChain = chain + } + err = setLatestSnapshotUrl(ctx, &initConfig, configChain) Require(t, err) // Check url @@ -404,3 +418,139 @@ func TestOpenInitializeChainDbIncompatibleStateScheme(t *testing.T) { t.Fatalf("Failed to detect incompatible state scheme") } } + +func writeKeys(t *testing.T, db ethdb.Database, keys [][]byte) { + t.Helper() + batch := db.NewBatch() + for _, key := range keys { + err := batch.Put(key, []byte("some data")) + if err != nil { + t.Fatal("Internal test error - failed to insert key:", err) + } + } + err := batch.Write() + if err != nil { + t.Fatal("Internal test error - failed to write batch:", err) + } + batch.Reset() +} + +func checkKeys(t *testing.T, db ethdb.Database, keys [][]byte, shouldExist bool) { + t.Helper() + for _, key := range keys { + has, err := db.Has(key) + if err != nil { + t.Fatal("Failed to check key existence, key: ", key) + } + if shouldExist && !has { + t.Fatal("Key not found:", key) + } + if !shouldExist && has { + t.Fatal("Key found:", key, "k3:", string(key[:3]), "len", len(key)) + } + } +} + +func TestPurgeVersion0WasmStoreEntries(t *testing.T) { + stackConf := node.DefaultConfig + stackConf.DataDir = t.TempDir() + stack, err := node.New(&stackConf) + if err != nil { + t.Fatalf("Failed to create test stack: %v", err) + } + defer stack.Close() + db, err := stack.OpenDatabaseWithExtraOptions("wasm", NodeConfigDefault.Execution.Caching.DatabaseCache, NodeConfigDefault.Persistent.Handles, "wasm/", false, nil) + if err != nil { + t.Fatalf("Failed to open test db: %v", err) + } + var version0Keys [][]byte + for i := 0; i < 20; i++ { + version0Keys = append(version0Keys, + append([]byte{0x00, 'w', 'a'}, testhelpers.RandomSlice(32)...)) + version0Keys = append(version0Keys, + append([]byte{0x00, 'w', 'm'}, testhelpers.RandomSlice(32)...)) + } + var collidedKeys [][]byte + for i := 0; i < 5; i++ { + collidedKeys = append(collidedKeys, + append([]byte{0x00, 'w', 'a'}, testhelpers.RandomSlice(31)...)) + collidedKeys = append(collidedKeys, + append([]byte{0x00, 'w', 'm'}, testhelpers.RandomSlice(31)...)) + collidedKeys = append(collidedKeys, + append([]byte{0x00, 'w', 'a'}, testhelpers.RandomSlice(33)...)) + collidedKeys = append(collidedKeys, + append([]byte{0x00, 'w', 'm'}, testhelpers.RandomSlice(33)...)) + } + var otherKeys [][]byte + for i := 0x00; i <= 0xff; i++ { + if byte(i) == 'a' || byte(i) == 'm' { + continue + } + otherKeys = append(otherKeys, + append([]byte{0x00, 'w', byte(i)}, testhelpers.RandomSlice(32)...)) + otherKeys = append(otherKeys, + append([]byte{0x00, 'w', byte(i)}, testhelpers.RandomSlice(32)...)) + } + for i := 0; i < 10; i++ { + var randomSlice []byte + var j int + for j = 0; j < 10; j++ { + randomSlice = testhelpers.RandomSlice(testhelpers.RandomUint64(1, 40)) + if len(randomSlice) 
>= 3 && !bytes.Equal(randomSlice[:3], []byte{0x00, 'w', 'm'}) && !bytes.Equal(randomSlice[:3], []byte{0x00, 'w', 'm'}) { + break + } + } + if j == 10 { + t.Fatal("Internal test error - failed to generate random key") + } + otherKeys = append(otherKeys, randomSlice) + } + writeKeys(t, db, version0Keys) + writeKeys(t, db, collidedKeys) + writeKeys(t, db, otherKeys) + checkKeys(t, db, version0Keys, true) + checkKeys(t, db, collidedKeys, true) + checkKeys(t, db, otherKeys, true) + err = purgeVersion0WasmStoreEntries(db) + if err != nil { + t.Fatal("Failed to purge version 0 keys, err:", err) + } + checkKeys(t, db, version0Keys, false) + checkKeys(t, db, collidedKeys, true) + checkKeys(t, db, otherKeys, true) +} + +func TestOpenInitializeChainDbEmptyInit(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stackConfig := testhelpers.CreateStackConfigForTest(t.TempDir()) + stack, err := node.New(stackConfig) + defer stack.Close() + Require(t, err) + + nodeConfig := NodeConfigDefault + nodeConfig.Execution.Caching.StateScheme = env.GetTestStateScheme() + nodeConfig.Chain.ID = 42161 + nodeConfig.Node = *arbnode.ConfigDefaultL2Test() + nodeConfig.Init.Empty = true + + l1Client := ethclient.NewClient(stack.Attach()) + + chainDb, blockchain, err := openInitializeChainDb( + ctx, + stack, + &nodeConfig, + new(big.Int).SetUint64(nodeConfig.Chain.ID), + gethexec.DefaultCacheConfigFor(stack, &nodeConfig.Execution.Caching), + &nodeConfig.Persistent, + l1Client, + chaininfo.RollupAddresses{}, + ) + Require(t, err) + blockchain.Stop() + err = chainDb.Close() + Require(t, err) +} diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 3ab63fcfc9..f5c37b1643 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -371,6 +371,7 @@ func mainImpl() int { if err != nil { log.Crit("error getting rollup addresses config", "err", err) } + // #nosec G115 addr, err := validatorwallet.GetValidatorWalletContract(ctx, deployInfo.ValidatorWalletCreator, int64(deployInfo.DeployedAt), l1TransactionOptsValidator, l1Reader, true) if err != nil { log.Crit("error creating validator wallet contract", "error", err, "address", l1TransactionOptsValidator.From.Hex()) @@ -582,7 +583,7 @@ func mainImpl() int { l1TransactionOptsBatchPoster, dataSigner, fatalErrChan, - big.NewInt(int64(nodeConfig.ParentChain.ID)), + new(big.Int).SetUint64(nodeConfig.ParentChain.ID), blobReader, ) if err != nil { @@ -942,10 +943,12 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa // Don't print wallet passwords if nodeConfig.Conf.Dump { err = confighelpers.DumpConfig(k, map[string]interface{}{ - "parent-chain.wallet.password": "", - "parent-chain.wallet.private-key": "", - "chain.dev-wallet.password": "", - "chain.dev-wallet.private-key": "", + "node.batch-poster.parent-chain-wallet.password": "", + "node.batch-poster.parent-chain-wallet.private-key": "", + "node.staker.parent-chain-wallet.password": "", + "node.staker.parent-chain-wallet.private-key": "", + "chain.dev-wallet.password": "", + "chain.dev-wallet.private-key": "", }) if err != nil { return nil, nil, err diff --git a/cmd/staterecovery/staterecovery.go b/cmd/staterecovery/staterecovery.go index bb01477414..5486ba3726 100644 --- a/cmd/staterecovery/staterecovery.go +++ b/cmd/staterecovery/staterecovery.go @@ -60,6 +60,7 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon break } if time.Since(logged) > 1*time.Minute { + // #nosec G115 log.Info("Recreating missing 
states", "block", current, "target", target, "remaining", int64(target)-int64(current), "elapsed", time.Since(start), "recreated", recreated) logged = time.Now() } diff --git a/cmd/util/confighelpers/configuration.go b/cmd/util/confighelpers/configuration.go index 55c9ec330f..19b5b1a24c 100644 --- a/cmd/util/confighelpers/configuration.go +++ b/cmd/util/confighelpers/configuration.go @@ -13,7 +13,6 @@ import ( "github.com/knadh/koanf" "github.com/knadh/koanf/parsers/json" - koanfjson "github.com/knadh/koanf/parsers/json" "github.com/knadh/koanf/providers/confmap" "github.com/knadh/koanf/providers/env" "github.com/knadh/koanf/providers/file" @@ -215,15 +214,17 @@ func devFlagArgs() []string { } func BeginCommonParse(f *flag.FlagSet, args []string) (*koanf.Koanf, error) { + var expandedArgs []string for _, arg := range args { if arg == "--version" || arg == "-v" { return nil, ErrVersion } else if arg == "--dev" { - args = devFlagArgs() - break + expandedArgs = append(expandedArgs, devFlagArgs()...) + } else { + expandedArgs = append(expandedArgs, arg) } } - if err := f.Parse(args); err != nil { + if err := f.Parse(expandedArgs); err != nil { return nil, err } @@ -305,7 +306,7 @@ func DumpConfig(k *koanf.Koanf, extraOverrideFields map[string]interface{}) erro return fmt.Errorf("error removing extra parameters before dump: %w", err) } - c, err := k.Marshal(koanfjson.Parser()) + c, err := k.Marshal(json.Parser()) if err != nil { return fmt.Errorf("unable to marshal config file to JSON: %w", err) } diff --git a/das/aggregator.go b/das/aggregator.go index d944f8d48a..e8972447ad 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -166,6 +166,7 @@ type storeResponse struct { // If Store gets not enough successful responses by the time its context is canceled // (eg via TimeoutWrapper) then it also returns an error. 
 func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) {
+    // #nosec G115
     log.Trace("das.Aggregator.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0))

     allBackendsSucceeded := false
diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go
index ca2ee8e7d4..635696bdab 100644
--- a/das/dasRpcClient.go
+++ b/das/dasRpcClient.go
@@ -138,6 +138,7 @@ func (c *DASRPCClient) sendChunk(ctx context.Context, batchId, i uint64, chunk [
 }

 func (c *DASRPCClient) legacyStore(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) {
+    // #nosec G115
     log.Trace("das.DASRPCClient.Store(...)", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "this", *c)

     reqSig, err := applyDasSigner(c.signer, message, timeout)
diff --git a/das/dasRpcServer.go b/das/dasRpcServer.go
index 9e6228ca5d..d14766cc7e 100644
--- a/das/dasRpcServer.go
+++ b/das/dasRpcServer.go
@@ -108,6 +108,7 @@ type StoreResult struct {
 }

 func (s *DASRPCServer) Store(ctx context.Context, message hexutil.Bytes, timeout hexutil.Uint64, sig hexutil.Bytes) (*StoreResult, error) {
+    // #nosec G115
     log.Trace("dasRpc.DASRPCServer.Store", "message", pretty.FirstFewBytes(message), "message length", len(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", s)
     rpcStoreRequestGauge.Inc(1)
     start := time.Now()
@@ -277,6 +278,7 @@ func (s *DASRPCServer) StartChunkedStore(ctx context.Context, timestamp, nChunks
     }

     // Prevent replay of old messages
+    // #nosec G115
     if time.Since(time.Unix(int64(timestamp), 0)).Abs() > time.Minute {
         return nil, errors.New("too much time has elapsed since request was signed")
     }
diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go
index d873f0568d..2bcbccaae3 100644
--- a/das/dastree/dastree.go
+++ b/das/dastree/dastree.go
@@ -61,12 +61,13 @@ func RecordHash(record func(bytes32, []byte, arbutil.PreimageType), preimage ...
        return arbmath.FlipBit(keccord(prepend(LeafByte, keccord([]byte{}).Bytes())), 0)
     }

-    length := uint32(len(unrolled))
+    length := len(unrolled)
     leaves := []node{}
-    for bin := uint32(0); bin < length; bin += BinSize {
+    for bin := 0; bin < length; bin += BinSize {
         end := arbmath.MinInt(bin+BinSize, length)
         hash := keccord(prepend(LeafByte, keccord(unrolled[bin:end]).Bytes()))
-        leaves = append(leaves, node{hash, end - bin})
+        // #nosec G115
+        leaves = append(leaves, node{hash, uint32(end - bin)})
     }

     layer := leaves
@@ -186,7 +187,9 @@ func Content(root bytes32, oracle func(bytes32) ([]byte, error)) ([]byte, error)
             leaves = append(leaves, leaf)
         case NodeByte:
             count := binary.BigEndian.Uint32(data[64:])
-            power := uint32(arbmath.NextOrCurrentPowerOf2(uint64(count)))
+            power := arbmath.NextOrCurrentPowerOf2(uint64(count))
+            // #nosec G115
+            halfPower := uint32(power / 2)

             if place.size != count {
                 return nil, fmt.Errorf("invalid size data: %v vs %v for %v", count, place.size, data)
@@ -194,11 +197,11 @@ func Content(root bytes32, oracle func(bytes32) ([]byte, error)) ([]byte, error)
             prior := node{
                 hash: common.BytesToHash(data[:32]),
-                size: power / 2,
+                size: halfPower,
             }
             after := node{
                 hash: common.BytesToHash(data[32:64]),
-                size: count - power/2,
+                size: count - halfPower,
             }

             // we want to expand leftward so we reverse their order
diff --git a/das/db_storage_service.go b/das/db_storage_service.go
index e3b6183c37..1d9e5348d4 100644
--- a/das/db_storage_service.go
+++ b/das/db_storage_service.go
@@ -8,6 +8,7 @@ import (
     "context"
     "errors"
     "fmt"
+    "math"
     "os"
     "path/filepath"
     "time"
@@ -172,7 +173,8 @@ func (dbs *DBStorageService) Put(ctx context.Context, data []byte, timeout uint6

     return dbs.db.Update(func(txn *badger.Txn) error {
         e := badger.NewEntry(dastree.HashBytes(data), data)
-        if dbs.discardAfterTimeout {
+        if dbs.discardAfterTimeout && timeout <= math.MaxInt64 {
+            // #nosec G115
             e = e.WithTTL(time.Until(time.Unix(int64(timeout), 0)))
         }
         return txn.SetEntry(e)
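The badger-backed store above, and the local-file and S3 services that follow, all guard `time.Unix(int64(timeout), 0)` with a `timeout <= math.MaxInt64` check before converting the unsigned expiry. A minimal sketch of that recurring pattern as a reusable helper (`expiryTime` is a hypothetical name, not part of this diff):

    package dasutil

    import (
        "math"
        "time"
    )

    // expiryTime converts an unsigned DAS expiry timestamp into a time.Time.
    // The boolean reports whether the value fit into an int64; callers skip
    // TTL handling when it did not, exactly as the services in this diff do.
    func expiryTime(timeout uint64) (time.Time, bool) {
        if timeout > math.MaxInt64 {
            return time.Time{}, false
        }
        return time.Unix(int64(timeout), 0), true
    }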
diff --git a/das/local_file_storage_service.go b/das/local_file_storage_service.go
index 65ca6fe15c..ce86786718 100644
--- a/das/local_file_storage_service.go
+++ b/das/local_file_storage_service.go
@@ -9,6 +9,7 @@ import (
     "errors"
     "fmt"
     "io"
+    "math"
     "os"
     "path"
     "path/filepath"
@@ -133,6 +134,10 @@ func (s *LocalFileStorageService) GetByHash(ctx context.Context, key common.Hash

 func (s *LocalFileStorageService) Put(ctx context.Context, data []byte, expiry uint64) error {
     logPut("das.LocalFileStorageService.Store", data, expiry, s)
+    if expiry > math.MaxInt64 {
+        return fmt.Errorf("request expiry time (%v) exceeds max int64", expiry)
+    }
+    // #nosec G115
     expiryTime := time.Unix(int64(expiry), 0)
     currentTimePlusRetention := time.Now().Add(s.config.MaxRetention)
     if expiryTime.After(currentTimePlusRetention) {
@@ -182,6 +187,7 @@ func (s *LocalFileStorageService) Put(ctx context.Context, data []byte, expiry u
     // new flat layout files, set their modification time accordingly.
     if s.enableLegacyLayout {
         tv := syscall.Timeval{
+            // #nosec G115
             Sec:  int64(expiry - uint64(s.legacyLayout.retention.Seconds())),
             Usec: 0,
         }
diff --git a/das/local_file_storage_service_test.go b/das/local_file_storage_service_test.go
index cc27e293e3..01b999f356 100644
--- a/das/local_file_storage_service_test.go
+++ b/das/local_file_storage_service_test.go
@@ -99,6 +99,7 @@ func TestMigrationNoExpiry(t *testing.T) {
     getByHashAndCheck(t, s, "a", "b", "c", "d")

     // Can still iterate by timestamp even if expiry disabled
+    // #nosec G115
     countTimestampEntries(t, &s.layout, time.Unix(int64(now+11), 0), 4)
 }
diff --git a/das/s3_storage_service.go b/das/s3_storage_service.go
index a1de200c52..d251f12214 100644
--- a/das/s3_storage_service.go
+++ b/das/s3_storage_service.go
@@ -8,6 +8,7 @@ import (
     "context"
     "fmt"
     "io"
+    "math"
     "time"

     "github.com/aws/aws-sdk-go-v2/aws"
@@ -110,7 +111,8 @@ func (s3s *S3StorageService) Put(ctx context.Context, value []byte, timeout uint
         Bucket: aws.String(s3s.bucket),
         Key:    aws.String(s3s.objectPrefix + EncodeStorageServiceKey(dastree.Hash(value))),
         Body:   bytes.NewReader(value)}
-    if !s3s.discardAfterTimeout {
+    if s3s.discardAfterTimeout && timeout <= math.MaxInt64 {
+        // #nosec G115
         expires := time.Unix(int64(timeout), 0)
         putObjectInput.Expires = &expires
     }
diff --git a/das/sign_after_store_das_writer.go b/das/sign_after_store_das_writer.go
index 0e31d30ae9..40b03847d8 100644
--- a/das/sign_after_store_das_writer.go
+++ b/das/sign_after_store_das_writer.go
@@ -105,6 +105,7 @@ func NewSignAfterStoreDASWriter(ctx context.Context, config DataAvailabilityConf
 }

 func (d *SignAfterStoreDASWriter) Store(ctx context.Context, message []byte, timeout uint64) (c *daprovider.DataAvailabilityCertificate, err error) {
+    // #nosec G115
     log.Trace("das.SignAfterStoreDASWriter.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "this", d)
     c = &daprovider.DataAvailabilityCertificate{
         Timeout: timeout,
diff --git a/das/simple_das_reader_aggregator.go b/das/simple_das_reader_aggregator.go
index dc6147a7e4..f45c56afe0 100644
--- a/das/simple_das_reader_aggregator.go
+++ b/das/simple_das_reader_aggregator.go
@@ -50,8 +50,8 @@ var DefaultRestfulClientAggregatorConfig = RestfulClientAggregatorConfig{
 }

 type SimpleExploreExploitStrategyConfig struct {
-    ExploreIterations int `koanf:"explore-iterations"`
-    ExploitIterations int `koanf:"exploit-iterations"`
+    ExploreIterations uint32 `koanf:"explore-iterations"`
+    ExploitIterations uint32 `koanf:"exploit-iterations"`
 }

 var DefaultSimpleExploreExploitStrategyConfig = SimpleExploreExploitStrategyConfig{
@@ -73,8 +73,8 @@ func RestfulClientAggregatorConfigAddOptions(prefix string, f *flag.FlagSet) {
 }

 func SimpleExploreExploitStrategyConfigAddOptions(prefix string, f *flag.FlagSet) {
-    f.Int(prefix+".explore-iterations", DefaultSimpleExploreExploitStrategyConfig.ExploreIterations, "number of consecutive GetByHash calls to the aggregator where each call will cause it to randomly select from REST endpoints until one returns successfully, before switching to exploit mode")
-    f.Int(prefix+".exploit-iterations", DefaultSimpleExploreExploitStrategyConfig.ExploitIterations, "number of consecutive GetByHash calls to the aggregator where each call will cause it to select from REST endpoints in order of best latency and success rate, before switching to explore mode")
+    f.Uint32(prefix+".explore-iterations", DefaultSimpleExploreExploitStrategyConfig.ExploreIterations, "number of consecutive GetByHash calls to the aggregator where each call will cause it to randomly select from REST endpoints until one returns successfully, before switching to exploit mode")
+    f.Uint32(prefix+".exploit-iterations", DefaultSimpleExploreExploitStrategyConfig.ExploitIterations, "number of consecutive GetByHash calls to the aggregator where each call will cause it to select from REST endpoints in order of best latency and success rate, before switching to explore mode")
 }

 func NewRestfulClientAggregator(ctx context.Context, config *RestfulClientAggregatorConfig) (*SimpleDASReaderAggregator, error) {
@@ -120,8 +120,8 @@
     switch strings.ToLower(config.Strategy) {
     case "simple-explore-exploit":
         a.strategy = &simpleExploreExploitStrategy{
-            exploreIterations: uint32(config.SimpleExploreExploitStrategy.ExploreIterations),
-            exploitIterations: uint32(config.SimpleExploreExploitStrategy.ExploitIterations),
+            exploreIterations: config.SimpleExploreExploitStrategy.ExploreIterations,
+            exploitIterations: config.SimpleExploreExploitStrategy.ExploitIterations,
         }
     case "testing-sequential":
         a.strategy = &testingSequentialStrategy{}
diff --git a/das/syncing_fallback_storage.go b/das/syncing_fallback_storage.go
index ee0ddc6b5f..8af46b7fc5 100644
--- a/das/syncing_fallback_storage.go
+++ b/das/syncing_fallback_storage.go
@@ -105,9 +105,9 @@ type l1SyncService struct {
     lastBatchAcc  common.Hash
 }

-// The original syncing process had a bug, so the file was renamed to cause any mirrors
-// in the wild to re-sync from their configured starting block number.
-const nextBlockNoFilename = "nextBlockNumberV2"
+// The filename is updated whenever we discover a bug that may have impacted
+// syncing, in order to force mirrors to re-sync.
+const nextBlockNoFilename = "nextBlockNumberV3"

 func readSyncStateOrDefault(syncDir string, dflt uint64) uint64 {
     if syncDir == "" {
diff --git a/das/util.go b/das/util.go
index de266c433f..114e075e79 100644
--- a/das/util.go
+++ b/das/util.go
@@ -13,11 +13,13 @@ import (

 func logPut(store string, data []byte, timeout uint64, reader daprovider.DASReader, more ...interface{}) {
     if len(more) == 0 {
+        // #nosec G115
         log.Trace(
             store, "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(timeout), 0),
             "this", reader,
         )
     } else {
+        // #nosec G115
         log.Trace(
             store, "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(timeout), 0),
             "this", reader, more,
diff --git a/execution/gethexec/api.go b/execution/gethexec/api.go
index c19072ae77..2bff8026c2 100644
--- a/execution/gethexec/api.go
+++ b/execution/gethexec/api.go
@@ -78,6 +78,7 @@ func (api *ArbDebugAPI) evenlySpaceBlocks(start, end rpc.BlockNumber) (uint64, u
     end, _ = api.blockchain.ClipToPostNitroGenesis(end)

     blocks := end.Int64() - start.Int64() + 1
+    // #nosec G115
     bound := int64(api.blockRangeBound)
     step := int64(1)
     if blocks > bound {
diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go
index d8a592736c..19d77fc38f 100644
--- a/execution/gethexec/executionengine.go
+++ b/execution/gethexec/executionengine.go
@@ -27,6 +27,7 @@ import (
     "time"

     "github.com/ethereum/go-ethereum/core"
+    "github.com/ethereum/go-ethereum/core/rawdb"
     "github.com/ethereum/go-ethereum/core/state"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/log"
@@ -137,7 +138,7 @@ func (s *ExecutionEngine) MarkFeedStart(to arbutil.MessageIndex) {
     defer s.cachedL1PriceData.mutex.Unlock()

     if to < s.cachedL1PriceData.startOfL1PriceDataCache {
-        log.Info("trying to trim older cache which doesnt exist anymore")
+        log.Debug("trying to trim older L1 price data cache which doesn't exist anymore")
     } else if to >= s.cachedL1PriceData.endOfL1PriceDataCache {
         s.cachedL1PriceData.startOfL1PriceDataCache = 0
         s.cachedL1PriceData.endOfL1PriceDataCache = 0
@@ -149,10 +150,25 @@ func (s *ExecutionEngine) MarkFeedStart(to arbutil.MessageIndex) {
     }
 }

-func (s *ExecutionEngine) Initialize(rustCacheSize uint32) {
+func (s *ExecutionEngine) Initialize(rustCacheSize uint32, targetConfig *StylusTargetConfig) error {
     if rustCacheSize != 0 {
         programs.ResizeWasmLruCache(rustCacheSize)
     }
+    var effectiveStylusTarget string
+    target := rawdb.LocalTarget()
+    switch target {
+    case rawdb.TargetArm64:
+        effectiveStylusTarget = targetConfig.Arm64
+    case rawdb.TargetAmd64:
+        effectiveStylusTarget = targetConfig.Amd64
+    case rawdb.TargetHost:
+        effectiveStylusTarget = targetConfig.Host
+    }
+    err := programs.SetTarget(target, effectiveStylusTarget, true)
+    if err != nil {
+        return fmt.Errorf("failed to set stylus target: %w", err)
+    }
+    return nil
 }

 func (s *ExecutionEngine) SetRecorder(recorder *BlockRecorder) {
diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go
index af40b4b3f7..b864332e83 100644
--- a/execution/gethexec/node.go
+++ b/execution/gethexec/node.go
@@ -19,6 +19,7 @@ import (
     "github.com/ethereum/go-ethereum/node"
     "github.com/ethereum/go-ethereum/rpc"
     "github.com/offchainlabs/nitro/arbos/arbostypes"
+    "github.com/offchainlabs/nitro/arbos/programs"
     "github.com/offchainlabs/nitro/arbutil"
     "github.com/offchainlabs/nitro/execution"
     "github.com/offchainlabs/nitro/solgen/go/precompilesgen"
@@ -27,6 +28,24 @@ import (
     flag "github.com/spf13/pflag"
 )

+type StylusTargetConfig struct {
+    Arm64 string `koanf:"arm64"`
`koanf:"arm64"` + Amd64 string `koanf:"amd64"` + Host string `koanf:"host"` +} + +var DefaultStylusTargetConfig = StylusTargetConfig{ + Arm64: programs.DefaultTargetDescriptionArm, + Amd64: programs.DefaultTargetDescriptionX86, + Host: "", +} + +func StylusTargetConfigAddOptions(prefix string, f *flag.FlagSet) { + f.String(prefix+".arm64", DefaultStylusTargetConfig.Arm64, "stylus programs compilation target for arm64 linux") + f.String(prefix+".amd64", DefaultStylusTargetConfig.Amd64, "stylus programs compilation target for amd64 linux") + f.String(prefix+".host", DefaultStylusTargetConfig.Host, "stylus programs compilation target for system other than 64-bit ARM or 64-bit x86") +} + type Config struct { ParentChainReader headerreader.Config `koanf:"parent-chain-reader" reload:"hot"` Sequencer SequencerConfig `koanf:"sequencer" reload:"hot"` @@ -40,6 +59,7 @@ type Config struct { TxLookupLimit uint64 `koanf:"tx-lookup-limit"` EnablePrefetchBlock bool `koanf:"enable-prefetch-block"` SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` + StylusTarget StylusTargetConfig `koanf:"stylus-target"` forwardingTarget string } @@ -78,6 +98,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) { SyncMonitorConfigAddOptions(prefix+".sync-monitor", f) f.Uint64(prefix+".tx-lookup-limit", ConfigDefault.TxLookupLimit, "retain the ability to lookup transactions by hash for the past N blocks (0 = all blocks)") f.Bool(prefix+".enable-prefetch-block", ConfigDefault.EnablePrefetchBlock, "enable prefetching of blocks") + StylusTargetConfigAddOptions(prefix+".stylus-target", f) } var ConfigDefault = Config{ @@ -92,6 +113,7 @@ var ConfigDefault = Config{ Caching: DefaultCachingConfig, Forwarder: DefaultNodeForwarderConfig, EnablePrefetchBlock: true, + StylusTarget: DefaultStylusTargetConfig, } type ConfigFetcher func() *Config @@ -251,9 +273,13 @@ func (n *ExecutionNode) MarkFeedStart(to arbutil.MessageIndex) { } func (n *ExecutionNode) Initialize(ctx context.Context) error { - n.ExecEngine.Initialize(n.ConfigFetcher().Caching.StylusLRUCache) + config := n.ConfigFetcher() + err := n.ExecEngine.Initialize(config.Caching.StylusLRUCache, &config.StylusTarget) + if err != nil { + return fmt.Errorf("error initializing execution engine: %w", err) + } n.ArbInterface.Initialize(n) - err := n.Backend.Start() + err = n.Backend.Start() if err != nil { return fmt.Errorf("error starting geth backend: %w", err) } diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 90e3082062..819cd10500 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -887,6 +887,7 @@ func (s *Sequencer) createBlock(ctx context.Context) (returnValue bool) { for _, queueItem := range queueItems { s.txRetryQueue.Push(queueItem) } + // #nosec G115 log.Error( "cannot sequence: unknown L1 block or L1 timestamp too far from local clock time", "l1Block", l1Block, @@ -1037,10 +1038,14 @@ func (s *Sequencer) updateExpectedSurplus(ctx context.Context) (int64, error) { if err != nil { return 0, fmt.Errorf("error encountered getting l1 pricing surplus while updating expectedSurplus: %w", err) } + // #nosec G115 backlogL1GasCharged := int64(s.execEngine.backlogL1GasCharged()) + // #nosec G115 backlogCallDataUnits := int64(s.execEngine.backlogCallDataUnits()) + // #nosec G115 expectedSurplus := int64(surplus) + backlogL1GasCharged - backlogCallDataUnits*int64(l1GasPrice) // update metrics + // #nosec G115 l1GasPriceGauge.Update(int64(l1GasPrice)) callDataUnitsBacklogGauge.Update(backlogCallDataUnits) 
diff --git a/execution/gethexec/stylus_tracer.go b/execution/gethexec/stylus_tracer.go
new file mode 100644
index 0000000000..16c43d71d8
--- /dev/null
+++ b/execution/gethexec/stylus_tracer.go
@@ -0,0 +1,184 @@
+// Copyright 2024, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
+
+package gethexec
+
+import (
+    "encoding/json"
+    "fmt"
+    "math/big"
+    "strings"
+    "sync/atomic"
+
+    "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/common/hexutil"
+    "github.com/ethereum/go-ethereum/core/vm"
+    "github.com/ethereum/go-ethereum/eth/tracers"
+    "github.com/offchainlabs/nitro/util/stack"
+)
+
+func init() {
+    tracers.DefaultDirectory.Register("stylusTracer", newStylusTracer, false)
+}
+
+// stylusTracer captures Stylus HostIOs and returns them in a structured format to be used in Cargo
+// Stylus Replay.
+type stylusTracer struct {
+    open      *stack.Stack[HostioTraceInfo]
+    stack     *stack.Stack[*stack.Stack[HostioTraceInfo]]
+    interrupt atomic.Bool
+    reason    error
+}
+
+// HostioTraceInfo contains the captured HostIO log returned by stylusTracer.
+type HostioTraceInfo struct {
+    // Name of the HostIO.
+    Name string `json:"name"`
+
+    // Arguments of the HostIO encoded as binary.
+    // For details about the encoding check the HostIO implementation on
+    // arbitrator/wasm-libraries/user-host-trait.
+    Args hexutil.Bytes `json:"args"`
+
+    // Outputs of the HostIO encoded as binary.
+    // For details about the encoding check the HostIO implementation on
+    // arbitrator/wasm-libraries/user-host-trait.
+    Outs hexutil.Bytes `json:"outs"`
+
+    // Amount of Ink before executing the HostIO.
+    StartInk uint64 `json:"startInk"`
+
+    // Amount of Ink after executing the HostIO.
+    EndInk uint64 `json:"endInk"`
+
+    // For *call HostIOs, the address of the called contract.
+    Address *common.Address `json:"address,omitempty"`
+
+    // For *call HostIOs, the steps performed by the called contract.
+    Steps *stack.Stack[HostioTraceInfo] `json:"steps,omitempty"`
+}
+
+// nestsHostios contains the hostios with nested calls.
+var nestsHostios = map[string]bool{
+    "call_contract":          true,
+    "delegate_call_contract": true,
+    "static_call_contract":   true,
+}
+
+func newStylusTracer(ctx *tracers.Context, _ json.RawMessage) (tracers.Tracer, error) {
+    return &stylusTracer{
+        open:  stack.NewStack[HostioTraceInfo](),
+        stack: stack.NewStack[*stack.Stack[HostioTraceInfo]](),
+    }, nil
+}
+
+func (t *stylusTracer) CaptureStylusHostio(name string, args, outs []byte, startInk, endInk uint64) {
+    if t.interrupt.Load() {
+        return
+    }
+    info := HostioTraceInfo{
+        Name:     name,
+        Args:     args,
+        Outs:     outs,
+        StartInk: startInk,
+        EndInk:   endInk,
+    }
+    if nestsHostios[name] {
+        last, err := t.open.Pop()
+        if err != nil {
+            t.Stop(err)
+            return
+        }
+        if !strings.HasPrefix(last.Name, "evm_") || last.Name[4:] != info.Name {
+            t.Stop(fmt.Errorf("trace inconsistency for %v: last opcode is %v", info.Name, last.Name))
+            return
+        }
+        if last.Steps == nil {
+            t.Stop(fmt.Errorf("trace inconsistency for %v: nil steps", info.Name))
+            return
+        }
+        info.Address = last.Address
+        info.Steps = last.Steps
+    }
+    t.open.Push(info)
+}
+
+func (t *stylusTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+    if t.interrupt.Load() {
+        return
+    }
+
+    // This function adds the prefix evm_ because it assumes the opcode came from the EVM.
+    // If the opcode comes from WASM, the CaptureStylusHostio function will remove the evm prefix.
+    var name string
+    switch typ {
+    case vm.CALL:
+        name = "evm_call_contract"
+    case vm.DELEGATECALL:
+        name = "evm_delegate_call_contract"
+    case vm.STATICCALL:
+        name = "evm_static_call_contract"
+    case vm.CREATE:
+        name = "evm_create1"
+    case vm.CREATE2:
+        name = "evm_create2"
+    case vm.SELFDESTRUCT:
+        name = "evm_self_destruct"
+    }
+
+    inner := stack.NewStack[HostioTraceInfo]()
+    info := HostioTraceInfo{
+        Name:    name,
+        Address: &to,
+        Steps:   inner,
+    }
+    t.open.Push(info)
+    t.stack.Push(t.open)
+    t.open = inner
+}
+
+func (t *stylusTracer) CaptureExit(output []byte, gasUsed uint64, _ error) {
+    if t.interrupt.Load() {
+        return
+    }
+    var err error
+    t.open, err = t.stack.Pop()
+    if err != nil {
+        t.Stop(err)
+    }
+}
+
+func (t *stylusTracer) GetResult() (json.RawMessage, error) {
+    if t.reason != nil {
+        return nil, t.reason
+    }
+    if t.open == nil {
+        return nil, fmt.Errorf("trace is nil")
+    }
+    msg, err := json.Marshal(t.open)
+    if err != nil {
+        return nil, err
+    }
+    return msg, nil
+}
+
+func (t *stylusTracer) Stop(err error) {
+    t.reason = err
+    t.interrupt.Store(true)
+}
+
+// Unimplemented EVMLogger interface methods
+
+func (t *stylusTracer) CaptureArbitrumTransfer(env *vm.EVM, from, to *common.Address, value *big.Int, before bool, purpose string) {
+}
+func (t *stylusTracer) CaptureArbitrumStorageGet(key common.Hash, depth int, before bool)        {}
+func (t *stylusTracer) CaptureArbitrumStorageSet(key, value common.Hash, depth int, before bool) {}
+func (t *stylusTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
+}
+func (t *stylusTracer) CaptureEnd(output []byte, gasUsed uint64, err error) {}
+func (t *stylusTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
+}
+func (t *stylusTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) {
+}
+func (t *stylusTracer) CaptureTxStart(gasLimit uint64) {}
+func (t *stylusTracer) CaptureTxEnd(restGas uint64)    {}
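Because the tracer registers itself under the name "stylusTracer" in geth's tracer directory, clients select it through the standard debug tracing API. A hedged usage sketch, assuming a node with the debug namespace enabled (the endpoint and transaction hash are placeholders; only the tracer name comes from the diff):

    package tracedemo

    import (
        "context"
        "encoding/json"

        "github.com/ethereum/go-ethereum/rpc"
    )

    func traceStylusTx(ctx context.Context, url, txHash string) (json.RawMessage, error) {
        client, err := rpc.DialContext(ctx, url)
        if err != nil {
            return nil, err
        }
        defer client.Close()
        var trace json.RawMessage
        // "stylusTracer" matches the name registered in the init() above
        err = client.CallContext(ctx, &trace, "debug_traceTransaction", txHash,
            map[string]any{"tracer": "stylusTracer"})
        return trace, err
    }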
diff --git a/execution/gethexec/tx_pre_checker.go b/execution/gethexec/tx_pre_checker.go
index dacfd32e81..e0ae330148 100644
--- a/execution/gethexec/tx_pre_checker.go
+++ b/execution/gethexec/tx_pre_checker.go
@@ -43,7 +43,7 @@ type TxPreCheckerConfig struct {
 type TxPreCheckerConfigFetcher func() *TxPreCheckerConfig

 var DefaultTxPreCheckerConfig = TxPreCheckerConfig{
-    Strictness:             TxPreCheckerStrictnessNone,
+    Strictness:             TxPreCheckerStrictnessLikelyCompatible,
     RequiredStateAge:       2,
     RequiredStateMaxBlocks: 4,
 }
@@ -161,6 +161,7 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty
         oldHeader := header
         blocksTraversed := uint(0)
         // find a block that's old enough
+        // #nosec G115
         for now-int64(oldHeader.Time) < config.RequiredStateAge &&
             (config.RequiredStateMaxBlocks <= 0 || blocksTraversed < config.RequiredStateMaxBlocks) &&
             oldHeader.Number.Uint64() > 0 {
diff --git a/execution/nodeInterface/NodeInterface.go b/execution/nodeInterface/NodeInterface.go
index 9179a52718..45fcebcdfa 100644
--- a/execution/nodeInterface/NodeInterface.go
+++ b/execution/nodeInterface/NodeInterface.go
@@ -7,6 +7,7 @@ import (
     "context"
     "errors"
     "fmt"
+    "math"
     "math/big"
     "sort"

@@ -234,6 +235,7 @@ func (n NodeInterface) ConstructOutboxProof(c ctx, evm mech, size, leaf uint64)
     }

     balanced := size == arbmath.NextPowerOf2(size)/2
+    // #nosec G115
     treeLevels := int(arbmath.Log2ceil(size)) // the # of levels in the tree
     proofLevels := treeLevels - 1             // the # of levels where a hash is needed (all but root)
     walkLevels := treeLevels                  // the # of levels we need to consider when building walks
@@ -297,6 +299,7 @@ func (n NodeInterface) ConstructOutboxProof(c ctx, evm mech, size, leaf uint64)
             mid := (lo + hi) / 2

+            // #nosec G115
             block, err := n.backend.BlockByNumber(n.context, rpc.BlockNumber(mid))
             if err != nil {
                 searchErr = err
@@ -643,6 +646,10 @@ func (n NodeInterface) LegacyLookupMessageBatchProof(c ctx, evm mech, batchNum h
 // L2BlockRangeForL1 fetches the L1 block number of a given l2 block number.
 // c ctx and evm mech arguments are not used but supplied to match the precompile function type in NodeInterface contract
 func (n NodeInterface) BlockL1Num(c ctx, evm mech, l2BlockNum uint64) (uint64, error) {
+    if l2BlockNum > math.MaxInt64 {
+        return 0, fmt.Errorf("requested l2 block number %d out of range for int64", l2BlockNum)
+    }
+    // #nosec G115
     blockHeader, err := n.backend.HeaderByNumber(n.context, rpc.BlockNumber(l2BlockNum))
     if err != nil {
         return 0, err
diff --git a/go-ethereum b/go-ethereum
index a1fc200e5b..575062fad7 160000
--- a/go-ethereum
+++ b/go-ethereum
@@ -1 +1 @@
-Subproject commit a1fc200e5b85a7737a9834ec28fb768fb7bde7bd
+Subproject commit 575062fad7ff4db9d7c235f49472f658be29e2fe
diff --git a/go.mod b/go.mod
index 6649973725..da49b0d8b9 100644
--- a/go.mod
+++ b/go.mod
@@ -41,7 +41,6 @@ require (
     github.com/rivo/tview v0.0.0-20240307173318-e804876934a1
     github.com/spf13/pflag v1.0.5
     github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
-    github.com/wasmerio/wasmer-go v1.0.4
     github.com/wealdtech/go-merkletree v1.0.0
     golang.org/x/crypto v0.21.0
     golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa
diff --git a/go.sum b/go.sum
index 8529b2497d..c0193be769 100644
--- a/go.sum
+++ b/go.sum
@@ -704,8 +704,6 @@ github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9
 github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
 github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
 github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
-github.com/wasmerio/wasmer-go v1.0.4 h1:MnqHoOGfiQ8MMq2RF6wyCeebKOe84G88h5yv+vmxJgs=
-github.com/wasmerio/wasmer-go v1.0.4/go.mod h1:0gzVdSfg6pysA6QVp6iVRPTagC6Wq9pOE8J86WKb2Fk=
 github.com/wealdtech/go-merkletree v1.0.0 h1:DsF1xMzj5rK3pSQM6mPv8jlyJyHXhFxpnA2bwEjMMBY=
 github.com/wealdtech/go-merkletree v1.0.0/go.mod h1:cdil512d/8ZC7Kx3bfrDvGMQXB25NTKbsm0rFrmDax4=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
diff --git a/precompiles/ArbAddressTable.go b/precompiles/ArbAddressTable.go
index 05f2275fd7..102fd55c3b 100644
--- a/precompiles/ArbAddressTable.go
+++ b/precompiles/ArbAddressTable.go
@@ -33,7 +33,7 @@ func (con ArbAddressTable) Decompress(c ctx, evm mech, buf []uint8, offset huge)
         return addr{}, nil, errors.New("invalid offset in ArbAddressTable.Decompress")
     }
     result, nbytes, err := c.State.AddressTable().Decompress(buf[ioffset:])
-    return result, big.NewInt(int64(nbytes)), err
+    return result, new(big.Int).SetUint64(nbytes), err
 }

 // Lookup the index of an address in the table
@@ -45,7 +45,7 @@ func (con ArbAddressTable) Lookup(c ctx, evm mech, addr addr) (huge, error) {
     if !exists {
         return nil, errors.New("address does not exist in AddressTable")
     }
-    return big.NewInt(int64(result)), nil
+    return new(big.Int).SetUint64(result), nil
 }

 // LookupIndex for an address in the table by index
@@ -66,11 +66,11 @@ func (con ArbAddressTable) LookupIndex(c ctx, evm mech, index huge) (addr, error
 // Register adds an account to the table, shrinking its compressed representation
 func (con ArbAddressTable) Register(c ctx, evm mech, addr addr) (huge, error) {
     slot, err := c.State.AddressTable().Register(addr)
-    return big.NewInt(int64(slot)), err
+    return new(big.Int).SetUint64(slot), err
 }

 // Size gets the number of addresses in the table
 func (con ArbAddressTable) Size(c ctx, evm mech) (huge, error) {
     size, err := c.State.AddressTable().Size()
-    return big.NewInt(int64(size)), err
+    return new(big.Int).SetUint64(size), err
 }
diff --git a/precompiles/ArbRetryableTx.go b/precompiles/ArbRetryableTx.go
index d508d75752..93e8023603 100644
--- a/precompiles/ArbRetryableTx.go
+++ b/precompiles/ArbRetryableTx.go
@@ -149,7 +149,7 @@ func (con ArbRetryableTx) GetTimeout(c ctx, evm mech, ticketId bytes32) (huge, e
     if err != nil {
         return nil, err
     }
-    return big.NewInt(int64(timeout)), nil
+    return new(big.Int).SetUint64(timeout), nil
 }

 // Keepalive adds one lifetime period to the ticket's expiry
@@ -176,8 +176,9 @@ func (con ArbRetryableTx) Keepalive(c ctx, evm mech, ticketId bytes32) (huge, er
         return big.NewInt(0), err
     }

-    err = con.LifetimeExtended(c, evm, ticketId, big.NewInt(int64(newTimeout)))
-    return big.NewInt(int64(newTimeout)), err
+    bigNewTimeout := new(big.Int).SetUint64(newTimeout)
+    err = con.LifetimeExtended(c, evm, ticketId, bigNewTimeout)
+    return bigNewTimeout, err
 }

 // GetBeneficiary gets the beneficiary of the ticket
diff --git a/precompiles/ArbSys.go b/precompiles/ArbSys.go
index 13f56d3b8e..d55067a09c 100644
--- a/precompiles/ArbSys.go
+++ b/precompiles/ArbSys.go
@@ -162,7 +162,7 @@ func (con *ArbSys) SendTxToL1(c ctx, evm mech, value huge, destination addr, cal
         }
     }

-    leafNum := big.NewInt(int64(size - 1))
+    leafNum := new(big.Int).SetUint64(size - 1)

     var blockTime big.Int
     blockTime.SetUint64(evm.Context.Time)
@@ -199,7 +199,7 @@ func (con ArbSys) SendMerkleTreeState(c ctx, evm mech) (huge, bytes32, []bytes32
     for i, par := range rawPartials {
         partials[i] = par
     }
-    return big.NewInt(int64(size)), rootHash, partials, nil
+    return new(big.Int).SetUint64(size), rootHash, partials, nil
 }

 // WithdrawEth send paid eth to the destination on L1
diff --git a/relay/relay_stress_test.go b/relay/relay_stress_test.go
index 9a8875a429..9d5c415056 100644
--- a/relay/relay_stress_test.go
+++ b/relay/relay_stress_test.go
@@ -160,7 +160,7 @@ func largeBacklogRelayTestImpl(t *testing.T, numClients, backlogSize, l2MsgSize
             connected++
         }
     }
-    if int32(connected) != int32(numClients) {
+    if connected != numClients {
         t.Fail()
     }
     log.Info("number of clients connected", "expected", numClients, "got", connected)
diff --git a/scripts/convert-databases.bash b/scripts/convert-databases.bash
index bd898c2c98..3020b389b4 100755
--- a/scripts/convert-databases.bash
+++ b/scripts/convert-databases.bash
@@ -182,7 +182,7 @@ fi

 convert_result=
 convert () {
-    srcdir=$(echo $src/$1 | tr -s /)
+    srcdir="$src"/$1
     dstdir=$(echo $dst/$1 | tr -s /)
    if ! [ -e $dstdir ]; then
         echo "== Converting $1 db"
diff --git a/staker/block_challenge_backend.go b/staker/block_challenge_backend.go
index 42351789ba..0dd89865bd 100644
--- a/staker/block_challenge_backend.go
+++ b/staker/block_challenge_backend.go
@@ -219,6 +219,6 @@ func (b *BlockChallengeBackend) IssueExecChallenge(
         },
         machineStatuses,
         globalStateHashes,
-        big.NewInt(int64(numsteps)),
+        new(big.Int).SetUint64(numsteps),
     )
 }
diff --git a/staker/block_validator.go b/staker/block_validator.go
index df465cc31f..2239952b37 100644
--- a/staker/block_validator.go
+++ b/staker/block_validator.go
@@ -17,6 +17,7 @@ import (
     "time"

     "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/core/rawdb"
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/metrics"
@@ -126,6 +127,9 @@ func (c *BlockValidatorConfig) Validate() error {
         }
         c.memoryFreeLimit = limit
     }
+    if err := c.RedisValidationClientConfig.Validate(); err != nil {
+        return fmt.Errorf("failed to validate redis validation client config: %w", err)
+    }
     streamsEnabled := c.RedisValidationClientConfig.Enabled()
     if len(c.ValidationServerConfigs) == 0 {
         c.ValidationServerConfigs = []rpcclient.ClientConfig{c.ValidationServer}
@@ -311,6 +315,7 @@ func NewBlockValidator(

 func atomicStorePos(addr *atomic.Uint64, val arbutil.MessageIndex, metr metrics.Gauge) {
     addr.Store(uint64(val))
+    // #nosec G115
     metr.Update(int64(val))
 }
@@ -495,7 +500,7 @@ func (v *BlockValidator) sendRecord(s *validationStatus) error {

 //nolint:gosec
 func (v *BlockValidator) writeToFile(validationEntry *validationEntry, moduleRoot common.Hash) error {
-    input, err := validationEntry.ToInput([]string{"wavm"})
+    input, err := validationEntry.ToInput([]rawdb.Target{rawdb.TargetWavm})
     if err != nil {
         return err
     }
@@ -569,6 +574,7 @@ func (v *BlockValidator) createNextValidationEntry(ctx context.Context) (bool, e
         v.nextCreateBatch = batch
         v.nextCreateBatchBlockHash = batchBlockHash
         v.nextCreateBatchMsgCount = count
+        // #nosec G115
         validatorMsgCountCurrentBatch.Update(int64(count))
         v.nextCreateBatchReread = false
     }
@@ -719,6 +725,7 @@ func (v *BlockValidator) iterativeValidationPrint(ctx context.Context) time.Dura
     if err != nil {
         printedCount = -1
     } else {
+        // #nosec G115
         printedCount = int64(batchMsgs) + int64(validated.GlobalState.PosInBatch)
     }
     log.Info("validated execution", "messageCount", printedCount, "globalstate", validated.GlobalState, "WasmRoots", validated.WasmRoots)
@@ -988,8 +995,10 @@ func (v *BlockValidator) UpdateLatestStaked(count arbutil.MessageIndex, globalSt
     if v.recordSentA.Load() < countUint64 {
         v.recordSentA.Store(countUint64)
     }
+    // #nosec G115
     v.validatedA.Store(countUint64)
     v.valLoopPos = count
+    // #nosec G115
     validatorMsgCountValidatedGauge.Update(int64(countUint64))
     err = v.writeLastValidated(globalState, nil) // we don't know which wasm roots were validated
     if err != nil {
@@ -1054,6 +1063,7 @@ func (v *BlockValidator) Reorg(ctx context.Context, count arbutil.MessageIndex)
     }
     if v.validatedA.Load() > countUint64 {
         v.validatedA.Store(countUint64)
+        // #nosec G115
         validatorMsgCountValidatedGauge.Update(int64(countUint64))
         err := v.writeLastValidated(v.nextCreateStartGS, nil) // we don't know which wasm roots were validated
         if err != nil {
@@ -1245,6 +1255,7 @@ func (v *BlockValidator) checkValidatedGSCaughtUp() (bool, error) {
     atomicStorePos(&v.createdA, count, validatorMsgCountCreatedGauge)
     atomicStorePos(&v.recordSentA, count, validatorMsgCountRecordSentGauge)
atomicStorePos(&v.validatedA, count, validatorMsgCountValidatedGauge) + // #nosec G115 validatorMsgCountValidatedGauge.Update(int64(count)) v.chainCaughtUp = true return true, nil diff --git a/staker/challenge-cache/cache.go b/staker/challenge-cache/cache.go index ed4fad6450..5dca2764e8 100644 --- a/staker/challenge-cache/cache.go +++ b/staker/challenge-cache/cache.go @@ -187,12 +187,12 @@ func (c *Cache) Prune(ctx context.Context, messageNumber uint64) error { if info.IsDir() { matches := pattern.FindStringSubmatch(info.Name()) if len(matches) > 1 { - dirNameMessageNum, err := strconv.Atoi(matches[1]) + dirNameMessageNum, err := strconv.ParseUint(matches[1], 10, 64) if err != nil { return err } // Collect the directory path if the message number is <= the specified value. - if dirNameMessageNum <= int(messageNumber) { + if dirNameMessageNum <= messageNumber { pathsToDelete = append(pathsToDelete, path) } } diff --git a/staker/challenge_manager.go b/staker/challenge_manager.go index 80cafccced..ef431d3c79 100644 --- a/staker/challenge_manager.go +++ b/staker/challenge_manager.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" @@ -293,7 +294,7 @@ func (m *ChallengeManager) bisect(ctx context.Context, backend ChallengeBackend, if newChallengeLength < bisectionDegree { bisectionDegree = newChallengeLength } - newSegments := make([][32]byte, int(bisectionDegree+1)) + newSegments := make([][32]byte, bisectionDegree+1) position := startSegmentPosition normalSegmentLength := newChallengeLength / bisectionDegree for i := range newSegments { @@ -467,7 +468,7 @@ func (m *ChallengeManager) createExecutionBackend(ctx context.Context, step uint if err != nil { return fmt.Errorf("error creating validation entry for challenge %v msg %v for execution challenge: %w", m.challengeIndex, initialCount, err) } - input, err := entry.ToInput([]string{"wavm"}) + input, err := entry.ToInput([]rawdb.Target{rawdb.TargetWavm}) if err != nil { return fmt.Errorf("error getting validation entry input of challenge %v msg %v: %w", m.challengeIndex, initialCount, err) } diff --git a/staker/challenge_test.go b/staker/challenge_test.go index 4534b04a25..33f1644c63 100644 --- a/staker/challenge_test.go +++ b/staker/challenge_test.go @@ -77,7 +77,7 @@ func CreateChallenge( resultReceiverAddr, maxInboxMessage, [2][32]byte{startHashBytes, endHashBytes}, - big.NewInt(int64(endMachineSteps)), + new(big.Int).SetUint64(endMachineSteps), asserter, challenger, big.NewInt(100), diff --git a/staker/l1_validator.go b/staker/l1_validator.go index dd9673ee0b..6ea9fd8ded 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -247,6 +247,7 @@ func (v *L1Validator) generateNodeAction( startStateProposedParentChain, err, ) } + // #nosec G115 startStateProposedTime := time.Unix(int64(startStateProposedHeader.Time), 0) v.txStreamer.PauseReorgs() @@ -375,6 +376,7 @@ func (v *L1Validator) generateNodeAction( return nil, false, fmt.Errorf("error getting rollup minimum assertion period: %w", err) } + // #nosec G115 timeSinceProposed := big.NewInt(int64(l1BlockNumber) - int64(startStateProposedL1)) if timeSinceProposed.Cmp(minAssertionPeriod) < 0 { // Too soon to assert diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go index 
b35bebd1c6..5ef28a49dc 100644 --- a/staker/rollup_watcher.go +++ b/staker/rollup_watcher.go @@ -196,7 +196,7 @@ func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, if logQueryRangeSize == 0 { query.ToBlock = toBlock } else { - query.ToBlock = new(big.Int).Add(fromBlock, big.NewInt(int64(logQueryRangeSize))) + query.ToBlock = new(big.Int).Add(fromBlock, new(big.Int).SetUint64(logQueryRangeSize)) } if query.ToBlock.Cmp(toBlock) > 0 { query.ToBlock = toBlock diff --git a/staker/staker.go b/staker/staker.go index 9d917d1853..6e93d27311 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -142,6 +142,8 @@ func (c *L1ValidatorConfig) Validate() error { return nil } +type L1ValidatorConfigFetcher func() *L1ValidatorConfig + var DefaultL1ValidatorConfig = L1ValidatorConfig{ Enable: true, Strategy: "Watchtower", @@ -257,7 +259,7 @@ type Staker struct { confirmedNotifiers []LatestConfirmedNotifier activeChallenge *ChallengeManager baseCallOpts bind.CallOpts - config L1ValidatorConfig + config L1ValidatorConfigFetcher highGasBlocksBuffer *big.Int lastActCalledBlock *big.Int inactiveLastCheckedNode *nodeAndHash @@ -295,7 +297,7 @@ func NewStaker( l1Reader *headerreader.HeaderReader, wallet ValidatorWalletInterface, callOpts bind.CallOpts, - config L1ValidatorConfig, + config L1ValidatorConfigFetcher, blockValidator *BlockValidator, statelessBlockValidator *StatelessBlockValidator, stakedNotifiers []LatestStakedNotifier, @@ -304,7 +306,7 @@ func NewStaker( fatalErr chan<- error, ) (*Staker, error) { - if err := config.Validate(); err != nil { + if err := config().Validate(); err != nil { return nil, err } client := l1Reader.Client() @@ -314,7 +316,7 @@ func NewStaker( return nil, err } stakerLastSuccessfulActionGauge.Update(time.Now().Unix()) - if config.StartValidationFromStaked && blockValidator != nil { + if config().StartValidationFromStaked && blockValidator != nil { stakedNotifiers = append(stakedNotifiers, blockValidator) } inactiveValidatedNodes := btree.NewG(2, func(a, b validatedNode) bool { @@ -327,7 +329,7 @@ func NewStaker( confirmedNotifiers: confirmedNotifiers, baseCallOpts: callOpts, config: config, - highGasBlocksBuffer: big.NewInt(config.PostingStrategy.HighGasDelayBlocks), + highGasBlocksBuffer: big.NewInt(config().PostingStrategy.HighGasDelayBlocks), lastActCalledBlock: nil, inboxReader: statelessBlockValidator.inboxReader, statelessBlockValidator: statelessBlockValidator, @@ -345,11 +347,12 @@ func (s *Staker) Initialize(ctx context.Context) error { if walletAddressOrZero != (common.Address{}) { s.updateStakerBalanceMetric(ctx) } - if s.blockValidator != nil && s.config.StartValidationFromStaked { + if s.blockValidator != nil && s.config().StartValidationFromStaked { latestStaked, _, err := s.validatorUtils.LatestStaked(&s.baseCallOpts, s.rollupAddress, walletAddressOrZero) if err != nil { return err } + // #nosec G115 stakerLatestStakedNodeGauge.Update(int64(latestStaked)) if latestStaked == 0 { return nil @@ -369,7 +372,8 @@ func (s *Staker) Initialize(ctx context.Context) error { // based on the config, the wallet address, and the on-chain rollup designated fast confirmer. // Before this function, both variables should be their default (i.e. fast confirmation is disabled). 
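The staker.go hunks above replace the stored L1ValidatorConfig value with an L1ValidatorConfigFetcher, a func() *L1ValidatorConfig, so each s.config() call observes the configuration as it is now rather than a copy captured when the Staker was built. A stripped-down model of the pattern, using simplified stand-in types rather than the real staker ones:

package main

import "fmt"

type Config struct{ Interval int }

// ConfigFetcher returns the current config on every call, enabling hot reloads.
type ConfigFetcher func() *Config

type Worker struct{ config ConfigFetcher }

func (w *Worker) Tick() { fmt.Println("interval is now", w.config().Interval) }

func main() {
	cfg := &Config{Interval: 10}
	w := &Worker{config: func() *Config { return cfg }}
	w.Tick()          // interval is now 10
	cfg.Interval = 60 // e.g. a config reload
	w.Tick()          // interval is now 60: the fetcher sees the update
}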
func (s *Staker) setupFastConfirmation(ctx context.Context) error { - if !s.config.EnableFastConfirmation { + cfg := s.config() + if !cfg.EnableFastConfirmation { return nil } if s.wallet.Address() == nil { @@ -400,7 +404,7 @@ func (s *Staker) setupFastConfirmation(ctx context.Context) error { fastConfirmer, s.builder, s.wallet, - s.config.gasRefunder, + cfg.gasRefunder, s.l1Reader, ) if err != nil { @@ -516,8 +520,9 @@ func (s *Staker) Start(ctxIn context.Context) { } }() var err error - if common.HexToAddress(s.config.GasRefunderAddress) != (common.Address{}) { - gasRefunderBalance, err := s.client.BalanceAt(ctx, common.HexToAddress(s.config.GasRefunderAddress), nil) + cfg := s.config() + if common.HexToAddress(cfg.GasRefunderAddress) != (common.Address{}) { + gasRefunderBalance, err := s.client.BalanceAt(ctx, common.HexToAddress(cfg.GasRefunderAddress), nil) if err != nil { log.Warn("error fetching validator gas refunder balance", "err", err) } else { @@ -546,7 +551,7 @@ func (s *Staker) Start(ctxIn context.Context) { // Try to create another tx return 0 } - return s.config.StakerInterval + return cfg.StakerInterval } stakerActionFailureCounter.Inc(1) backoff *= 2 @@ -566,6 +571,7 @@ func (s *Staker) Start(ctxIn context.Context) { if err != nil && ctx.Err() == nil { log.Error("staker: error checking latest staked", "err", err) } + // #nosec G115 stakerLatestStakedNodeGauge.Update(int64(staked)) if stakedGlobalState != nil { for _, notifier := range s.stakedNotifiers { @@ -581,13 +587,14 @@ func (s *Staker) Start(ctxIn context.Context) { log.Error("staker: error checking latest confirmed", "err", err) } } + // #nosec G115 stakerLatestConfirmedNodeGauge.Update(int64(confirmed)) if confirmedGlobalState != nil { for _, notifier := range s.confirmedNotifiers { notifier.UpdateLatestConfirmed(confirmedMsgCount, *confirmedGlobalState) } } - return s.config.StakerInterval + return s.config().StakerInterval }) } @@ -608,6 +615,7 @@ func (s *Staker) IsWhitelisted(ctx context.Context) (bool, error) { } func (s *Staker) shouldAct(ctx context.Context) bool { + cfg := s.config() var gasPriceHigh = false var gasPriceFloat float64 gasPrice, err := s.client.SuggestGasPrice(ctx) @@ -615,7 +623,7 @@ func (s *Staker) shouldAct(ctx context.Context) bool { log.Warn("error getting gas price", "err", err) } else { gasPriceFloat = float64(gasPrice.Int64()) / 1e9 - if gasPriceFloat >= s.config.PostingStrategy.HighGasThreshold { + if gasPriceFloat >= cfg.PostingStrategy.HighGasThreshold { gasPriceHigh = true } } @@ -640,14 +648,14 @@ func (s *Staker) shouldAct(ctx context.Context) bool { // Clamp `s.highGasBlocksBuffer` to between 0 and HighGasDelayBlocks if s.highGasBlocksBuffer.Sign() < 0 { s.highGasBlocksBuffer.SetInt64(0) - } else if s.highGasBlocksBuffer.Cmp(big.NewInt(s.config.PostingStrategy.HighGasDelayBlocks)) > 0 { - s.highGasBlocksBuffer.SetInt64(s.config.PostingStrategy.HighGasDelayBlocks) + } else if s.highGasBlocksBuffer.Cmp(big.NewInt(cfg.PostingStrategy.HighGasDelayBlocks)) > 0 { + s.highGasBlocksBuffer.SetInt64(cfg.PostingStrategy.HighGasDelayBlocks) } if gasPriceHigh && s.highGasBlocksBuffer.Sign() > 0 { log.Warn( "not acting yet as gas price is high", "gasPrice", gasPriceFloat, - "highGasPriceConfig", s.config.PostingStrategy.HighGasThreshold, + "highGasPriceConfig", cfg.PostingStrategy.HighGasThreshold, "highGasBuffer", s.highGasBlocksBuffer, ) return false @@ -678,7 +686,8 @@ func (s *Staker) confirmDataPosterIsReady(ctx context.Context) error { } func (s *Staker) Act(ctx context.Context) 
(*types.Transaction, error) { - if s.config.strategy != WatchtowerStrategy { + cfg := s.config() + if cfg.strategy != WatchtowerStrategy { err := s.confirmDataPosterIsReady(ctx) if err != nil { return nil, err @@ -720,6 +729,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { if err != nil { return nil, fmt.Errorf("error getting latest staked node of own wallet %v: %w", walletAddressOrZero, err) } + // #nosec G115 stakerLatestStakedNodeGauge.Update(int64(latestStakedNodeNum)) if rawInfo != nil { rawInfo.LatestStakedNode = latestStakedNodeNum @@ -732,7 +742,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { StakeExists: rawInfo != nil, } - effectiveStrategy := s.config.strategy + effectiveStrategy := cfg.strategy nodesLinear, err := s.validatorUtils.AreUnresolvedNodesLinear(callOpts, s.rollupAddress) if err != nil { return nil, fmt.Errorf("error checking for rollup assertion fork: %w", err) @@ -760,7 +770,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { info.LatestStakedNodeHash = s.inactiveLastCheckedNode.hash } - if s.config.EnableFastConfirmation { + if cfg.EnableFastConfirmation { firstUnresolvedNode, err := s.rollup.FirstUnresolvedNode(callOpts) if err != nil { return nil, err @@ -799,7 +809,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { if s.builder.BuildingTransactionCount() > 0 { // Try to fast confirm previous nodes before working on new ones log.Info("fast confirming previous node", "node", firstUnresolvedNode) - return s.wallet.ExecuteTransactions(ctx, s.builder, s.config.gasRefunder) + return s.wallet.ExecuteTransactions(ctx, s.builder, cfg.gasRefunder) } } } @@ -886,7 +896,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { return nil, fmt.Errorf("error withdrawing staker funds from our staker %v: %w", walletAddressOrZero, err) } log.Info("removing old stake and withdrawing funds") - return s.wallet.ExecuteTransactions(ctx, s.builder, s.config.gasRefunder) + return s.wallet.ExecuteTransactions(ctx, s.builder, cfg.gasRefunder) } } @@ -940,7 +950,7 @@ func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { if info.StakerInfo == nil && info.StakeExists { log.Info("staking to execute transactions") } - return s.wallet.ExecuteTransactions(ctx, s.builder, s.config.gasRefunder) + return s.wallet.ExecuteTransactions(ctx, s.builder, cfg.gasRefunder) } func (s *Staker) handleConflict(ctx context.Context, info *StakerInfo) error { @@ -966,7 +976,7 @@ func (s *Staker) handleConflict(ctx context.Context, info *StakerInfo) error { *info.CurrentChallenge, s.statelessBlockValidator, latestConfirmedCreated, - s.config.ConfirmationBlocks, + s.config().ConfirmationBlocks, ) if err != nil { return fmt.Errorf("error creating challenge manager: %w", err) @@ -980,8 +990,9 @@ func (s *Staker) handleConflict(ctx context.Context, info *StakerInfo) error { } func (s *Staker) advanceStake(ctx context.Context, info *OurStakerInfo, effectiveStrategy StakerStrategy) error { + cfg := s.config() active := effectiveStrategy >= StakeLatestStrategy - action, wrongNodesExist, err := s.generateNodeAction(ctx, info, effectiveStrategy, &s.config) + action, wrongNodesExist, err := s.generateNodeAction(ctx, info, effectiveStrategy, cfg) if err != nil { return fmt.Errorf("error generating node action: %w", err) } @@ -995,7 +1006,7 @@ func (s *Staker) advanceStake(ctx context.Context, info *OurStakerInfo, effectiv switch action := action.(type) { case createNodeAction: - if 
wrongNodesExist && s.config.DisableChallenge { + if wrongNodesExist && cfg.DisableChallenge { log.Error("refusing to challenge assertion as config disables challenges") info.CanProgress = false return nil @@ -1192,7 +1203,7 @@ func (s *Staker) createConflict(ctx context.Context, info *StakerInfo) error { } func (s *Staker) Strategy() StakerStrategy { - return s.config.strategy + return s.config().strategy } func (s *Staker) Rollup() *RollupWatcher { diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index e8232264fe..d5eeb8eb69 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -7,12 +7,12 @@ import ( "context" "errors" "fmt" - "runtime" "testing" "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" @@ -134,7 +134,7 @@ type validationEntry struct { DelayedMsg []byte } -func (e *validationEntry) ToInput(stylusArchs []string) (*validator.ValidationInput, error) { +func (e *validationEntry) ToInput(stylusArchs []rawdb.Target) (*validator.ValidationInput, error) { if e.Stage != Ready { return nil, errors.New("cannot create input from non-ready entry") } @@ -143,21 +143,22 @@ func (e *validationEntry) ToInput(stylusArchs []string) (*validator.ValidationIn HasDelayedMsg: e.HasDelayedMsg, DelayedMsgNr: e.DelayedMsgNr, Preimages: e.Preimages, - UserWasms: make(map[string]map[common.Hash][]byte, len(e.UserWasms)), + UserWasms: make(map[rawdb.Target]map[common.Hash][]byte, len(e.UserWasms)), BatchInfo: e.BatchInfo, DelayedMsg: e.DelayedMsg, StartState: e.Start, DebugChain: e.ChainConfig.DebugMode(), } + if len(stylusArchs) == 0 && len(e.UserWasms) > 0 { + return nil, fmt.Errorf("stylus support is required") + } for _, stylusArch := range stylusArchs { res.UserWasms[stylusArch] = make(map[common.Hash][]byte) } - for hash, info := range e.UserWasms { + for hash, asmMap := range e.UserWasms { for _, stylusArch := range stylusArchs { - if stylusArch == "wavm" { - res.UserWasms[stylusArch][hash] = info.Module - } else if stylusArch == runtime.GOARCH { - res.UserWasms[stylusArch][hash] = info.Asm + if asm, exists := asmMap[stylusArch]; exists { + res.UserWasms[stylusArch][hash] = asm } else { return nil, fmt.Errorf("stylusArch not supported by block validator: %v", stylusArch) } diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index bd0a1f3336..eef6c29b7a 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -259,6 +259,7 @@ func testBlockValidatorSimple(t *testing.T, opts Options) { Require(t, err) // up to 3 extra references: awaiting validation, recently valid, lastValidatedHeader largestRefCount := lastBlockNow.NumberU64() - lastBlock.NumberU64() + 3 + // #nosec G115 if finalRefCount < 0 || finalRefCount > int64(largestRefCount) { Fatal(t, "unexpected refcount:", finalRefCount) } diff --git a/system_tests/common_test.go b/system_tests/common_test.go index e14eb45a27..6e7375a921 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -204,6 +204,7 @@ func ExecConfigDefaultNonSequencerTest() *gethexec.Config { config.Sequencer.Enable = false config.Forwarder = DefaultTestForwarderConfig config.ForwardingTarget = "null" + config.TxPreChecker.Strictness = gethexec.TxPreCheckerStrictnessNone _ = config.Validate() @@ 
-216,6 +217,7 @@ func ExecConfigDefaultTest() *gethexec.Config { config.Sequencer = TestSequencerConfig config.ParentChainReader = headerreader.TestConfig config.ForwardingTarget = "null" + config.TxPreChecker.Strictness = gethexec.TxPreCheckerStrictnessNone _ = config.Validate() diff --git a/system_tests/contract_tx_test.go b/system_tests/contract_tx_test.go index 7d66e516b4..c1ef840c43 100644 --- a/system_tests/contract_tx_test.go +++ b/system_tests/contract_tx_test.go @@ -51,6 +51,7 @@ func TestContractTxDeploy(t *testing.T) { 0xF3, // RETURN } var requestId common.Hash + // #nosec G115 requestId[0] = uint8(stateNonce) contractTx := &types.ArbitrumContractTx{ ChainId: params.ArbitrumDevTestChainConfig().ChainID, diff --git a/system_tests/fast_confirm_test.go b/system_tests/fast_confirm_test.go index f05219d994..4a679e5077 100644 --- a/system_tests/fast_confirm_test.go +++ b/system_tests/fast_confirm_test.go @@ -160,7 +160,7 @@ func TestFastConfirmation(t *testing.T) { l2node.L1Reader, valWallet, bind.CallOpts{}, - valConfig, + func() *staker.L1ValidatorConfig { return &valConfig }, nil, stateless, nil, @@ -317,8 +317,8 @@ func TestFastConfirmationWithSafe(t *testing.T) { _, err = builder.L1.EnsureTxSucceeded(tx) Require(t, err) - valConfig := staker.TestL1ValidatorConfig - valConfig.EnableFastConfirmation = true + valConfigA := staker.TestL1ValidatorConfig + valConfigA.EnableFastConfirmation = true parentChainID, err := builder.L1.Client.ChainID(ctx) if err != nil { @@ -335,9 +335,9 @@ func TestFastConfirmationWithSafe(t *testing.T) { if err != nil { t.Fatalf("Error creating validator dataposter: %v", err) } - valWalletA, err := validatorwallet.NewContract(dpA, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}, func() uint64 { return valConfig.ExtraGas }) + valWalletA, err := validatorwallet.NewContract(dpA, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}, func() uint64 { return valConfigA.ExtraGas }) Require(t, err) - valConfig.Strategy = "MakeNodes" + valConfigA.Strategy = "MakeNodes" _, valStack := createTestValidationNode(t, ctx, &valnode.TestValidationConfig) blockValidatorConfig := staker.TestBlockValidatorConfig @@ -361,7 +361,7 @@ func TestFastConfirmationWithSafe(t *testing.T) { l2nodeA.L1Reader, valWalletA, bind.CallOpts{}, - valConfig, + func() *staker.L1ValidatorConfig { return &valConfigA }, nil, statelessA, nil, @@ -391,7 +391,9 @@ func TestFastConfirmationWithSafe(t *testing.T) { } valWalletB, err := validatorwallet.NewEOA(dpB, l2nodeB.DeployInfo.Rollup, l2nodeB.L1Reader.Client(), func() uint64 { return 0 }) Require(t, err) - valConfig.Strategy = "watchtower" + valConfigB := staker.TestL1ValidatorConfig + valConfigB.EnableFastConfirmation = true + valConfigB.Strategy = "watchtower" statelessB, err := staker.NewStatelessBlockValidator( l2nodeB.InboxReader, l2nodeB.InboxTracker, @@ -411,7 +413,7 @@ func TestFastConfirmationWithSafe(t *testing.T) { l2nodeB.L1Reader, valWalletB, bind.CallOpts{}, - valConfig, + func() *staker.L1ValidatorConfig { return &valConfigB }, nil, statelessB, nil, diff --git a/system_tests/forwarder_test.go b/system_tests/forwarder_test.go index 9fe419593e..f87283432e 100644 --- a/system_tests/forwarder_test.go +++ b/system_tests/forwarder_test.go @@ -246,6 +246,7 @@ func TestRedisForwarder(t *testing.T) { for i := range seqClients { userA := user("A", i) builder.L2Info.GenerateAccount(userA) 
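The fast-confirm test changes above split the one shared valConfig into per-staker copies (valConfigA, valConfigB) because each staker now receives a fetcher closure over a pointer: mutating a single shared struct before building the second staker would also retarget the first staker's strategy. A compact model of that hazard, with hypothetical names:

package main

import "fmt"

type ValConfig struct{ Strategy string }

func main() {
	// One shared struct behind a fetcher: a late mutation leaks into A.
	shared := ValConfig{Strategy: "MakeNodes"}
	fetchA := func() *ValConfig { return &shared }
	shared.Strategy = "watchtower" // intended only for the second staker
	fmt.Println(fetchA().Strategy) // watchtower: staker A changed too

	// Independent copies keep the stakers isolated.
	cfgA := ValConfig{Strategy: "MakeNodes"}
	cfgB := ValConfig{Strategy: "watchtower"}
	fetchA2 := func() *ValConfig { return &cfgA }
	fetchB := func() *ValConfig { return &cfgB }
	fmt.Println(fetchA2().Strategy, fetchB().Strategy) // MakeNodes watchtower
}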
+ // #nosec G115 tx := builder.L2Info.PrepareTx("Owner", userA, builder.L2Info.TransferGas, big.NewInt(1e12+int64(builder.L2Info.TransferGas)*builder.L2Info.GasPrice.Int64()), nil) err := fallbackClient.SendTransaction(ctx, tx) Require(t, err) diff --git a/system_tests/initialization_test.go b/system_tests/initialization_test.go index f0797404a9..17e020e6ab 100644 --- a/system_tests/initialization_test.go +++ b/system_tests/initialization_test.go @@ -21,6 +21,7 @@ func InitOneContract(prand *testhelpers.PseudoRandomDataSource) (*statetransfer. storageMap := make(map[common.Hash]common.Hash) code := []byte{0x60, 0x0} // PUSH1 0 sum := big.NewInt(0) + // #nosec G115 numCells := int(prand.GetUint64() % 1000) for i := 0; i < numCells; i++ { storageAddr := prand.GetHash() diff --git a/system_tests/outbox_test.go b/system_tests/outbox_test.go index 739d756a31..c68df6ea22 100644 --- a/system_tests/outbox_test.go +++ b/system_tests/outbox_test.go @@ -146,6 +146,7 @@ func TestOutboxProofs(t *testing.T) { treeSize := root.size balanced := treeSize == arbmath.NextPowerOf2(treeSize)/2 + // #nosec G115 treeLevels := int(arbmath.Log2ceil(treeSize)) // the # of levels in the tree proofLevels := treeLevels - 1 // the # of levels where a hash is needed (all but root) walkLevels := treeLevels // the # of levels we need to consider when building walks diff --git a/system_tests/program_recursive_test.go b/system_tests/program_recursive_test.go index dbf527a293..e928f9f3aa 100644 --- a/system_tests/program_recursive_test.go +++ b/system_tests/program_recursive_test.go @@ -154,6 +154,7 @@ func testProgramResursiveCalls(t *testing.T, tests [][]multiCallRecurse, jit boo // execute transactions blockNum := uint64(0) for { + // #nosec G115 item := int(rander.GetUint64()/4) % len(tests) blockNum = testProgramRecursiveCall(t, builder, slotVals, rander, tests[item]) tests[item] = tests[len(tests)-1] diff --git a/system_tests/program_test.go b/system_tests/program_test.go index ae34c6c5bb..ed640809db 100644 --- a/system_tests/program_test.go +++ b/system_tests/program_test.go @@ -41,7 +41,6 @@ import ( "github.com/offchainlabs/nitro/util/colors" "github.com/offchainlabs/nitro/util/testhelpers" "github.com/offchainlabs/nitro/validator/valnode" - "github.com/wasmerio/wasmer-go/wasmer" ) var oneEth = arbmath.UintToBig(1e18) @@ -583,6 +582,7 @@ func testCalls(t *testing.T, jit bool) { for i := 0; i < 2; i++ { inner := nest(level - 1) + // #nosec G115 args = append(args, arbmath.Uint32ToBytes(uint32(len(inner)))...) args = append(args, inner...) 
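The nest helper above frames each inner payload with its length as a big-endian uint32 (arbmath.Uint32ToBytes(uint32(len(inner)))), the int-to-uint32 cast waived with #nosec G115, presumably because test payloads stay far below MaxUint32. A self-contained sketch of that framing and its inverse; appendFrame and readFrame are illustrative names, not functions from this diff:

package main

import (
	"encoding/binary"
	"fmt"
)

// appendFrame appends a 4-byte big-endian length prefix followed by payload.
func appendFrame(buf, payload []byte) []byte {
	buf = binary.BigEndian.AppendUint32(buf, uint32(len(payload)))
	return append(buf, payload...)
}

// readFrame consumes one length-prefixed frame from buf.
func readFrame(buf []byte) (payload, rest []byte, err error) {
	if len(buf) < 4 {
		return nil, nil, fmt.Errorf("truncated length prefix")
	}
	n := binary.BigEndian.Uint32(buf)
	if uint64(len(buf)-4) < uint64(n) {
		return nil, nil, fmt.Errorf("truncated payload: want %d bytes", n)
	}
	m := int(n) // safe: bounds-checked against len(buf) above
	return buf[4 : 4+m], buf[4+m:], nil
}

func main() {
	var args []byte
	args = appendFrame(args, []byte("inner-call-1"))
	args = appendFrame(args, []byte("inner-call-2"))
	for len(args) > 0 {
		payload, rest, err := readFrame(args)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s\n", payload)
		args = rest
	}
}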
} @@ -638,6 +638,7 @@ func testCalls(t *testing.T, jit bool) { colors.PrintBlue("Calling the ArbosTest precompile (Rust => precompile)") testPrecompile := func(gas uint64) uint64 { // Call the burnArbGas() precompile from Rust + // #nosec G115 burn := pack(burnArbGas(big.NewInt(int64(gas)))) args := argsForMulticall(vm.CALL, types.ArbosTestAddress, nil, burn) tx := l2info.PrepareTxTo("Owner", &callsAddr, 1e9, nil, args) @@ -651,6 +652,7 @@ func testCalls(t *testing.T, jit bool) { large := testPrecompile(largeGas) if !arbmath.Within(large-small, largeGas-smallGas, 2) { + // #nosec G115 ratio := float64(int64(large)-int64(small)) / float64(int64(largeGas)-int64(smallGas)) Fatal(t, "inconsistent burns", large, small, largeGas, smallGas, ratio) } @@ -1528,9 +1530,10 @@ func readWasmFile(t *testing.T, file string) ([]byte, []byte) { Require(t, err) // chose a random dictionary for testing, but keep the same files consistent + // #nosec G115 randDict := arbcompress.Dictionary((len(file) + len(t.Name())) % 2) - wasmSource, err := wasmer.Wat2Wasm(string(source)) + wasmSource, err := programs.Wat2Wasm(source) Require(t, err) wasm, err := arbcompress.Compress(wasmSource, arbcompress.LEVEL_WELL, randDict) Require(t, err) @@ -1598,6 +1601,7 @@ func argsForMulticall(opcode vm.OpCode, address common.Address, value *big.Int, if opcode == vm.CALL { length += 32 } + // #nosec G115 args = append(args, arbmath.Uint32ToBytes(uint32(length))...) args = append(args, kinds[opcode]) if opcode == vm.CALL { diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index 09d53669ee..cd3904ca06 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -132,6 +132,7 @@ func TestRecreateStateForRPCNoDepthLimit(t *testing.T) { func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + // #nosec G115 depthGasLimit := int64(256 * util.NormalizeL2GasForL1GasInitial(800_000, params.GWei)) execConfig := ExecConfigDefaultTest() execConfig.RPC.MaxRecreateStateDepth = depthGasLimit @@ -407,6 +408,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig gas = 0 blocks = 0 } else { + // #nosec G115 if int(i) >= int(lastBlock)-int(cacheConfig.BlockCount) { // skipping nonexistence check - the state might have been saved on node shutdown continue @@ -471,6 +473,7 @@ func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { for _, skipGas := range skipGasValues { for _, skipBlocks := range skipBlockValues[:len(skipBlockValues)-2] { cacheConfig.MaxAmountOfGasToSkipStateSaving = skipGas + // #nosec G115 cacheConfig.MaxNumberOfBlocksToSkipStateSaving = uint32(skipBlocks) testSkippingSavingStateAndRecreatingAfterRestart(t, &cacheConfig, 100) } @@ -495,6 +498,7 @@ func TestGettingStateForRPCFullNode(t *testing.T) { if header == nil { Fatal(t, "failed to get current block header") } + // #nosec G115 state, _, err := api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) Require(t, err) addr := builder.L2Info.GetAddress("User2") @@ -505,6 +509,7 @@ func TestGettingStateForRPCFullNode(t *testing.T) { Fatal(t, "User2 address does not exist in the state") } // Get the state again to avoid caching + // #nosec G115 state, _, err = api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) Require(t, err) @@ -542,6 +547,7 @@ func TestGettingStateForRPCHybridArchiveNode(t *testing.T) { if header == nil { Fatal(t, "failed to 
get current block header") } + // #nosec G115 state, _, err := api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) Require(t, err) addr := builder.L2Info.GetAddress("User2") @@ -552,6 +558,7 @@ func TestGettingStateForRPCHybridArchiveNode(t *testing.T) { Fatal(t, "User2 address does not exist in the state") } // Get the state again to avoid caching + // #nosec G115 state, _, err = api.StateAndHeaderByNumber(ctx, rpc.BlockNumber(header.Number.Uint64())) Require(t, err) diff --git a/system_tests/seq_nonce_test.go b/system_tests/seq_nonce_test.go index 72629e1978..c099563e29 100644 --- a/system_tests/seq_nonce_test.go +++ b/system_tests/seq_nonce_test.go @@ -111,6 +111,7 @@ func TestSequencerNonceTooHighQueueFull(t *testing.T) { } for wait := 9; wait >= 0; wait-- { + // #nosec G115 got := int(completed.Load()) expected := count - builder.execConfig.Sequencer.NonceFailureCacheSize if got == expected { diff --git a/system_tests/seqinbox_test.go b/system_tests/seqinbox_test.go index 4dc8f4a664..6babe5833f 100644 --- a/system_tests/seqinbox_test.go +++ b/system_tests/seqinbox_test.go @@ -229,6 +229,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { reorgTargetNumber := blockStates[reorgTo].l1BlockNumber currentHeader, err := builder.L1.Client.HeaderByNumber(ctx, nil) Require(t, err) + // #nosec G115 if currentHeader.Number.Int64()-int64(reorgTargetNumber) < 65 { Fatal(t, "Less than 65 blocks of difference between current block", currentHeader.Number, "and target", reorgTargetNumber) } @@ -346,7 +347,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { BridgeAddr: builder.L1Info.GetAddress("Bridge"), DataPosterAddr: seqOpts.From, GasRefunderAddr: gasRefunderAddr, - SequencerInboxAccs: len(blockStates), + SequencerInboxAccs: uint64(len(blockStates)), AfterDelayedMessagesRead: 1, }) if diff := diffAccessList(accessed, *wantAL); diff != "" { @@ -374,6 +375,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { t.Fatalf("BalanceAt(%v) unexpected error: %v", seqOpts.From, err) } txCost := txRes.EffectiveGasPrice.Uint64() * txRes.GasUsed + // #nosec G115 if diff := before.Int64() - after.Int64(); diff >= int64(txCost) { t.Errorf("Transaction: %v was not refunded, balance diff: %v, cost: %v", tx.Hash(), diff, txCost) } @@ -424,11 +426,13 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { } for _, state := range blockStates { + // #nosec G115 block, err := l2Backend.APIBackend().BlockByNumber(ctx, rpc.BlockNumber(state.l2BlockNumber)) Require(t, err) if block == nil { Fatal(t, "missing state block", state.l2BlockNumber) } + // #nosec G115 stateDb, _, err := l2Backend.APIBackend().StateAndHeaderByNumber(ctx, rpc.BlockNumber(state.l2BlockNumber)) Require(t, err) for acct, expectedBalance := range state.balances { diff --git a/system_tests/snap_sync_test.go b/system_tests/snap_sync_test.go index a04d9f5bf3..7462b5f5f0 100644 --- a/system_tests/snap_sync_test.go +++ b/system_tests/snap_sync_test.go @@ -92,8 +92,10 @@ func TestSnapSync(t *testing.T) { waitForBlockToCatchupToMessageCount(ctx, t, nodeC.Client, finalMessageCount) // Fetching message count - 1 instead on the latest block number as the latest block number might not be // present in the snap sync node since it does not have the sequencer feed. 
+ // #nosec G115 header, err := builder.L2.Client.HeaderByNumber(ctx, big.NewInt(int64(finalMessageCount)-1)) Require(t, err) + // #nosec G115 headerNodeC, err := nodeC.Client.HeaderByNumber(ctx, big.NewInt(int64(finalMessageCount)-1)) Require(t, err) // Once the node is synced up, check if the block hash is the same for the last block diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index f57b68ad8a..03c9fd3628 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -166,7 +166,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) validatorUtils, err := rollupgen.NewValidatorUtils(l2nodeA.DeployInfo.ValidatorUtils, builder.L1.Client) Require(t, err) - valConfig := staker.TestL1ValidatorConfig + valConfigA := staker.TestL1ValidatorConfig parentChainID, err := builder.L1.Client.ChainID(ctx) if err != nil { t.Fatalf("Failed to get parent chain id: %v", err) @@ -182,12 +182,12 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) if err != nil { t.Fatalf("Error creating validator dataposter: %v", err) } - valWalletA, err := validatorwallet.NewContract(dpA, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}, func() uint64 { return valConfig.ExtraGas }) + valWalletA, err := validatorwallet.NewContract(dpA, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}, func() uint64 { return valConfigA.ExtraGas }) Require(t, err) if honestStakerInactive { - valConfig.Strategy = "Defensive" + valConfigA.Strategy = "Defensive" } else { - valConfig.Strategy = "MakeNodes" + valConfigA.Strategy = "MakeNodes" } _, valStack := createTestValidationNode(t, ctx, &valnode.TestValidationConfig) @@ -210,7 +210,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) l2nodeA.L1Reader, valWalletA, bind.CallOpts{}, - valConfig, + func() *staker.L1ValidatorConfig { return &valConfigA }, nil, statelessA, nil, @@ -244,7 +244,8 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) } valWalletB, err := validatorwallet.NewEOA(dpB, l2nodeB.DeployInfo.Rollup, l2nodeB.L1Reader.Client(), func() uint64 { return 0 }) Require(t, err) - valConfig.Strategy = "MakeNodes" + valConfigB := staker.TestL1ValidatorConfig + valConfigB.Strategy = "MakeNodes" statelessB, err := staker.NewStatelessBlockValidator( l2nodeB.InboxReader, l2nodeB.InboxTracker, @@ -262,7 +263,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) l2nodeB.L1Reader, valWalletB, bind.CallOpts{}, - valConfig, + func() *staker.L1ValidatorConfig { return &valConfigB }, nil, statelessB, nil, @@ -278,12 +279,13 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) Require(t, err) } valWalletC := validatorwallet.NewNoOp(builder.L1.Client, l2nodeA.DeployInfo.Rollup) - valConfig.Strategy = "Watchtower" + valConfigC := staker.TestL1ValidatorConfig + valConfigC.Strategy = "Watchtower" stakerC, err := staker.NewStaker( l2nodeA.L1Reader, valWalletC, bind.CallOpts{}, - valConfig, + func() *staker.L1ValidatorConfig { return &valConfigC }, nil, statelessA, nil, diff --git a/system_tests/stylus_tracer_test.go b/system_tests/stylus_tracer_test.go new file mode 100644 index 0000000000..3b95f38d21 --- /dev/null +++ b/system_tests/stylus_tracer_test.go @@ -0,0 +1,244 @@ +// Copyright 2024, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +package arbtest + +import ( + "encoding/binary" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/google/go-cmp/cmp" + "github.com/offchainlabs/nitro/execution/gethexec" + "github.com/offchainlabs/nitro/solgen/go/mocksgen" + "github.com/offchainlabs/nitro/util/stack" + "github.com/offchainlabs/nitro/util/testhelpers" +) + +func TestStylusTracer(t *testing.T) { + const jit = false + builder, auth, cleanup := setupProgramTest(t, jit) + ctx := builder.ctx + l2client := builder.L2.Client + l2info := builder.L2Info + rpcClient := builder.L2.Client.Client() + defer cleanup() + + traceTransaction := func(tx common.Hash, tracer string) []gethexec.HostioTraceInfo { + traceOpts := struct { + Tracer string `json:"tracer"` + }{ + Tracer: tracer, + } + var result []gethexec.HostioTraceInfo + err := rpcClient.CallContext(ctx, &result, "debug_traceTransaction", tx, traceOpts) + Require(t, err, "trace transaction") + return result + } + + // Deploy contracts + stylusMulticall := deployWasm(t, ctx, auth, l2client, rustFile("multicall")) + evmMulticall, tx, _, err := mocksgen.DeployMultiCallTest(&auth, builder.L2.Client) + Require(t, err, "deploy evm multicall") + _, err = EnsureTxSucceeded(ctx, l2client, tx) + Require(t, err, "ensure evm multicall deployment") + + // Args for tests + key := testhelpers.RandomHash() + value := testhelpers.RandomHash() + loadStoreArgs := multicallEmptyArgs() + loadStoreArgs = multicallAppendStore(loadStoreArgs, key, value, false) + loadStoreArgs = multicallAppendLoad(loadStoreArgs, key, false) + callArgs := argsForMulticall(vm.CALL, stylusMulticall, nil, []byte{0}) + evmCall := argsForMulticall(vm.CALL, evmMulticall, nil, []byte{0}) + + for _, testCase := range []struct { + name string + contract common.Address + args []byte + want []gethexec.HostioTraceInfo + }{ + { + name: "non-recursive hostios", + contract: stylusMulticall, + args: loadStoreArgs, + want: []gethexec.HostioTraceInfo{ + {Name: "user_entrypoint", Args: intToBe32(len(loadStoreArgs))}, + {Name: "pay_for_memory_grow", Args: []byte{0x00, 0x01}}, + {Name: "read_args", Outs: loadStoreArgs}, + {Name: "storage_cache_bytes32", Args: append(key.Bytes(), value.Bytes()...)}, + {Name: "storage_flush_cache", Args: []byte{0x00}}, + {Name: "storage_load_bytes32", Args: key.Bytes(), Outs: value.Bytes()}, + {Name: "storage_flush_cache", Args: []byte{0x00}}, + {Name: "write_result", Args: value.Bytes()}, + {Name: "user_returned", Outs: intToBe32(0)}, + }, + }, + + { + name: "call stylus contract", + contract: stylusMulticall, + args: callArgs, + want: []gethexec.HostioTraceInfo{ + {Name: "user_entrypoint", Args: intToBe32(len(callArgs))}, + {Name: "pay_for_memory_grow", Args: []byte{0x00, 0x01}}, + {Name: "read_args", Outs: callArgs}, + { + Name: "call_contract", + Args: append(stylusMulticall.Bytes(), common.Hex2Bytes("ffffffffffffffff000000000000000000000000000000000000000000000000000000000000000000")...), + Outs: common.Hex2Bytes("0000000000"), + Address: &stylusMulticall, + Steps: (*stack.Stack[gethexec.HostioTraceInfo])(&[]gethexec.HostioTraceInfo{ + {Name: "user_entrypoint", Args: intToBe32(1)}, + {Name: "pay_for_memory_grow", Args: []byte{0x00, 0x01}}, + {Name: "read_args", Outs: []byte{0x00}}, + {Name: "storage_flush_cache", Args: []byte{0x00}}, + {Name: "write_result"}, + {Name: "user_returned", Outs: intToBe32(0)}, + }), + }, + {Name: "storage_flush_cache", 
Args: []byte{0x00}}, + {Name: "write_result"}, + {Name: "user_returned", Outs: intToBe32(0)}, + }, + }, + + { + name: "call evm contract", + contract: stylusMulticall, + args: evmCall, + want: []gethexec.HostioTraceInfo{ + {Name: "user_entrypoint", Args: intToBe32(len(evmCall))}, + {Name: "pay_for_memory_grow", Args: []byte{0x00, 0x01}}, + {Name: "read_args", Outs: evmCall}, + { + Name: "call_contract", + Args: append(evmMulticall.Bytes(), common.Hex2Bytes("ffffffffffffffff000000000000000000000000000000000000000000000000000000000000000000")...), + Outs: common.Hex2Bytes("0000000000"), + Address: &evmMulticall, + Steps: stack.NewStack[gethexec.HostioTraceInfo](), + }, + {Name: "storage_flush_cache", Args: []byte{0x00}}, + {Name: "write_result"}, + {Name: "user_returned", Outs: intToBe32(0)}, + }, + }, + + { + name: "evm contract calling wasm", + contract: evmMulticall, + args: callArgs, + want: []gethexec.HostioTraceInfo{ + { + Name: "evm_call_contract", + Address: &stylusMulticall, + Steps: (*stack.Stack[gethexec.HostioTraceInfo])(&[]gethexec.HostioTraceInfo{ + {Name: "user_entrypoint", Args: intToBe32(1)}, + {Name: "pay_for_memory_grow", Args: []byte{0x00, 0x01}}, + {Name: "read_args", Outs: []byte{0x00}}, + {Name: "storage_flush_cache", Args: []byte{0x00}}, + {Name: "write_result"}, + {Name: "user_returned", Outs: intToBe32(0)}, + }), + }, + }, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + to := testCase.contract + tx := l2info.PrepareTxTo("Owner", &to, l2info.TransferGas, nil, testCase.args) + err := l2client.SendTransaction(ctx, tx) + Require(t, err, "send transaction") + + nativeResult := traceTransaction(tx.Hash(), "stylusTracer") + normalizeHostioTrace(nativeResult) + if diff := cmp.Diff(testCase.want, nativeResult); diff != "" { + Fatal(t, "native tracer doesn't match wanted result", diff) + } + + jsResult := traceTransaction(tx.Hash(), jsStylusTracer) + normalizeHostioTrace(jsResult) + if diff := cmp.Diff(jsResult, nativeResult); diff != "" { + Fatal(t, "native tracer doesn't match js trace", diff) + } + }) + } +} + +func intToBe32(v int) []byte { + // #nosec G115 + return binary.BigEndian.AppendUint32(nil, uint32(v)) +} + +// normalizeHostioTrace removes the start and end ink values from the trace so we can compare them. +// In Arbitrum, the gas used by the transaction varies depending on the L1 fees, so the trace +// returns different gas values and we can't hardcode them. 
+func normalizeHostioTrace(trace []gethexec.HostioTraceInfo) { + for i := range trace { + trace[i].StartInk = 0 + trace[i].EndInk = 0 + if len(trace[i].Args) == 0 { + trace[i].Args = nil + } + if len(trace[i].Outs) == 0 { + trace[i].Outs = nil + } + if trace[i].Steps != nil { + normalizeHostioTrace(*trace[i].Steps) + } + } +} + +var jsStylusTracer = ` +{ + "hostio": function(info) { + info.args = toHex(info.args); + info.outs = toHex(info.outs); + if (this.nests.includes(info.name)) { + Object.assign(info, this.open.pop()); + info.name = info.name.substring(4) // remove evm_ + } + this.open.push(info); + }, + "enter": function(frame) { + let inner = []; + let name = ""; + switch (frame.getType()) { + case "CALL": + name = "evm_call_contract"; + break; + case "DELEGATECALL": + name = "evm_delegate_call_contract"; + break; + case "STATICCALL": + name = "evm_static_call_contract"; + break; + case "CREATE": + name = "evm_create1"; + break; + case "CREATE2": + name = "evm_create2"; + break; + case "SELFDESTRUCT": + name = "evm_self_destruct"; + break; + } + this.open.push({ + address: toHex(frame.getTo()), + steps: inner, + name: name, + }); + this.stack.push(this.open); // save where we were + this.open = inner; + }, + "exit": function(result) { + this.open = this.stack.pop(); + }, + "result": function() { return this.open; }, + "fault": function() { return this.open; }, + stack: [], + open: [], + nests: ["call_contract", "delegate_call_contract", "static_call_contract"] +} +` diff --git a/system_tests/twonodeslong_test.go b/system_tests/twonodeslong_test.go index 83cd975dd8..60707b83fb 100644 --- a/system_tests/twonodeslong_test.go +++ b/system_tests/twonodeslong_test.go @@ -63,6 +63,7 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { builder.L2Info.GenerateAccount("ErrorTxSender") builder.L2.SendWaitTestTransactions(t, []*types.Transaction{ + // #nosec G115 builder.L2Info.PrepareTx("Faucet", "ErrorTxSender", builder.L2Info.TransferGas, big.NewInt(l2pricing.InitialBaseFeeWei*int64(builder.L2Info.TransferGas)), nil), }) diff --git a/system_tests/unsupported_txtypes_test.go b/system_tests/unsupported_txtypes_test.go index 4c3c8661c8..a228cb2454 100644 --- a/system_tests/unsupported_txtypes_test.go +++ b/system_tests/unsupported_txtypes_test.go @@ -112,8 +112,8 @@ func TestBlobAndInternalTxsAsDelayedMsgReject(t *testing.T) { blocknum, err := builder.L2.Client.BlockNumber(ctx) Require(t, err) - for i := int64(0); i <= int64(blocknum); i++ { - block, err := builder.L2.Client.BlockByNumber(ctx, big.NewInt(i)) + for i := uint64(0); i <= blocknum; i++ { + block, err := builder.L2.Client.BlockByNumber(ctx, new(big.Int).SetUint64(i)) Require(t, err) for _, tx := range block.Transactions() { if _, ok := txAcceptStatus[tx.Hash()]; ok { diff --git a/system_tests/validation_mock_test.go b/system_tests/validation_mock_test.go index 2c6321d009..88421e4c4b 100644 --- a/system_tests/validation_mock_test.go +++ b/system_tests/validation_mock_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/node" @@ -60,8 +61,8 @@ func (s *mockSpawner) WasmModuleRoots() ([]common.Hash, error) { return mockWasmModuleRoots, nil } -func (s *mockSpawner) StylusArchs() []string { - return []string{"mock"} +func (s *mockSpawner) StylusArchs() []rawdb.Target { + return []rawdb.Target{"mock"} } func (s *mockSpawner) Launch(entry 
*validator.ValidationInput, moduleRoot common.Hash) validator.ValidationRun { diff --git a/util/arbmath/bips.go b/util/arbmath/bips.go index 8b7c47d82b..646dad3a92 100644 --- a/util/arbmath/bips.go +++ b/util/arbmath/bips.go @@ -20,7 +20,7 @@ func PercentToBips(percentage int64) Bips { } func BigToBips(natural *big.Int) Bips { - return Bips(natural.Uint64()) + return Bips(natural.Int64()) } func BigMulByBips(value *big.Int, bips Bips) *big.Int { @@ -51,5 +51,5 @@ func (bips Bips) Uint64() uint64 { func BigDivToBips(dividend, divisor *big.Int) Bips { value := BigMulByInt(dividend, int64(OneInBips)) value.Div(value, divisor) - return Bips(BigToUintSaturating(value)) + return Bips(BigToIntSaturating(value)) } diff --git a/util/arbmath/math.go b/util/arbmath/math.go index 62af1e26e0..e5bed67f6d 100644 --- a/util/arbmath/math.go +++ b/util/arbmath/math.go @@ -117,6 +117,18 @@ func BigToUintSaturating(value *big.Int) uint64 { return value.Uint64() } +// BigToIntSaturating casts a huge to an int64, saturating if out of bounds +func BigToIntSaturating(value *big.Int) int64 { + if !value.IsInt64() { + if value.Sign() < 0 { + return math.MinInt64 + } else { + return math.MaxInt64 + } + } + return value.Int64() +} + // BigToUintOrPanic casts a huge to a uint, panicking if out of bounds func BigToUintOrPanic(value *big.Int) uint64 { if value.Sign() < 0 { @@ -260,10 +272,12 @@ func BigFloatMulByUint(multiplicand *big.Float, multiplier uint64) *big.Float { } func MaxSignedValue[T Signed]() T { + // #nosec G115 return T((uint64(1) << (8*unsafe.Sizeof(T(0)) - 1)) - 1) } func MinSignedValue[T Signed]() T { + // #nosec G115 return T(uint64(1) << ((8 * unsafe.Sizeof(T(0))) - 1)) } diff --git a/util/arbmath/math_test.go b/util/arbmath/math_test.go index 1be60dc58b..528666dc19 100644 --- a/util/arbmath/math_test.go +++ b/util/arbmath/math_test.go @@ -35,6 +35,7 @@ func TestMath(t *testing.T) { input := rand.Uint64() / 256 approx := ApproxSquareRoot(input) correct := math.Sqrt(float64(input)) + // #nosec G115 diff := int(approx) - int(correct) if diff < -1 || diff > 1 { Fail(t, "sqrt approximation off by too much", diff, input, approx, correct) @@ -46,6 +47,7 @@ func TestMath(t *testing.T) { input := uint64(i) approx := ApproxSquareRoot(input) correct := math.Sqrt(float64(input)) + // #nosec G115 diff := int(approx) - int(correct) if diff < 0 || diff > 1 { Fail(t, "sqrt approximation off by too much", diff, input, approx, correct) @@ -57,6 +59,7 @@ func TestMath(t *testing.T) { input := uint64(1 << i) approx := ApproxSquareRoot(input) correct := math.Sqrt(float64(input)) + // #nosec G115 diff := int(approx) - int(correct) if diff != 0 { Fail(t, "incorrect", "2^", i, diff, approx, correct) diff --git a/util/arbmath/uint24.go b/util/arbmath/uint24.go index 818f871a23..a0c5aa27b7 100644 --- a/util/arbmath/uint24.go +++ b/util/arbmath/uint24.go @@ -9,10 +9,10 @@ import ( "math/big" ) -const MaxUint24 = 1<<24 - 1 // 16777215 - type Uint24 uint32 +const MaxUint24 = 1<<24 - 1 // 16777215 + func (value Uint24) ToBig() *big.Int { return UintToBig(uint64(value)) } @@ -26,8 +26,9 @@ func (value Uint24) ToUint64() uint64 { } func IntToUint24[T uint32 | uint64](value T) (Uint24, error) { + // #nosec G115 if value > T(MaxUint24) { - return Uint24(MaxUint24), errors.New("value out of range") + return MaxUint24, errors.New("value out of range") } return Uint24(value), nil } @@ -40,6 +41,7 @@ func BigToUint24OrPanic(value *big.Int) Uint24 { if !value.IsUint64() || value.Uint64() > MaxUint24 { panic("big.Int value exceeds the max 
Uint24") } + // #nosec G115 return Uint24(value.Uint64()) } diff --git a/util/blobs/blobs.go b/util/blobs/blobs.go index 405c776bad..f5914edd2e 100644 --- a/util/blobs/blobs.go +++ b/util/blobs/blobs.go @@ -41,6 +41,7 @@ func fillBlobBits(blob []byte, data []byte) ([]byte, error) { accBits += 8 data = data[1:] } + // #nosec G115 blob[fieldElement*32] = uint8(acc & ((1 << spareBlobBits) - 1)) accBits -= spareBlobBits if accBits < 0 { @@ -88,6 +89,7 @@ func DecodeBlobs(blobs []kzg4844.Blob) ([]byte, error) { acc |= uint16(blob[fieldIndex*32]) << accBits accBits += spareBlobBits if accBits >= 8 { + // #nosec G115 rlpData = append(rlpData, uint8(acc)) acc >>= 8 accBits -= 8 diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go index 074d24338e..c8041dc871 100644 --- a/util/headerreader/header_reader.go +++ b/util/headerreader/header_reader.go @@ -340,6 +340,7 @@ func (s *HeaderReader) logIfHeaderIsOld() { if storedHeader == nil { return } + // #nosec G115 l1Timetamp := time.Unix(int64(storedHeader.Time), 0) headerTime := time.Since(l1Timetamp) if headerTime >= s.config().OldHeaderTimeout { diff --git a/util/merkletree/merkleTree.go b/util/merkletree/merkleTree.go index 1b15d51d98..fffa9bcabc 100644 --- a/util/merkletree/merkleTree.go +++ b/util/merkletree/merkleTree.go @@ -43,8 +43,8 @@ func NewLevelAndLeaf(level, leaf uint64) LevelAndLeaf { func (place LevelAndLeaf) ToBigInt() *big.Int { return new(big.Int).Add( - new(big.Int).Lsh(big.NewInt(int64(place.Level)), 192), - big.NewInt(int64(place.Leaf)), + new(big.Int).Lsh(new(big.Int).SetUint64(place.Level), 192), + new(big.Int).SetUint64(place.Leaf), ) } diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go index be5825a28d..a35d4b6665 100644 --- a/util/rpcclient/rpcclient.go +++ b/util/rpcclient/rpcclient.go @@ -101,7 +101,7 @@ func (c *RpcClient) Close() { } type limitedMarshal struct { - limit int + limit uint value any } @@ -113,16 +113,18 @@ func (m limitedMarshal) String() string { } else { str = string(marshalled) } - if m.limit == 0 || len(str) <= m.limit { + // #nosec G115 + limit := int(m.limit) + if m.limit <= 0 || len(str) <= limit { return str } prefix := str[:m.limit/2-1] - postfix := str[len(str)-m.limit/2+1:] + postfix := str[len(str)-limit/2+1:] return fmt.Sprintf("%v..%v", prefix, postfix) } type limitedArgumentsMarshal struct { - limit int + limit uint args []any } @@ -162,9 +164,9 @@ func (c *RpcClient) CallContext(ctx_in context.Context, result interface{}, meth return errors.New("not connected") } logId := c.logId.Add(1) - log.Trace("sending RPC request", "method", method, "logId", logId, "args", limitedArgumentsMarshal{int(c.config().ArgLogLimit), args}) + log.Trace("sending RPC request", "method", method, "logId", logId, "args", limitedArgumentsMarshal{c.config().ArgLogLimit, args}) var err error - for i := 0; i < int(c.config().Retries)+1; i++ { + for i := uint(0); i < c.config().Retries+1; i++ { retryDelay := c.config().RetryDelay if i > 0 && retryDelay > 0 { select { @@ -188,7 +190,7 @@ func (c *RpcClient) CallContext(ctx_in context.Context, result interface{}, meth cancelCtx() logger := log.Trace - limit := int(c.config().ArgLogLimit) + limit := c.config().ArgLogLimit if err != nil && !IsAlreadyKnownError(err) { logger = log.Info } diff --git a/util/sharedmetrics/sharedmetrics.go b/util/sharedmetrics/sharedmetrics.go index 377eef5352..9b4b3609bc 100644 --- a/util/sharedmetrics/sharedmetrics.go +++ b/util/sharedmetrics/sharedmetrics.go @@ -11,8 +11,10 @@ var ( ) func 
UpdateSequenceNumberGauge(sequenceNumber arbutil.MessageIndex) { + // #nosec G115 latestSequenceNumberGauge.Update(int64(sequenceNumber)) } func UpdateSequenceNumberInBlockGauge(sequenceNumber arbutil.MessageIndex) { + // #nosec G115 sequenceNumberInBlockGauge.Update(int64(sequenceNumber)) } diff --git a/util/stack/stack.go b/util/stack/stack.go new file mode 100644 index 0000000000..1b7ac3f9d9 --- /dev/null +++ b/util/stack/stack.go @@ -0,0 +1,39 @@ +// Copyright 2024, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +package stack + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/log" +) + +type Stack[T any] []T + +func NewStack[T any]() *Stack[T] { + return &Stack[T]{} +} + +func (s *Stack[T]) Push(v T) { + if s == nil { + log.Warn("trying to push to a nil stack") + return + } + *s = append(*s, v) +} + +func (s *Stack[T]) Pop() (T, error) { + if s == nil { + var zeroVal T + return zeroVal, fmt.Errorf("trying to pop a nil stack") + } + if len(*s) == 0 { + var zeroVal T + return zeroVal, fmt.Errorf("trying to pop an empty stack") + } + i := len(*s) - 1 + val := (*s)[i] + *s = (*s)[:i] + return val, nil +} diff --git a/util/testhelpers/testhelpers.go b/util/testhelpers/testhelpers.go index b1b08708e7..d681b422bf 100644 --- a/util/testhelpers/testhelpers.go +++ b/util/testhelpers/testhelpers.go @@ -65,6 +65,7 @@ func RandomCallValue(limit int64) *big.Int { // Computes a psuedo-random uint64 on the interval [min, max] func RandomUint32(min, max uint32) uint32 { + //#nosec G115 return uint32(RandomUint64(uint64(min), uint64(max))) } diff --git a/validator/client/redis/producer.go b/validator/client/redis/producer.go index b3ad0f8839..f98c246d0e 100644 --- a/validator/client/redis/producer.go +++ b/validator/client/redis/producer.go @@ -6,6 +6,7 @@ import ( "sync/atomic" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/log" "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/pubsub" @@ -32,11 +33,20 @@ func (c ValidationClientConfig) Enabled() bool { return c.RedisURL != "" } +func (c ValidationClientConfig) Validate() error { + for _, arch := range c.StylusArchs { + if !rawdb.Target(arch).IsValid() { + return fmt.Errorf("invalid stylus arch: %v", arch) + } + } + return nil +} + var DefaultValidationClientConfig = ValidationClientConfig{ Name: "redis validation client", Room: 2, RedisURL: "", - StylusArchs: []string{"wavm"}, + StylusArchs: []string{string(rawdb.TargetWavm)}, ProducerConfig: pubsub.DefaultProducerConfig, CreateStreams: true, } @@ -46,7 +56,7 @@ var TestValidationClientConfig = ValidationClientConfig{ Name: "redis validation client", Room: 2, RedisURL: "", StreamPrefix: "test-", - StylusArchs: []string{"wavm"}, + StylusArchs: []string{string(rawdb.TargetWavm)}, ProducerConfig: pubsub.TestProducerConfig, CreateStreams: false, } @@ -152,8 +162,12 @@ func (c *ValidationClient) Name() string { return c.config.Name } -func (c *ValidationClient) StylusArchs() []string { - return c.config.StylusArchs +func (c *ValidationClient) StylusArchs() []rawdb.Target { + stylusArchs := make([]rawdb.Target, 0, len(c.config.StylusArchs)) + for _, arch := range c.config.StylusArchs { + stylusArchs = append(stylusArchs, rawdb.Target(arch)) + } + return stylusArchs } func (c *ValidationClient) Room() int { diff --git a/validator/client/validation_client.go b/validator/client/validation_client.go index 05d947db3d..00bd992f46 100644 --- a/validator/client/validation_client.go +++ 
b/validator/client/validation_client.go @@ -8,7 +8,6 @@ import ( "encoding/base64" "errors" "fmt" - "runtime" "sync/atomic" "time" @@ -22,6 +21,7 @@ import ( "github.com/offchainlabs/nitro/validator/server_common" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/rpc" @@ -31,7 +31,7 @@ type ValidationClient struct { stopwaiter.StopWaiter client *rpcclient.RpcClient name string - stylusArchs []string + stylusArchs []rawdb.Target room atomic.Int32 wasmModuleRoots []common.Hash } @@ -40,7 +40,7 @@ func NewValidationClient(config rpcclient.ClientConfigFetcher, stack *node.Node) return &ValidationClient{ client: rpcclient.NewRpcClient(config, stack), name: "not started", - stylusArchs: []string{"not started"}, + stylusArchs: []rawdb.Target{"not started"}, } } @@ -67,20 +67,20 @@ func (c *ValidationClient) Start(ctx context.Context) error { if len(name) == 0 { return errors.New("couldn't read name from server") } - var stylusArchs []string + var stylusArchs []rawdb.Target if err := c.client.CallContext(ctx, &stylusArchs, server_api.Namespace+"_stylusArchs"); err != nil { var rpcError rpc.Error ok := errors.As(err, &rpcError) if !ok || rpcError.ErrorCode() != -32601 { return fmt.Errorf("could not read stylus arch from server: %w", err) } - stylusArchs = []string{"pre-stylus"} // validation does not support stylus + stylusArchs = []rawdb.Target{rawdb.Target("pre-stylus")} // invalid, will fail if trying to validate block with stylus } else { if len(stylusArchs) == 0 { return fmt.Errorf("could not read stylus archs from validation server") } for _, stylusArch := range stylusArchs { - if stylusArch != "wavm" && stylusArch != runtime.GOARCH && stylusArch != "mock" { + if stylusArch != rawdb.TargetWavm && stylusArch != rawdb.LocalTarget() && stylusArch != "mock" { return fmt.Errorf("unsupported stylus architecture: %v", stylusArch) } } @@ -102,6 +102,7 @@ func (c *ValidationClient) Start(ctx context.Context) error { } else { log.Info("connected to validation server", "name", name, "room", room) } + // #nosec G115 c.room.Store(int32(room)) c.wasmModuleRoots = moduleRoots c.name = name @@ -117,11 +118,11 @@ func (c *ValidationClient) WasmModuleRoots() ([]common.Hash, error) { return nil, errors.New("not started") } -func (c *ValidationClient) StylusArchs() []string { +func (c *ValidationClient) StylusArchs() []rawdb.Target { if c.Started() { return c.stylusArchs } - return []string{"not started"} + return []rawdb.Target{"not started"} } func (c *ValidationClient) Stop() { diff --git a/validator/interface.go b/validator/interface.go index 80aa2c1fcc..81b40ae5cf 100644 --- a/validator/interface.go +++ b/validator/interface.go @@ -4,6 +4,7 @@ import ( "context" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/offchainlabs/nitro/util/containers" ) @@ -13,7 +14,7 @@ type ValidationSpawner interface { Start(context.Context) error Stop() Name() string - StylusArchs() []string + StylusArchs() []rawdb.Target Room() int } diff --git a/validator/server_api/json.go b/validator/server_api/json.go index 90746e4c57..dbe2bb1fee 100644 --- a/validator/server_api/json.go +++ b/validator/server_api/json.go @@ -11,6 +11,7 @@ import ( "os" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/offchainlabs/nitro/arbcompress" "github.com/offchainlabs/nitro/arbutil" @@ -63,7 +64,7 @@ 
diff --git a/validator/server_api/json.go b/validator/server_api/json.go
index 90746e4c57..dbe2bb1fee 100644
--- a/validator/server_api/json.go
+++ b/validator/server_api/json.go
@@ -11,6 +11,7 @@ import (
 	"os"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 
 	"github.com/offchainlabs/nitro/arbcompress"
 	"github.com/offchainlabs/nitro/arbutil"
@@ -63,7 +64,7 @@ type InputJSON struct {
 	BatchInfo     []BatchInfoJson
 	DelayedMsgB64 string
 	StartState    validator.GoGlobalState
-	UserWasms     map[string]map[common.Hash]string
+	UserWasms     map[rawdb.Target]map[common.Hash]string
 	DebugChain    bool
 }
 
@@ -95,14 +96,14 @@ func ValidationInputToJson(entry *validator.ValidationInput) *InputJSON {
 		DelayedMsgB64: base64.StdEncoding.EncodeToString(entry.DelayedMsg),
 		StartState:    entry.StartState,
 		PreimagesB64:  jsonPreimagesMap,
-		UserWasms:     make(map[string]map[common.Hash]string),
+		UserWasms:     make(map[rawdb.Target]map[common.Hash]string),
 		DebugChain:    entry.DebugChain,
 	}
 	for _, binfo := range entry.BatchInfo {
 		encData := base64.StdEncoding.EncodeToString(binfo.Data)
 		res.BatchInfo = append(res.BatchInfo, BatchInfoJson{Number: binfo.Number, DataB64: encData})
 	}
-	for arch, wasms := range entry.UserWasms {
+	for target, wasms := range entry.UserWasms {
 		archWasms := make(map[common.Hash]string)
 		for moduleHash, data := range wasms {
 			compressed, err := arbcompress.CompressLevel(data, 1)
@@ -111,7 +112,7 @@ func ValidationInputToJson(entry *validator.ValidationInput) *InputJSON {
 			}
 			archWasms[moduleHash] = base64.StdEncoding.EncodeToString(compressed)
 		}
-		res.UserWasms[arch] = archWasms
+		res.UserWasms[target] = archWasms
 	}
 	return res
 }
@@ -127,7 +128,7 @@ func ValidationInputFromJson(entry *InputJSON) (*validator.ValidationInput, error) {
 		DelayedMsgNr: entry.DelayedMsgNr,
 		StartState:   entry.StartState,
 		Preimages:    preimages,
-		UserWasms:    make(map[string]map[common.Hash][]byte),
+		UserWasms:    make(map[rawdb.Target]map[common.Hash][]byte),
 		DebugChain:   entry.DebugChain,
 	}
 	delayed, err := base64.StdEncoding.DecodeString(entry.DelayedMsgB64)
@@ -146,7 +147,7 @@ func ValidationInputFromJson(entry *InputJSON) (*validator.ValidationInput, error) {
 		}
 		valInput.BatchInfo = append(valInput.BatchInfo, decInfo)
 	}
-	for arch, wasms := range entry.UserWasms {
+	for target, wasms := range entry.UserWasms {
 		archWasms := make(map[common.Hash][]byte)
 		for moduleHash, encoded := range wasms {
 			decoded, err := base64.StdEncoding.DecodeString(encoded)
@@ -171,7 +172,7 @@ func ValidationInputFromJson(entry *InputJSON) (*validator.ValidationInput, error) {
 			}
 			archWasms[moduleHash] = uncompressed
 		}
-		valInput.UserWasms[arch] = archWasms
+		valInput.UserWasms[target] = archWasms
 	}
 	return valInput, nil
 }
diff --git a/validator/server_arb/execution_run_test.go b/validator/server_arb/execution_run_test.go
index bdc1eefc4d..479db58515 100644
--- a/validator/server_arb/execution_run_test.go
+++ b/validator/server_arb/execution_run_test.go
@@ -194,7 +194,7 @@ func Test_machineHashesWithStep(t *testing.T) {
 			Batch:      1,
 			PosInBatch: mm.totalSteps - 1,
 		}))
-		if len(hashes) >= int(maxIterations) {
+		if uint64(len(hashes)) >= maxIterations {
 			t.Fatal("Wanted fewer hashes than the max iterations")
 		}
 		for i := range hashes {
diff --git a/validator/server_arb/machine_cache.go b/validator/server_arb/machine_cache.go
index 23fcdef6d6..55ef61cf11 100644
--- a/validator/server_arb/machine_cache.go
+++ b/validator/server_arb/machine_cache.go
@@ -239,6 +239,7 @@ func (c *MachineCache) getClosestMachine(stepCount uint64) (int, MachineInterface) {
 	if c.machineStepInterval == 0 || stepsFromStart > c.machineStepInterval*uint64(len(c.machines)-1) {
 		index = len(c.machines) - 1
 	} else {
+		// #nosec G115
 		index = int(stepsFromStart / c.machineStepInterval)
 	}
 	return index, c.machines[index]
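Per module hash, the JSON codec above stores each wasm brotli-compressed at level 1 and then base64-encoded. A round-trip sketch of that per-module transform, assuming the existing `arbcompress.Decompress(input, maxSize)` helper that the decode path relies on (the wrapper function names are ours):

```go
package example

import (
	"encoding/base64"

	"github.com/offchainlabs/nitro/arbcompress"
)

// encodeWasm mirrors ValidationInputToJson: brotli level 1, then base64.
func encodeWasm(wasm []byte) (string, error) {
	compressed, err := arbcompress.CompressLevel(wasm, 1)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(compressed), nil
}

// decodeWasm mirrors ValidationInputFromJson: base64, then decompress with
// an explicit upper bound on the decompressed size.
func decodeWasm(encoded string, maxLen int) ([]byte, error) {
	compressed, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		return nil, err
	}
	return arbcompress.Decompress(compressed, maxLen)
}
```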
diff --git a/validator/server_arb/validator_spawner.go b/validator/server_arb/validator_spawner.go
index 1d4126dc7c..eb53070303 100644
--- a/validator/server_arb/validator_spawner.go
+++ b/validator/server_arb/validator_spawner.go
@@ -21,6 +21,7 @@ import (
 	"github.com/offchainlabs/nitro/validator/valnode/redis"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 )
@@ -88,8 +89,8 @@ func (s *ArbitratorSpawner) WasmModuleRoots() ([]common.Hash, error) {
 	return s.locator.ModuleRoots(), nil
 }
 
-func (s *ArbitratorSpawner) StylusArchs() []string {
-	return []string{"wavm"}
+func (s *ArbitratorSpawner) StylusArchs() []rawdb.Target {
+	return []rawdb.Target{rawdb.TargetWavm}
 }
 
 func (s *ArbitratorSpawner) Name() string {
@@ -122,14 +123,14 @@ func (v *ArbitratorSpawner) loadEntryToMachine(ctx context.Context, entry *validator.ValidationInput, mach *ArbitratorMachine) error {
 			return fmt.Errorf("error while trying to add sequencer msg for proving: %w", err)
 		}
 	}
-	if len(entry.UserWasms["wavm"]) == 0 {
+	if len(entry.UserWasms[rawdb.TargetWavm]) == 0 {
 		for stylusArch, wasms := range entry.UserWasms {
 			if len(wasms) > 0 {
 				return fmt.Errorf("bad stylus arch loaded to machine. Expected wavm. Got: %s", stylusArch)
 			}
 		}
 	}
-	for moduleHash, module := range entry.UserWasms["wavm"] {
+	for moduleHash, module := range entry.UserWasms[rawdb.TargetWavm] {
 		err = mach.AddUserWasm(moduleHash, module)
 		if err != nil {
 			log.Error(
@@ -178,7 +179,10 @@ func (v *ArbitratorSpawner) execute(
 		}
 		steps += count
 	}
+
+	// #nosec G115
 	arbitratorValidationSteps.Update(int64(mach.GetStepCount()))
+
 	if mach.IsErrored() {
 		log.Error("machine entered errored state during attempted validation", "block", entry.Id)
 		return validator.GoGlobalState{}, errors.New("machine entered errored state during attempted validation")
diff --git a/validator/server_jit/jit_machine.go b/validator/server_jit/jit_machine.go
index e4fb840cbb..e7753748ab 100644
--- a/validator/server_jit/jit_machine.go
+++ b/validator/server_jit/jit_machine.go
@@ -9,13 +9,14 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"math"
 	"net"
 	"os"
 	"os/exec"
-	"runtime"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 
 	"github.com/offchainlabs/nitro/util/arbmath"
@@ -125,6 +126,13 @@ func (machine *JitMachine) prove(
 	writeUint32 := func(data uint32) error {
 		return writeExact(arbmath.Uint32ToBytes(data))
 	}
+	writeIntAsUint32 := func(data int) error {
+		if data < 0 || data > math.MaxUint32 {
+			return fmt.Errorf("attempted to write out-of-bounds int %v as uint32", data)
+		}
+		// #nosec G115
+		return writeUint32(uint32(data))
+	}
 	writeUint64 := func(data uint64) error {
 		return writeExact(arbmath.UintToBytes(data))
 	}
@@ -192,14 +200,14 @@ func (machine *JitMachine) prove(
 
 	// send known preimages
 	preimageTypes := entry.Preimages
-	if err := writeUint32(uint32(len(preimageTypes))); err != nil {
+	if err := writeIntAsUint32(len(preimageTypes)); err != nil {
 		return state, err
 	}
 	for ty, preimages := range preimageTypes {
 		if err := writeUint8(uint8(ty)); err != nil {
 			return state, err
 		}
-		if err := writeUint32(uint32(len(preimages))); err != nil {
+		if err := writeIntAsUint32(len(preimages)); err != nil {
 			return state, err
 		}
 		for hash, preimage := range preimages {
@@ -212,18 +220,19 @@ func (machine *JitMachine) prove(
 		}
 	}
 
-	userWasms := entry.UserWasms[runtime.GOARCH]
+	localTarget := rawdb.LocalTarget()
+	userWasms := entry.UserWasms[localTarget]
 
 	// if there are user wasms, but only for wrong architecture - error
 	if len(userWasms) == 0 {
 		for arch, userWasms := range entry.UserWasms {
 			if len(userWasms) != 0 {
-				return state, fmt.Errorf("bad stylus arch for validation input. got: %v, expected: %v", arch, runtime.GOARCH)
+				return state, fmt.Errorf("bad stylus arch for validation input. got: %v, expected: %v", arch, localTarget)
 			}
 		}
 	}
-	if err := writeUint32(uint32(len(userWasms))); err != nil {
+	if err := writeIntAsUint32(len(userWasms)); err != nil {
 		return state, err
 	}
 	for moduleHash, program := range userWasms {
@@ -300,6 +309,7 @@ func (machine *JitMachine) prove(
 		if memoryUsed > uint64(machine.wasmMemoryUsageLimit) {
 			log.Warn("memory used by jit wasm exceeds the wasm memory usage limit", "limit", machine.wasmMemoryUsageLimit, "memoryUsed", memoryUsed)
 		}
+		// #nosec G115
 		jitWasmMemoryUsage.Update(int64(memoryUsed))
 		return state, nil
 	default:
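`writeIntAsUint32` is the pattern this PR uses to silence gosec G115 honestly: check the bounds first, then annotate the now-provably-safe cast. Extracted as a standalone sketch (our helper name; like the original closure, it assumes a 64-bit `int`, since `v > math.MaxUint32` would not compile on a 32-bit platform):

```go
package example

import (
	"fmt"
	"math"
)

// checkedUint32 restates the writeIntAsUint32 guard: Go lengths are ints,
// the jit wire protocol sends uint32, so refuse any value that would not
// round-trip rather than silently truncating it.
func checkedUint32(v int) (uint32, error) {
	if v < 0 || v > math.MaxUint32 {
		return 0, fmt.Errorf("attempted to write out-of-bounds int %v as uint32", v)
	}
	// #nosec G115 -- bounds checked above
	return uint32(v), nil
}
```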
diff --git a/validator/server_jit/spawner.go b/validator/server_jit/spawner.go
index 5ba3664109..92b50b17cb 100644
--- a/validator/server_jit/spawner.go
+++ b/validator/server_jit/spawner.go
@@ -9,6 +9,7 @@ import (
 	flag "github.com/spf13/pflag"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 
 	"github.com/offchainlabs/nitro/util/stopwaiter"
 	"github.com/offchainlabs/nitro/validator"
@@ -71,8 +72,8 @@ func (v *JitSpawner) WasmModuleRoots() ([]common.Hash, error) {
 	return v.locator.ModuleRoots(), nil
 }
 
-func (v *JitSpawner) StylusArchs() []string {
-	return []string{runtime.GOARCH}
+func (v *JitSpawner) StylusArchs() []rawdb.Target {
+	return []rawdb.Target{rawdb.LocalTarget()}
 }
 
 func (v *JitSpawner) execute(
diff --git a/validator/validation_entry.go b/validator/validation_entry.go
index 133a67a8a8..2c357659ad 100644
--- a/validator/validation_entry.go
+++ b/validator/validation_entry.go
@@ -2,6 +2,7 @@ package validator
 
 import (
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/offchainlabs/nitro/arbutil"
 )
 
@@ -16,7 +17,7 @@ type ValidationInput struct {
 	HasDelayedMsg bool
 	DelayedMsgNr  uint64
 	Preimages     map[arbutil.PreimageType]map[common.Hash][]byte
-	UserWasms     map[string]map[common.Hash][]byte
+	UserWasms     map[rawdb.Target]map[common.Hash][]byte
 	BatchInfo     []BatchInfo
 	DelayedMsg    []byte
 	StartState    GoGlobalState
diff --git a/validator/valnode/validation_api.go b/validator/valnode/validation_api.go
index 6245ffc5e3..a79ac7fa55 100644
--- a/validator/valnode/validation_api.go
+++ b/validator/valnode/validation_api.go
@@ -12,6 +12,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 
 	"github.com/offchainlabs/nitro/util/stopwaiter"
 	"github.com/offchainlabs/nitro/validator"
@@ -44,7 +45,7 @@ func (a *ValidationServerAPI) WasmModuleRoots() ([]common.Hash, error) {
 	return a.spawner.WasmModuleRoots()
 }
 
-func (a *ValidationServerAPI) StylusArchs() ([]string, error) {
+func (a *ValidationServerAPI) StylusArchs() ([]rawdb.Target, error) {
 	return a.spawner.StylusArchs(), nil
 }
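With `StylusArchs` now returning typed `rawdb.Target` values everywhere — `rawdb.TargetWavm` for the arbitrator spawner, `rawdb.LocalTarget()` for the JIT — callers can match targets without stringly-typed comparisons. A hypothetical helper (not part of this diff) built on the `ValidationSpawner` interface shown earlier:

```go
package example

import (
	"github.com/ethereum/go-ethereum/core/rawdb"

	"github.com/offchainlabs/nitro/validator"
)

// supportsTarget reports whether a spawner advertises a required
// compilation target, e.g. rawdb.TargetWavm for arbitrator validation
// or rawdb.LocalTarget() for jit validation.
func supportsTarget(spawner validator.ValidationSpawner, want rawdb.Target) bool {
	for _, t := range spawner.StylusArchs() {
		if t == want {
			return true
		}
	}
	return false
}
```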
diff --git a/wavmio/stub.go b/wavmio/stub.go
index 7fd29e2062..1395fb4235 100644
--- a/wavmio/stub.go
+++ b/wavmio/stub.go
@@ -60,13 +60,13 @@ func parsePreimageBytes(path string) {
 		if read != len(lenBuf) {
 			panic(fmt.Sprintf("missing bytes reading len got %d", read))
 		}
-		fieldSize := int(binary.LittleEndian.Uint64(lenBuf))
+		fieldSize := binary.LittleEndian.Uint64(lenBuf)
 		dataBuf := make([]byte, fieldSize)
 		read, err = file.Read(dataBuf)
 		if err != nil {
 			panic(err)
 		}
-		if read != fieldSize {
+		if uint64(read) != fieldSize {
 			panic("missing bytes reading data")
 		}
 		hash := crypto.Keccak256Hash(dataBuf)
@@ -125,7 +125,7 @@ func ReadInboxMessage(msgNum uint64) []byte {
 }
 
 func ReadDelayedInboxMessage(seqNum uint64) []byte {
-	if seqNum < delayedMsgFirstPos || (int(seqNum-delayedMsgFirstPos) > len(delayedMsgs)) {
+	if seqNum < delayedMsgFirstPos || (seqNum-delayedMsgFirstPos > uint64(len(delayedMsgs))) {
 		panic(fmt.Sprintf("trying to read bad delayed msg %d", seqNum))
 	}
 	return delayedMsgs[seqNum-delayedMsgFirstPos]
diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go
index 16a8f64daf..00ae0f0dcf 100644
--- a/wsbroadcastserver/clientconnection.go
+++ b/wsbroadcastserver/clientconnection.go
@@ -135,6 +135,7 @@ func (cc *ClientConnection) writeBacklog(ctx context.Context, segment backlog.BacklogSegment) error {
 		msgs := prevSegment.Messages()
 		if isFirstSegment && prevSegment.Contains(uint64(cc.requestedSeqNum)) {
+			// #nosec G115
 			requestedIdx := int(cc.requestedSeqNum) - int(prevSegment.Start())
 			// This might be false if messages were added after we fetched the segment's messages
 			if len(msgs) >= requestedIdx {
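For reference, the preimage file that `parsePreimageBytes` walks is a sequence of length-prefixed records: an 8-byte little-endian length, then that many data bytes. A sketch of reading one record with `io.ReadFull` (our helper, not part of the diff; the real loop reads from an `*os.File` directly and panics on short reads instead of returning errors):

```go
package example

import (
	"encoding/binary"
	"io"
)

// readRecord reads one length-prefixed record: 8-byte little-endian
// length, then exactly that many bytes of data. io.ReadFull makes the
// length comparison exact without manual short-read bookkeeping.
func readRecord(r io.Reader) ([]byte, error) {
	var lenBuf [8]byte
	if _, err := io.ReadFull(r, lenBuf[:]); err != nil {
		return nil, err
	}
	fieldSize := binary.LittleEndian.Uint64(lenBuf[:])
	data := make([]byte, fieldSize)
	if _, err := io.ReadFull(r, data); err != nil {
		return nil, err
	}
	return data, nil
}
```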