From 6ff2edc4166ab65c6243247da2371e203d643b21 Mon Sep 17 00:00:00 2001
From: andri lim
Date: Wed, 21 Feb 2024 23:04:59 +0700
Subject: [PATCH] Fix styles (#2046)

* Fix styles

* Fix copyright year
---
 nimbus/core/clique/clique_genvote.nim | 4 +-
 nimbus/db/aristo/aristo_check/check_be.nim | 6 +--
 nimbus/db/aristo/aristo_debug.nim | 14 +++----
 nimbus/db/ledger/accounts_cache.nim | 4 +-
 nimbus/db/ledger/accounts_ledger.nim | 4 +-
 nimbus/db/ledger/backend/accounts_cache.nim | 4 +-
 nimbus/db/ledger/backend/accounts_ledger.nim | 8 ++--
 nimbus/db/ledger/base.nim | 4 +-
 nimbus/db/ledger/base/base_desc.nim | 2 +-
 nimbus/evm/state.nim | 2 +-
 nimbus/rpc/experimental.nim | 4 +-
 nimbus/sync/snap/worker/db/hexary_debug.nim | 8 ++--
 stateless/does-not-compile/json_from_tree.nim | 6 +--
 .../does-not-compile/json_witness_gen.nim | 8 ++--
 stateless/multi_keys.nim | 32 ++++++++--------
 stateless/test_block_witness.nim | 8 ++--
 stateless/test_witness_json.nim | 2 +-
 stateless/test_witness_keys.nim | 10 ++---
 stateless/test_witness_verification.nim | 4 +-
 stateless/witness_from_tree.nim | 6 +--
 tests/test_aristo/test_backend.nim | 2 +-
 tests/test_aristo/test_filter.nim | 4 +-
 tests/test_aristo/test_tx.nim | 2 +-
 tests/test_blockchain_json.nim | 2 +-
 tests/test_clique/pool.nim | 2 +-
 tests/test_configuration.nim | 4 +-
 tests/test_sync_snap.nim | 2 +-
 tests/test_sync_snap/test_helpers.nim | 4 +-
 tests/test_sync_snap/test_node_range.nim | 8 ++--
 tests/test_txpool.nim | 38 +++++++++----------
 tests/test_txpool/helpers.nim | 4 +-
 tests/test_txpool/setup.nim | 6 +--
 tests/test_txpool2.nim | 8 ++--
 33 files changed, 113 insertions(+), 113 deletions(-)

diff --git a/nimbus/core/clique/clique_genvote.nim b/nimbus/core/clique/clique_genvote.nim
index fc3d8a6dfc..908cba783f 100644
--- a/nimbus/core/clique/clique_genvote.nim
+++ b/nimbus/core/clique/clique_genvote.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -196,7 +196,7 @@ proc cliqueGenvote*(
   ##
   ##   [..]
   ##
-  c.clique_genvote(voter, seal,
+  c.cliqueGenvote(voter, seal,
     parent = c.cfg.db.getCanonicalHead,
     elapsed = elapsed,
     voteInOk = voteInOk,
diff --git a/nimbus/db/aristo/aristo_check/check_be.nim b/nimbus/db/aristo/aristo_check/check_be.nim
index f40f84d03d..ea9e2f3c3f 100644
--- a/nimbus/db/aristo/aristo_check/check_be.nim
+++ b/nimbus/db/aristo/aristo_check/check_be.nim
@@ -93,7 +93,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
     discard vids.merge Interval[VertexID,uint64].new(
       VertexID(LEAST_FREE_VID),high(VertexID))
 
-  for (vid,vtx) in T.walkVtxBE db:
+  for (vid,vtx) in T.walkVtxBe db:
     if not vtx.isValid:
       return err((vid,CheckBeVtxInvalid))
     let rc = db.getKeyBE vid
@@ -115,8 +115,8 @@
       if vtx.ePfx.len == 0:
         return err((vid,CheckBeVtxExtPfxMissing))
 
-  for (vid,key) in T.walkKeyBE db:
-    if not key.isvalid:
+  for (vid,key) in T.walkKeyBe db:
+    if not key.isValid:
       return err((vid,CheckBeKeyInvalid))
     let vtx = db.getVtxBE(vid).valueOr:
       return err((vid,CheckBeVtxMissing))
diff --git a/nimbus/db/aristo/aristo_debug.nim b/nimbus/db/aristo/aristo_debug.nim
index b9e64b7bd6..f45615ffcd 100644
--- a/nimbus/db/aristo/aristo_debug.nim
+++ b/nimbus/db/aristo/aristo_debug.nim
@@ -40,7 +40,7 @@ proc del(xMap: var VidsByKeyTab; key: HashKey; vids: HashSet[VertexID]) =
   for vid in vids:
     xMap.del(key, vid)
 
-proc add(xMap: var VidsByKeyTab; key: Hashkey; vid: VertexID) =
+proc add(xMap: var VidsByKeyTab; key: HashKey; vid: VertexID) =
   xMap.withValue(key,value):
     value[].incl vid
   do: # else if not found
@@ -354,7 +354,7 @@ proc ppXMap*(
   # Extra reverse lookups
   if 0 < revKeys.len:
     proc ppRevKey(vid: VertexID): string =
-      "(ø," & revOnly.getOrVoid(vid).ppkey(db) & ")"
+      "(ø," & revOnly.getOrVoid(vid).ppKey(db) & ")"
     var (i, r) = (0, revKeys[0])
     result &= revKeys[0].ppRevKey
     for n in 1 ..< revKeys.len:
@@ -517,14 +517,14 @@ proc ppLayer(
     result &= info.doPrefix(0 < tLen) & layer.delta.sTab.ppSTab(db,indent+2)
   if lTabOk:
     let
-      tlen = layer.final.lTab.len
+      tLen = layer.final.lTab.len
       info = "lTab(" & $tLen & ")"
     result &= info.doPrefix(0 < tLen) & layer.final.lTab.ppLTab(db,indent+2)
   if kMapOk:
     let
       tLen = layer.delta.kMap.len
-      ulen = layer.delta.pAmk.len
-      lInf = if tLen == uLen: $tLen else: $tLen & "," & $ulen
+      uLen = layer.delta.pAmk.len
+      lInf = if tLen == uLen: $tLen else: $tLen & "," & $uLen
       info = "kMap(" & lInf & ")"
     result &= info.doPrefix(0 < tLen + uLen)
     result &= db.ppXMap(layer.delta.kMap, layer.delta.pAmk, indent+2)
@@ -691,14 +691,14 @@ proc pp*(
     indent = 4;
       ): string =
   let db = db.orDefault
-  "{" & pAmk.sortedkeys
+  "{" & pAmk.sortedKeys
     .mapIt((it, pAmk.getOrVoid it))
     .mapIt("(" & it[0].ppKey(db) & "," & it[1].ppVid & ")")
     .join("," & indent.toPfx(1)) & "}"
 
 proc pp*(pAmk: VidsByKeyTab; db = AristoDbRef(nil); indent = 4): string =
   let db = db.orDefault
-  "{" & pAmk.sortedkeys
+  "{" & pAmk.sortedKeys
     .mapIt((it, pAmk.getOrVoid it))
     .mapIt("(" & it[0].ppKey(db) & "," & it[1].ppVids & ")")
     .join("," & indent.toPfx(1)) & "}"
diff --git a/nimbus/db/ledger/accounts_cache.nim b/nimbus/db/ledger/accounts_cache.nim
index 0d79854ea7..28cf5db64f 100644
--- a/nimbus/db/ledger/accounts_cache.nim
+++ b/nimbus/db/ledger/accounts_cache.nim
@@ -680,14 +680,14 @@ proc collectWitnessData*(ac: AccountsCache) =
   do:
     ac.witnessCache[address] = witnessData(acc)
 
-func multiKeys(slots: HashSet[UInt256]): MultikeysRef =
+func multiKeys(slots: HashSet[UInt256]): MultiKeysRef =
   if slots.len == 0: return
   new result
   for x in slots:
     result.add x.toBytesBE
   result.sort()
 
-proc makeMultiKeys*(ac: AccountsCache): MultikeysRef =
+proc makeMultiKeys*(ac: AccountsCache): MultiKeysRef =
   # this proc is called after we done executing a block
   new result
   for k, v in ac.witnessCache:
diff --git a/nimbus/db/ledger/accounts_ledger.nim b/nimbus/db/ledger/accounts_ledger.nim
index 6ededbfa76..bcf9b28841 100644
--- a/nimbus/db/ledger/accounts_ledger.nim
+++ b/nimbus/db/ledger/accounts_ledger.nim
@@ -715,14 +715,14 @@ proc collectWitnessData*(ac: AccountsLedgerRef) =
   do:
     ac.witnessCache[address] = witnessData(acc)
 
-func multiKeys(slots: HashSet[UInt256]): MultikeysRef =
+func multiKeys(slots: HashSet[UInt256]): MultiKeysRef =
   if slots.len == 0: return
   new result
   for x in slots:
     result.add x.toBytesBE
   result.sort()
 
-proc makeMultiKeys*(ac: AccountsLedgerRef): MultikeysRef =
+proc makeMultiKeys*(ac: AccountsLedgerRef): MultiKeysRef =
   # this proc is called after we done executing a block
   new result
   for k, v in ac.witnessCache:
diff --git a/nimbus/db/ledger/backend/accounts_cache.nim b/nimbus/db/ledger/backend/accounts_cache.nim
index 57c5a90087..10993566cc 100644
--- a/nimbus/db/ledger/backend/accounts_cache.nim
+++ b/nimbus/db/ledger/backend/accounts_cache.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2023 Status Research & Development GmbH
+# Copyright (c) 2023-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -129,7 +129,7 @@ proc ledgerMethods(lc: impl.AccountsCache): LedgerFns =
     logEntriesFn: proc(): seq[Log] =
       lc.logEntries(),
 
-    makeMultiKeysFn: proc(): MultikeysRef =
+    makeMultiKeysFn: proc(): MultiKeysRef =
       lc.makeMultiKeys(),
 
     persistFn: proc(clearEmptyAccount: bool, clearCache: bool) =
diff --git a/nimbus/db/ledger/backend/accounts_ledger.nim b/nimbus/db/ledger/backend/accounts_ledger.nim
index 0aa9f36594..9d0cac0b66 100644
--- a/nimbus/db/ledger/backend/accounts_ledger.nim
+++ b/nimbus/db/ledger/backend/accounts_ledger.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2023 Status Research & Development GmbH
+# Copyright (c) 2023-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -27,7 +27,7 @@ func savePoint(sp: LedgerSpRef): impl.LedgerSavePoint =
   wrp.LedgerSavePoint(sp).sp
 
 # ----------------
- 
+
 proc ledgerMethods(lc: impl.AccountsLedgerRef): LedgerFns =
   LedgerFns(
     accessListFn: proc(eAddr: EthAddress) =
@@ -47,7 +47,7 @@ proc ledgerMethods(lc: impl.AccountsLedgerRef): LedgerFns =
 
     beginSavepointFn: proc(): LedgerSpRef =
       wrp.LedgerSavePoint(sp: lc.beginSavepoint()),
- 
+
     clearStorageFn: proc(eAddr: EthAddress) =
       lc.clearStorage(eAddr),
 
@@ -120,7 +120,7 @@ proc ledgerMethods(lc: impl.AccountsLedgerRef): LedgerFns =
     logEntriesFn: proc(): seq[Log] =
       lc.logEntries(),
 
-    makeMultiKeysFn: proc(): MultikeysRef =
+    makeMultiKeysFn: proc(): MultiKeysRef =
       lc.makeMultiKeys(),
 
     persistFn: proc(clearEmptyAccount: bool, clearCache: bool) =
diff --git a/nimbus/db/ledger/base.nim b/nimbus/db/ledger/base.nim
index 6e45927749..b6395c81a4 100644
--- a/nimbus/db/ledger/base.nim
+++ b/nimbus/db/ledger/base.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2023 Status Research & Development GmbH
+# Copyright (c) 2023-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -266,7 +266,7 @@ proc logEntries*(ldg: LedgerRef): seq[Log] =
   result = ldg.methods.logEntriesFn()
   ldg.ifTrackApi: debug apiTxt, ctx, elapsed, result
 
-proc makeMultiKeys*(ldg: LedgerRef): MultikeysRef =
+proc makeMultiKeys*(ldg: LedgerRef): MultiKeysRef =
   ldg.beginTrackApi LdgMakeMultiKeysFn
   result = ldg.methods.makeMultiKeysFn()
   ldg.ifTrackApi: debug apiTxt, ctx, elapsed
diff --git a/nimbus/db/ledger/base/base_desc.nim b/nimbus/db/ledger/base/base_desc.nim
index 2d9b91aa7c..ccf87fa008 100644
--- a/nimbus/db/ledger/base/base_desc.nim
+++ b/nimbus/db/ledger/base/base_desc.nim
@@ -73,7 +73,7 @@ type
   IsEmptyAccountFn* = proc(eAddr: EthAddress): bool {.noRaise.}
   IsTopLevelCleanFn* = proc(): bool {.noRaise.}
   LogEntriesFn* = proc(): seq[Log] {.noRaise.}
-  MakeMultiKeysFn* = proc(): MultikeysRef {.noRaise.}
+  MakeMultiKeysFn* = proc(): MultiKeysRef {.noRaise.}
   PersistFn* = proc(clearEmptyAccount: bool, clearCache: bool) {.noRaise.}
   RipemdSpecialFn* = proc() {.noRaise.}
   RollbackFn* = proc(sp: LedgerSpRef) {.noRaise.}
diff --git a/nimbus/evm/state.nim b/nimbus/evm/state.nim
index db152615ad..49f8f33b02 100644
--- a/nimbus/evm/state.nim
+++ b/nimbus/evm/state.nim
@@ -292,7 +292,7 @@ proc `generateWitness=`*(vmState: BaseVMState, status: bool) =
 
 proc buildWitness*(
     vmState: BaseVMState,
-    mkeys: MultikeysRef): seq[byte] {.raises: [CatchableError].} =
+    mkeys: MultiKeysRef): seq[byte] {.raises: [CatchableError].} =
   let rootHash = vmState.stateDB.rootHash
   let flags = if vmState.fork >= FkSpurious: {wfEIP170} else: {}
 
diff --git a/nimbus/rpc/experimental.nim b/nimbus/rpc/experimental.nim
index 7f7a1b0b22..1b515cc7db 100644
--- a/nimbus/rpc/experimental.nim
+++ b/nimbus/rpc/experimental.nim
@@ -32,7 +32,7 @@ type
 proc getBlockWitness*(
     com: CommonRef,
     blockHeader: BlockHeader,
-    statePostExecution: bool): (MultikeysRef, BlockWitness)
+    statePostExecution: bool): (MultiKeysRef, BlockWitness)
     {.raises: [RlpError, BlockNotFound, ValueError, CatchableError].} =
 
   let
@@ -67,7 +67,7 @@ proc getBlockWitness*(
 
 proc getBlockProofs*(
     accDB: ReadOnlyStateDB,
-    mkeys: MultikeysRef): seq[ProofResponse] {.raises: [RlpError].} =
+    mkeys: MultiKeysRef): seq[ProofResponse] {.raises: [RlpError].} =
 
   var blockProofs = newSeq[ProofResponse]()
 
diff --git a/nimbus/sync/snap/worker/db/hexary_debug.nim b/nimbus/sync/snap/worker/db/hexary_debug.nim
index 07d8dfd324..badc71b939 100644
--- a/nimbus/sync/snap/worker/db/hexary_debug.nim
+++ b/nimbus/sync/snap/worker/db/hexary_debug.nim
@@ -61,7 +61,7 @@ proc ppImpl(s: string; hex = false): string =
 
 proc ppImpl(key: RepairKey; db: HexaryTreeDbRef): string =
   if key.isZero: return "ø"
-  if not key.isNodekey:
+  if not key.isNodeKey:
     var num: uint64
     (addr num).copyMem(unsafeAddr key.ByteArray33[25], 8)
     return "%" & $num
@@ -146,7 +146,7 @@ proc ppImpl(db: HexaryTreeDbRef; root: NodeKey): seq[string] =
   accu.add @[(0u64, "($0" & "," & root.ppImpl(db) & ")")]
   for key,node in db.tab.pairs:
     accu.add (
-      key.ppImpl(db).tokey,
+      key.ppImpl(db).toKey,
       "(" & key.ppImpl(db) & "," & node.ppImpl(db) & ")")
   accu.sorted(cmpIt).mapIt(it[1])
 
@@ -420,7 +420,7 @@ proc fillFromLeft(
 
   # Find first least path
   var
-    here = XPath(root: rootKey).pathLeast(rootkey.to(Blob), getFn)
+    here = XPath(root: rootKey).pathLeast(rootKey.to(Blob), getFn)
     countSteps = 0
 
   if 0 < here.path.len:
@@ -456,7 +456,7 @@ proc fillFromRight(
 
   # Find first least path
  var
-    here = XPath(root: rootKey).pathMost(rootkey.to(Blob), getFn)
+    here = XPath(root: rootKey).pathMost(rootKey.to(Blob), getFn)
     countSteps = 0
 
   if 0 < here.path.len:
diff --git a/stateless/does-not-compile/json_from_tree.nim b/stateless/does-not-compile/json_from_tree.nim
index c8f58a4779..c2c7f16c41 100644
--- a/stateless/does-not-compile/json_from_tree.nim
+++ b/stateless/does-not-compile/json_from_tree.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2020-2023 Status Research & Development GmbH
+# Copyright (c) 2020-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -28,7 +28,7 @@ type
   StackElem = object
     node: seq[byte]
     parentGroup: Group
-    keys: MultikeysRef
+    keys: MultiKeysRef
    depth: int
     storageMode: bool
 
@@ -333,7 +333,7 @@ proc getBranchRecurse(wb: var WitnessBuilder, z: var StackElem) =
     raise newException(CorruptedTrieDatabase,
       "HexaryTrie node with an unexpected number of children")
 
-proc buildWitness*(wb: var WitnessBuilder, keys: MultikeysRef): string =
+proc buildWitness*(wb: var WitnessBuilder, keys: MultiKeysRef): string =
   # witness version
   wb.writeByte(BlockWitnessVersion, "version")
 
diff --git a/stateless/does-not-compile/json_witness_gen.nim b/stateless/does-not-compile/json_witness_gen.nim
index 3b34239e1e..fbb3c380d0 100644
--- a/stateless/does-not-compile/json_witness_gen.nim
+++ b/stateless/does-not-compile/json_witness_gen.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2020-2023 Status Research & Development GmbH
+# Copyright (c) 2020-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -18,7 +18,7 @@ import
 
 type
   DB = TrieDatabaseRef
-  StorageKeys = tuple[storageRoot: Hash256, keys: MultikeysRef]
+  StorageKeys = tuple[storageRoot: Hash256, keys: MultiKeysRef]
 
   AccountDef = object
     storageKeys: MultiKeysRef
@@ -47,7 +47,7 @@ proc randCode(db: DB): Hash256 =
 
 proc randStorage(db: DB, numSlots: int): StorageKeys =
   if rand(0..1) == 0 or numSlots == 0:
-    result = (emptyRlpHash, MultikeysRef(nil))
+    result = (emptyRlpHash, MultiKeysRef(nil))
   else:
     var trie = initStorageTrie(db)
     var keys = newSeq[StorageSlot](numSlots)
@@ -57,7 +57,7 @@ proc randStorage(db: DB, numSlots: int): StorageKeys =
       trie.putSlotBytes(keys[i], rlp.encode(randU256()))
 
     if rand(0..1) == 0:
-      result = (trie.rootHash, MultikeysRef(nil))
+      result = (trie.rootHash, MultiKeysRef(nil))
     else:
      var m = newMultikeys(keys)
       result = (trie.rootHash, m)
diff --git a/stateless/multi_keys.nim b/stateless/multi_keys.nim
index 4dbbe3edc7..710b510839 100644
--- a/stateless/multi_keys.nim
+++ b/stateless/multi_keys.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2020-2023 Status Research & Development GmbH
+# Copyright (c) 2020-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -22,14 +22,14 @@ type
     of true:
       storageSlot*: StorageSlot
     of false:
-      storageKeys*: MultikeysRef
+      storageKeys*: MultiKeysRef
       address*: EthAddress
       codeTouched*: bool
 
-  Multikeys* = object
+  MultiKeys* = object
     keys*: seq[KeyData]
 
-  MultikeysRef* = ref Multikeys
+  MultiKeysRef* = ref MultiKeys
 
   Group* = object
     first*, last*: int16
@@ -41,7 +41,7 @@ type
   AccountKey* = object
     address*: EthAddress
     codeTouched*: bool
-    storageKeys*: MultikeysRef
+    storageKeys*: MultiKeysRef
 
   MatchGroup* = object
     match*: bool
@@ -73,8 +73,8 @@ func compareNibbles(x: openArray[byte], start: int, n: NibblesSeq): bool =
     inc i
   result = true
 
-proc newMultiKeys*(keys: openArray[AccountKey]): MultikeysRef =
-  result = new Multikeys
+proc newMultiKeys*(keys: openArray[AccountKey]): MultiKeysRef =
+  result = new MultiKeysRef
   result.keys = newSeq[KeyData](keys.len)
   for i, a in keys:
     result.keys[i] = KeyData(
@@ -85,15 +85,15 @@ proc newMultiKeys*(keys: openArray[AccountKey]): MultikeysRef =
       storageKeys: a.storageKeys)
   result.keys.sort(cmpHash)
 
-proc newMultiKeys*(keys: openArray[StorageSlot]): MultikeysRef =
-  result = new Multikeys
+proc newMultiKeys*(keys: openArray[StorageSlot]): MultiKeysRef =
+  result = new MultiKeysRef
   result.keys = newSeq[KeyData](keys.len)
   for i, a in keys:
     result.keys[i] = KeyData(storageMode: true, hash: keccakHash(a).data, storageSlot: a)
   result.keys.sort(cmpHash)
 
 # never mix storageMode!
-proc add*(m: MultikeysRef, address: EthAddress, codeTouched: bool, storageKeys = MultikeysRef(nil)) =
+proc add*(m: MultiKeysRef, address: EthAddress, codeTouched: bool, storageKeys = MultiKeysRef(nil)) =
   m.keys.add KeyData(
     storageMode: false,
     hash: keccakHash(address).data,
@@ -101,17 +101,17 @@ proc add*(m: MultikeysRef, address: EthAddress, codeTouched: bool, storageKeys =
     codeTouched: codeTouched,
     storageKeys: storageKeys)
 
-proc add*(m: MultikeysRef, slot: StorageSlot) =
+proc add*(m: MultiKeysRef, slot: StorageSlot) =
   m.keys.add KeyData(storageMode: true, hash: keccakHash(slot).data, storageSlot: slot)
 
-proc sort*(m: MultikeysRef) =
+proc sort*(m: MultiKeysRef) =
   m.keys.sort(cmpHash)
 
-func initGroup*(m: MultikeysRef): Group =
+func initGroup*(m: MultiKeysRef): Group =
   type T = type result.last
   result = Group(first: 0.T, last: (m.keys.len - 1).T)
 
-func groups*(m: MultikeysRef, parentGroup: Group, depth: int): BranchGroup =
+func groups*(m: MultiKeysRef, parentGroup: Group, depth: int): BranchGroup =
   # similar to a branch node, the product of this func
   # is a 16 bits bitmask and an array of max 16 groups
   # if the bit is set, the n-th elem of array have a group
@@ -133,7 +133,7 @@ func groups*(m: MultikeysRef, parentGroup: Group, depth: int): BranchGroup =
       setBranchMaskBit(result.mask, nibble.int)
       result.groups[nibble.int] = g
 
-func groups*(m: MultikeysRef, depth: int, n: NibblesSeq, parentGroup: Group): MatchGroup =
+func groups*(m: MultiKeysRef, depth: int, n: NibblesSeq, parentGroup: Group): MatchGroup =
   # using common-prefix comparison, this func
   # will produce one match group or no match at all
   var g = Group(first: parentGroup.first)
@@ -178,7 +178,7 @@ func groups*(m: MultikeysRef, depth: int, n: NibblesSeq, parentGroup: Group): Ma
 func isValidMatch(mg: MatchGroup): bool {.inline.} =
   result = mg.match and mg.group.first == mg.group.last
 
-proc visitMatch*(m: var MultikeysRef, mg: MatchGroup, depth: int): KeyData =
+proc visitMatch*(m: var MultiKeysRef, mg: MatchGroup, depth: int): KeyData =
   doAssert(mg.isValidMatch, "Multiple identical keys are not allowed")
   m.keys[mg.group.first].visited = true
   result = m.keys[mg.group.first]
diff --git a/stateless/test_block_witness.nim b/stateless/test_block_witness.nim
index 947f4c2522..46ca007ef3 100644
--- a/stateless/test_block_witness.nim
+++ b/stateless/test_block_witness.nim
@@ -19,15 +19,15 @@ import
 
 type
   Tester = object
-    keys: MultikeysRef
+    keys: MultiKeysRef
     memDB: CoreDbRef
 
 proc testGetBranch(tester: Tester, rootHash: KeccakHash, testStatusIMPL: var TestStatus) =
-  var trie = initAccountsTrie(tester.memdb, rootHash)
+  var trie = initAccountsTrie(tester.memDB, rootHash)
   let flags = {wfNoFlag}
 
   try:
-    var wb = initWitnessBuilder(tester.memdb, rootHash, flags)
+    var wb = initWitnessBuilder(tester.memDB, rootHash, flags)
     var witness = wb.buildWitness(tester.keys)
     var db = newCoreDbRef(LegacyDbMemory)
 
@@ -77,7 +77,7 @@ proc setupStateDB(tester: var Tester, wantedState: JsonNode, stateDB: LedgerRef)
     stateDB.setCode(account, code)
     stateDB.setBalance(account, balance)
 
-    let sKeys = if storageKeys.len != 0: newMultiKeys(storageKeys) else: MultikeysRef(nil)
+    let sKeys = if storageKeys.len != 0: newMultiKeys(storageKeys) else: MultiKeysRef(nil)
     let codeTouched = code.len > 0
     keys.add(AccountKey(address: account, codeTouched: codeTouched, storageKeys: sKeys))
 
diff --git a/stateless/test_witness_json.nim b/stateless/test_witness_json.nim
index b7bff81c6d..9ba7ab2f2d 100644
--- a/stateless/test_witness_json.nim
+++ b/stateless/test_witness_json.nim
@@ -152,7 +152,7 @@ proc writeFuzzData(filePath, fileName: string) =
   var tb = initTreeBuilder(t.output, db, {wfNoFlag})
   discard tb.buildTree()
 
-  writeFile(filename, t.output)
+  writeFile(fileName, t.output)
 
 proc fuzzTool(): bool {.used.} =
   var filename: string
diff --git a/stateless/test_witness_keys.nim b/stateless/test_witness_keys.nim
index 801b9814d2..1a25237203 100644
--- a/stateless/test_witness_keys.nim
+++ b/stateless/test_witness_keys.nim
@@ -18,7 +18,7 @@ import
 
 type
   DB = CoreDbRef
-  StorageKeys = tuple[storageRoot: Hash256, keys: MultikeysRef]
+  StorageKeys = tuple[storageRoot: Hash256, keys: MultiKeysRef]
 
   AccountDef = object
     storageKeys: MultiKeysRef
@@ -47,7 +47,7 @@ proc randCode(db: DB): Hash256 =
 
 proc randStorage(db: DB): StorageKeys =
   if rand(0..1) == 0:
-    result = (emptyRlpHash, MultikeysRef(nil))
+    result = (emptyRlpHash, MultiKeysRef(nil))
   else:
     var trie = initStorageTrie(db)
     let numPairs = rand(1..10)
@@ -58,9 +58,9 @@ proc randStorage(db: DB): StorageKeys =
       trie.putSlotBytes(keys[i], rlp.encode(randU256()))
 
     if rand(0..1) == 0:
-      result = (trie.rootHash, MultikeysRef(nil))
+      result = (trie.rootHash, MultiKeysRef(nil))
     else:
-      var m = newMultikeys(keys)
+      var m = newMultiKeys(keys)
       result = (trie.rootHash, m)
 
 proc randAccount(db: DB): AccountDef =
@@ -132,7 +132,7 @@ proc runTest(numPairs: int, testStatusIMPL: var TestStatus,
     for kd in mkeys.keys:
       check kd.visited == true
 
-proc initMultiKeys(keys: openArray[string], storageMode: bool = false): MultikeysRef =
+proc initMultiKeys(keys: openArray[string], storageMode: bool = false): MultiKeysRef =
   result.new
   if storageMode:
     for i, x in keys:
diff --git a/stateless/test_witness_verification.nim b/stateless/test_witness_verification.nim
index 536e819fe8..aa0b487915 100644
--- a/stateless/test_witness_verification.nim
+++ b/stateless/test_witness_verification.nim
@@ -25,7 +25,7 @@ proc getGenesisAlloc(filePath: string): GenesisAlloc =
 
 proc setupStateDB(
     genAccounts: GenesisAlloc,
-    stateDB: LedgerRef): (Hash256, MultikeysRef) =
+    stateDB: LedgerRef): (Hash256, MultiKeysRef) =
 
   var keys = newSeqOfCap[AccountKey](genAccounts.len)
 
@@ -40,7 +40,7 @@ proc setupStateDB(
     stateDB.setCode(address, genAccount.code)
     stateDB.setBalance(address, genAccount.balance)
 
-    let sKeys = if storageKeys.len != 0: newMultiKeys(storageKeys) else: MultikeysRef(nil)
+    let sKeys = if storageKeys.len != 0: newMultiKeys(storageKeys) else: MultiKeysRef(nil)
     let codeTouched = genAccount.code.len > 0
     keys.add(AccountKey(address: address, codeTouched: codeTouched, storageKeys: sKeys))
 
diff --git a/stateless/witness_from_tree.nim b/stateless/witness_from_tree.nim
index e76dd4deba..07ae035e07 100644
--- a/stateless/witness_from_tree.nim
+++ b/stateless/witness_from_tree.nim
@@ -29,7 +29,7 @@ type
   StackElem = object
     node: seq[byte]
     parentGroup: Group
-    keys: MultikeysRef
+    keys: MultiKeysRef
     depth: int
     storageMode: bool
 
@@ -319,7 +319,7 @@ proc getBranchRecurse(wb: var WitnessBuilder, z: var StackElem) =
     for i in nonEmpty(branchMask):
       let branch = nodeRlp.listElem(i)
       if branchMaskBitIsSet(path.mask, i):
-        # it is a match between multikeys and Branch Node elem
+        # it is a match between MultiKeysRef and Branch Node elem
         var zz = StackElem(
           node: branch.getNode,
           parentGroup: path.groups[i],
@@ -345,7 +345,7 @@ proc getBranchRecurse(wb: var WitnessBuilder, z: var StackElem) =
     raise newException(CorruptedTrieDatabase,
       "HexaryTrie node with an unexpected number of children")
 
-proc buildWitness*(wb: var WitnessBuilder, keys: MultikeysRef): seq[byte]
+proc buildWitness*(wb: var WitnessBuilder, keys: MultiKeysRef): seq[byte]
   {.raises: [CatchableError].} =
 
   # witness version
diff --git a/tests/test_aristo/test_backend.nim b/tests/test_aristo/test_backend.nim
index 318a8b0e7a..e26b6dccb4 100644
--- a/tests/test_aristo/test_backend.nim
+++ b/tests/test_aristo/test_backend.nim
@@ -217,7 +217,7 @@ proc testBackendConsistency*(
   rdb.finish(flush=true)
 
   for n,w in list:
-    if w.root != rootKey or resetDB:
+    if w.root != rootKey or resetDb:
      rootKey = w.root
       count = 0
       ndb = AristoDbRef.init()
diff --git a/tests/test_aristo/test_filter.nim b/tests/test_aristo/test_filter.nim
index f04fb7a5dc..8bd7562116 100644
--- a/tests/test_aristo/test_filter.nim
+++ b/tests/test_aristo/test_filter.nim
@@ -38,7 +38,7 @@ type
 
 proc fifosImpl[T](be: T): seq[seq[(QueueID,FilterRef)]] =
   var lastChn = -1
-  for (qid,val) in be.walkFifoBE:
+  for (qid,val) in be.walkFifoBe:
     let chn = (qid.uint64 shr 62).int
     while lastChn < chn:
       lastChn.inc
@@ -310,7 +310,7 @@ proc isEq(a, b: FilterRef; db: AristoDbRef; noisy = true): bool =
     for (vid,aKey) in a.kMap.pairs:
       if b.kMap.hasKey vid:
         let bKey = b.kMap.getOrVoid vid
-        if aKey != bkey:
+        if aKey != bKey:
           noisy.say "***", "not isEq:",
             " vid=", vid.pp,
             " aKey=", aKey.pp,
diff --git a/tests/test_aristo/test_tx.nim b/tests/test_aristo/test_tx.nim
index 7e4afac87b..93248b55ee 100644
--- a/tests/test_aristo/test_tx.nim
+++ b/tests/test_aristo/test_tx.nim
@@ -85,7 +85,7 @@ proc randomisedLeafs(
     db: AristoDbRef;
     td: var PrngDesc;
       ): seq[(LeafTie,VertexID)] =
-  result = db.lTab.pairs.toSeq.filterIt(it[1].isvalid).sorted(
+  result = db.lTab.pairs.toSeq.filterIt(it[1].isValid).sorted(
     cmp = proc(a,b: (LeafTie,VertexID)): int = cmp(a[0], b[0]))
   if 2 < result.len:
     for n in 0 ..< result.len-1:
diff --git a/tests/test_blockchain_json.nim b/tests/test_blockchain_json.nim
index 6e992afc38..3e785085b1 100644
--- a/tests/test_blockchain_json.nim
+++ b/tests/test_blockchain_json.nim
@@ -219,7 +219,7 @@ proc testGetBlockWitness(chain: ChainRef, parentHeader, currentHeader: BlockHead
   if witnessRoot != parentHeader.stateRoot:
     raise newException(ValidationError, "Expected witnessRoot == parentHeader.stateRoot")
 
-  # use the MultikeysRef to build the block proofs
+  # use the MultiKeysRef to build the block proofs
   let
     ac = newAccountStateDB(chain.com.db, currentHeader.stateRoot, chain.com.pruneTrie)
     blockProofs = getBlockProofs(state_db.ReadOnlyStateDB(ac), mkeys)
diff --git a/tests/test_clique/pool.nim b/tests/test_clique/pool.nim
index a7bb95cf1e..1c5a892409 100644
--- a/tests/test_clique/pool.nim
+++ b/tests/test_clique/pool.nim
@@ -268,7 +268,7 @@ proc resetChainDb(ap: TesterPool; extraData: Blob; debug = false) =
     networkId = ap.networkId,
     params = ap.boot)
   ap.chain = newChain(com)
-  com.initializeEmptyDB()
+  com.initializeEmptyDb()
   ap.noisy = debug
 
 proc initTesterPool(ap: TesterPool): TesterPool {.discardable.} =
diff --git a/tests/test_configuration.nim b/tests/test_configuration.nim
index fbb60dc070..08f32bece4 100644
--- a/tests/test_configuration.nim
+++ b/tests/test_configuration.nim
@@ -179,7 +179,7 @@ proc configurationMain*() =
 
   test "bootstrap-node and bootstrap-file":
     let conf = makeTestConfig()
-    let bootnodes = conf.getBootnodes()
+    let bootnodes = conf.getBootNodes()
     let bootNodeLen = bootnodes.len
     check bootNodeLen > 0 # mainnet bootnodes
 
@@ -220,7 +220,7 @@ proc configurationMain*() =
    let conf = makeConfig(@["--custom-network:" & chainid1])
     check conf.networkId == 1.NetworkId
     check conf.networkParams.config.londonBlock.get() == 1337
-    check conf.getBootnodes().len == 0
+    check conf.getBootNodes().len == 0
 
   test "json-rpc enabled when json-engine api enabled and share same port":
     let conf = makeConfig(@["--engine-api", "--engine-api-port:8545", "--http-port:8545"])
diff --git a/tests/test_sync_snap.nim b/tests/test_sync_snap.nim
index 78e073b16d..b8b033f4ce 100644
--- a/tests/test_sync_snap.nim
+++ b/tests/test_sync_snap.nim
@@ -400,7 +400,7 @@ proc snapRunner(noisy = true; specs: SnapSyncSpecs) {.used.} =
       pruneTrie = true,
       params = specs.network.networkParams).newChain)
 
-  dsc.chn.com.initializeEmptyDB()
+  dsc.chn.com.initializeEmptyDb()
 
   suite &"SyncSnap: verify \"{dsc.info}\" snapshot against full sync":
 
diff --git a/tests/test_sync_snap/test_helpers.nim b/tests/test_sync_snap/test_helpers.nim
index 9c08c588c9..340fde5f4d 100644
--- a/tests/test_sync_snap/test_helpers.nim
+++ b/tests/test_sync_snap/test_helpers.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2022-2023 Status Research & Development GmbH
+# Copyright (c) 2022-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -39,7 +39,7 @@ template isImportOk*(rc: Result[SnapAccountsGaps,HexaryError]): bool =
 proc lastTwo*(a: openArray[string]): seq[string] =
   if 1 < a.len: @[a[^2],a[^1]] else: a.toSeq
 
-proc isOK*(rc: ValidationResult): bool =
+proc isOk*(rc: ValidationResult): bool =
   rc == ValidationResult.OK
 
 # ------------------------------------------------------------------------------
diff --git a/tests/test_sync_snap/test_node_range.nim b/tests/test_sync_snap/test_node_range.nim
index 01baf90f93..7fa55fb50d 100644
--- a/tests/test_sync_snap/test_node_range.nim
+++ b/tests/test_sync_snap/test_node_range.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2022-2023 Status Research & Development GmbH
+# Copyright (c) 2022-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -324,7 +324,7 @@ proc test_NodeRangeDecompose*(
     rootKey = root.to(NodeKey)
     baseTag = accKeys[0].to(NodeTag) + 1.u256
     firstTag = baseTag.hexaryNearbyRight(rootKey, db).get(
-      otherwise = low(Nodetag))
+      otherwise = low(NodeTag))
     lastTag = accKeys[^2].to(NodeTag)
     topTag = accKeys[^1].to(NodeTag) - 1.u256
 
@@ -514,11 +514,11 @@ proc test_NodeRangeProof*(
         " proof=", proof.ppNodeKeys(dbg),
         "\n\n ",
         " base=", iv.minPt,
-        "\n ", iv.minPt.hexaryPath(rootKey,db).ppHexpath(dbg),
+        "\n ", iv.minPt.hexaryPath(rootKey,db).ppHexPath(dbg),
        "\n\n ",
         " pfx=", pfxNbls,
         " nPfx=", nPfxNblsLen,
-        "\n ", pfxNbls.hexaryPath(rootKey,db).ppHexpath(dbg),
+        "\n ", pfxNbls.hexaryPath(rootKey,db).ppHexPath(dbg),
         "\n"
 
       check rx == typeof(rx).ok()
diff --git a/tests/test_txpool.nim b/tests/test_txpool.nim
index e15a862c7b..f463f918d7 100644
--- a/tests/test_txpool.nim
+++ b/tests/test_txpool.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2018-2023 Status Research & Development GmbH
+# Copyright (c) 2018-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -173,12 +173,12 @@ proc runTxLoader(noisy = true; capture = loadSpecs) =
       check capture.numTxs == xp.nItems.total
 
       # Set txs to pseudo random status
-      check xp.verify.isOK
+      check xp.verify.isOk
      xp.setItemStatusFromInfo
 
       # Boundary conditions regarding nonces might be violated by running
       # setItemStatusFromInfo() => xp.txDB.verify() rather than xp.verify()
-      check xp.txDB.verify.isOK
+      check xp.txDB.verify.isOk
 
       check txList.len == 0
      check xp.nItems.disposed == 0
@@ -230,12 +230,12 @@ proc runTxPoolTests(noisy = true) =
 
        # Set txs to pseudo random status
        xq.setItemStatusFromInfo
-       check xq.txDB.verify.isOK
+       check xq.txDB.verify.isOk
        elapNoisy.showElapsed("Forward delete-walk ID queue"):
          for item in xq.txDB.byItemID.nextValues:
            if not xq.addOrFlushGroupwise(groupLen, seen, item, veryNoisy):
              break
-       check xq.txDB.verify.isOK
+       check xq.txDB.verify.isOk
        check seen.len == xq.nItems.total
        check seen.len < groupLen
 
@@ -248,12 +248,12 @@ proc runTxPoolTests(noisy = true) =
 
        # Set txs to pseudo random status
        xq.setItemStatusFromInfo
-       check xq.txDB.verify.isOK
+       check xq.txDB.verify.isOk
        elapNoisy.showElapsed("Revese delete-walk ID queue"):
          for item in xq.txDB.byItemID.nextValues:
            if not xq.addOrFlushGroupwise(groupLen, seen, item, veryNoisy):
              break
-       check xq.txDB.verify.isOK
+       check xq.txDB.verify.isOk
        check seen.len == xq.nItems.total
        check seen.len < groupLen
 
@@ -367,7 +367,7 @@ proc runTxPoolTests(noisy = true) =
          evictedItems = (evictionMeter.value - evictedBase).int
          impliedItems = (impliedEvictionMeter.value - impliedBase).int
 
-       check xq.txDB.verify.isOK
+       check xq.txDB.verify.isOk
        check disposedItems + disposedBase + xq.nItems.total == txList.len
        check 0 < evictedItems
        check evictedItems <= disposedItems
@@ -447,7 +447,7 @@ proc runTxPoolTests(noisy = true) =
              check xq.txDB.reassign(item, toBucket)
          if moveNumItems <= count:
            break collect
-       check xq.txDB.verify.isOK
+       check xq.txDB.verify.isOk
 
        case fromBucket
        of txItemPending:
@@ -515,7 +515,7 @@ proc runTxPoolTests(noisy = true) =
        # verify that a new item was derived from the waste basket pivot item
        let wbItem = xq.getItem(thisItem.itemID).value
        check thisItem.info == wbItem.info
-       check thisItem.timestamp < wbItem.timestamp
+       check thisItem.timeStamp < wbItem.timeStamp
 
 
 proc runTxPackerTests(noisy = true) =
@@ -548,7 +548,7 @@ proc runTxPackerTests(noisy = true) =
        var nextKey = ntBaseFee
        for _ in [1, 2, 3]:
          let rcNextKey = feesList.gt(nextKey.GasPriceEx)
-          check rcNextKey.isOK
+          check rcNextKey.isOk
          nextKey = rcNextKey.value.key.uint64.GasPrice
 
        ntNextFee = nextKey + keyStep.GasPrice
@@ -595,7 +595,7 @@ proc runTxPackerTests(noisy = true) =
          xq.triggerReorg
 
          # now, xq should look like xr
-          check xq.verify.isOK
+          check xq.verify.isOk
          check xq.nItems == xr.nItems
 
      block:
@@ -622,7 +622,7 @@ proc runTxPackerTests(noisy = true) =
          # employ packer
          # xq.jobCommit(forceMaintenance = true)
          check xq.packerVmExec.isOk
-          check xq.verify.isOK
+          check xq.verify.isOk
 
          # verify that the test did not degenerate
          check 0 < xq.gasTotals.packed
@@ -648,7 +648,7 @@ proc runTxPackerTests(noisy = true) =
          # re-pack bucket
          #xq.jobCommit(forceMaintenance = true)
          check xq.packerVmExec.isOk
-          check xq.verify.isOK
+          check xq.verify.isOk
 
          let items1 = xq.toItems(txItemPacked)
 
@@ -668,7 +668,7 @@ proc runTxPackerTests(noisy = true) =
 
          # delete last item from packed bucket
          xq.disposeItems(lastItem)
-          check xq.verify.isOK
+          check xq.verify.isOk
 
        # set new minimum target price
        xq.minPreLondonGasPrice = lowerPrice
@@ -678,7 +678,7 @@ proc runTxPackerTests(noisy = true) =
          # not necessarily a buckets re-org resulting in a change
          #xq.jobCommit(forceMaintenance = true)
          check xq.packerVmExec.isOk
-          check xq.verify.isOK
+          check xq.verify.isOk
 
          let items = xq.toItems(txItemPacked)
 
@@ -835,13 +835,13 @@ proc runTxPackerTests(noisy = true) =
      # Test low-level function for adding the new block to the database
      #xq.chain.maxMode = (packItemsMaxGasLimit in xq.flags)
      xq.chain.clearAccounts
-      check xq.chain.vmState.processBlock(hdr, bdy).isOK
+      check xq.chain.vmState.processBlock(hdr, bdy).isOk
 
      setErrorLevel()
 
      # Re-allocate using VM environment from `persistBlocks()`
      let vmstate2 = BaseVMState.new(hdr, bcCom)
-      check vmstate2.processBlock(hdr, bdy).isOK
+      check vmstate2.processBlock(hdr, bdy).isOk
 
      # This should not have changed
      check canonicalHead == xq.chain.com.db.getCanonicalHead
@@ -850,7 +850,7 @@ proc runTxPackerTests(noisy = true) =
      # turning off header verification.
      let c = bcCom.newChain(extraValidation = false)
 
-      check c.persistBlocks(@[hdr], @[bdy]).isOK
+      check c.persistBlocks(@[hdr], @[bdy]).isOk
 
      if bcCom.consensus == ConsensusType.POS:
        # PoS consensus will force the new blockheader as head
diff --git a/tests/test_txpool/helpers.nim b/tests/test_txpool/helpers.nim
index 4b90c45cad..8903c52e8f 100644
--- a/tests/test_txpool/helpers.nim
+++ b/tests/test_txpool/helpers.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2022-2023 Status Research & Development GmbH
+# Copyright (c) 2022-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -180,7 +180,7 @@ proc pp*(w: TxChainGasLimits): string =
 # Public functions, other
 # ------------------------------------------------------------------------------
 
-proc isOK*(rc: ValidationResult): bool =
+proc isOk*(rc: ValidationResult): bool =
   rc == ValidationResult.OK
 
 proc toHex*(acc: EthAddress): string =
diff --git a/tests/test_txpool/setup.nim b/tests/test_txpool/setup.nim
index ab9439c66f..2c01597919 100644
--- a/tests/test_txpool/setup.nim
+++ b/tests/test_txpool/setup.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2022-2023 Status Research & Development GmbH
+# Copyright (c) 2022-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -47,7 +47,7 @@ proc blockChainForTesting*(network: NetworkID): CommonRef =
     networkId = network,
     params = network.networkParams)
 
-  result.initializeEmptyDB
+  result.initializeEmptyDb
 
 proc toTxPool*(
     com: CommonRef; ## to be modified
@@ -187,7 +187,7 @@ proc toTxPool*(
   let itemID = item.itemID
   doAssert result.nItems.disposed == 0
   timeGap = result.getItem(itemID).value.timeStamp + middleOfTimeGap
-  let t = timegap.format(tFmt, utc())
+  let t = timeGap.format(tFmt, utc())
   noisy.say &"{delayMSecs}ms time gap centered around {t}"
   delayMSecs.sleep
 
diff --git a/tests/test_txpool2.nim b/tests/test_txpool2.nim
index 3fea20d667..e2f1dd0086 100644
--- a/tests/test_txpool2.nim
+++ b/tests/test_txpool2.nim
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2022-2023 Status Research & Development GmbH
+# Copyright (c) 2022-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -188,10 +188,10 @@ proc runTxPoolCliqueTest*() =
 
     test "Store generated block in block chain database":
       xp.chain.clearAccounts
-      check xp.chain.vmState.processBlock(blk.header, body).isOK
+      check xp.chain.vmState.processBlock(blk.header, body).isOk
 
      let vmstate2 = BaseVMState.new(blk.header, com)
-      check vmstate2.processBlock(blk.header, body).isOK
+      check vmstate2.processBlock(blk.header, body).isOk
 
    test "Clique persistBlocks":
      let rr = chain.persistBlocks([blk.header], [body])
@@ -231,7 +231,7 @@ proc runTxPoolCliqueTest*() =
      os.sleep(com.cliquePeriod.int * 1000)
 
      xp.chain.clearAccounts
-      check xp.chain.vmState.processBlock(blk.header, body).isOK
+      check xp.chain.vmState.processBlock(blk.header, body).isOk
      let rr = chain.persistBlocks([blk.header], [body])
      check rr == ValidationResult.OK
 