diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9ea9b7c7597b..25adee46c3c3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -40,7 +40,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
 
 ### Improvements
 
-
+* (store) [#13881](https://github.com/cosmos/cosmos-sdk/pull/13881) Optimize iteration on nested cached KV stores and other operations in general.
 * (store) [#11646](https://github.com/cosmos/cosmos-sdk/pull/11646) Add store name in tracekv-emitted store traces
 * (deps) Bump Tendermint version to [v0.34.24](https://github.com/tendermint/tendermint/releases/tag/v0.34.24).
@@ -57,6 +57,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
 ### Bug Fixes
 
 * (store) [#13516](https://github.com/cosmos/cosmos-sdk/pull/13516) Fix state listener that was observing writes at wrong time.
+* (store) [#12945](https://github.com/cosmos/cosmos-sdk/pull/12945) Fix nil end semantics in store/cachekv/iterator when iterating a dirty cache.
 
 ## v0.45.11 - 2022-11-09
diff --git a/go.mod b/go.mod
index 579ff92a6c66..0da3e1efe86a 100644
--- a/go.mod
+++ b/go.mod
@@ -1,7 +1,7 @@
-go 1.18
-
 module github.com/cosmos/cosmos-sdk
 
+go 1.18
+
 require (
 	github.com/99designs/keyring v1.1.6
 	github.com/armon/go-metrics v0.3.10
@@ -44,6 +44,7 @@ require (
 	github.com/tendermint/go-amino v0.16.0
 	github.com/tendermint/tendermint v0.34.24
 	github.com/tendermint/tm-db v0.6.6
+	github.com/tidwall/btree v1.5.0
 	golang.org/x/crypto v0.1.0
 	golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e
 	google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a
diff --git a/go.sum b/go.sum
index f5b86e013c52..4568a7279f7a 100644
--- a/go.sum
+++ b/go.sum
@@ -750,6 +750,8 @@ github.com/tendermint/tendermint v0.34.24 h1:879MKKJWYYPJEMMKME+DWUTY4V9f/FBpnZD
 github.com/tendermint/tendermint v0.34.24/go.mod h1:rXVrl4OYzmIa1I91av3iLv2HS0fGSiucyW9J4aMTpKI=
 github.com/tendermint/tm-db v0.6.6 h1:EzhaOfR0bdKyATqcd5PNeyeq8r+V4bRPHBfyFdD9kGM=
 github.com/tendermint/tm-db v0.6.6/go.mod h1:wP8d49A85B7/erz/r4YbKssKw6ylsO/hKtFk7E1aWZI=
+github.com/tidwall/btree v1.5.0 h1:iV0yVY/frd7r6qGBXfEYs7DH0gTDgrKTrDjS7xt/IyQ=
+github.com/tidwall/btree v1.5.0/go.mod h1:LGm8L/DZjPLmeWGjv5kFrY8dL4uVhMmzmmLYmsObdKE=
 github.com/tidwall/gjson v1.6.7/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI=
 github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
 github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
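For reviewers unfamiliar with the new dependency: the patch replaces the tm-db `MemDB` backing `sortedCache` with `tidwall/btree`. A minimal standalone sketch of the v1.5.0 generic API as the wrapper below uses it — the `item` type and `byKeys` comparator mirror the ones in `store/cachekv/internal/btree.go`, and the `Degree` value here is illustrative:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/tidwall/btree"
)

// item mirrors the byte-slice key/value pair used by the cachekv wrapper.
type item struct {
	key, value []byte
}

// byKeys orders items by key, as the patch's comparator does.
func byKeys(a, b item) bool {
	return bytes.Compare(a.key, b.key) == -1
}

func main() {
	// NoLocks: true is only safe under the patch's contract that the
	// cachekv store is never accessed concurrently.
	tree := btree.NewBTreeGOptions(byKeys, btree.Options{Degree: 32, NoLocks: true})
	tree.Set(item{key: []byte("a"), value: []byte{1}})
	if it, ok := tree.Get(item{key: []byte("a")}); ok {
		fmt.Println(it.value) // [1]
	}
}
```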
diff --git a/store/cachekv/benchmark_test.go b/store/cachekv/benchmark_test.go
index 59a9e92c7db6..2db62ba5d6c6 100644
--- a/store/cachekv/benchmark_test.go
+++ b/store/cachekv/benchmark_test.go
@@ -4,133 +4,158 @@ import (
 	fmt "fmt"
 	"testing"
 
-	"github.com/stretchr/testify/require"
-
-	coretesting "cosmossdk.io/core/testing"
-	"cosmossdk.io/store/cachekv"
-	"cosmossdk.io/store/dbadapter"
-	"cosmossdk.io/store/types"
+	"github.com/cosmos/cosmos-sdk/store"
+	storetypes "github.com/cosmos/cosmos-sdk/store/types"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"github.com/tendermint/tendermint/libs/log"
+	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+	dbm "github.com/tendermint/tm-db"
 )
 
-func DoBenchmarkDeepCacheStack(b *testing.B, depth int) {
-	b.Helper()
-	db := coretesting.NewMemDB()
-	initialStore := cachekv.NewStore(dbadapter.Store{DB: db})
+func DoBenchmarkDeepContextStack(b *testing.B, depth int) {
+	begin := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+	end := []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+	key := storetypes.NewKVStoreKey("test")
 
-	nItems := 20
-	for i := 0; i < nItems; i++ {
-		initialStore.Set([]byte(fmt.Sprintf("hello%03d", i)), []byte{0})
-	}
+	db := dbm.NewMemDB()
+	cms := store.NewCommitMultiStore(db)
+	cms.MountStoreWithDB(key, storetypes.StoreTypeIAVL, db)
+	cms.LoadLatestVersion()
+	ctx := sdk.NewContext(cms, tmproto.Header{}, false, log.NewNopLogger())
 
-	var stack CacheStack
-	stack.Reset(initialStore)
+	var stack ContextStack
+	stack.Reset(ctx)
 
 	for i := 0; i < depth; i++ {
 		stack.Snapshot()
 
-		store := stack.CurrentStore()
-		store.Set([]byte(fmt.Sprintf("hello%03d", i)), []byte{byte(i)})
+		store := stack.CurrentContext().KVStore(key)
+		store.Set(begin, []byte("value"))
 	}
 
-	store := stack.CurrentStore()
+	store := stack.CurrentContext().KVStore(key)
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		it := store.Iterator(nil, nil)
-		items := make([][]byte, 0, nItems)
-		for ; it.Valid(); it.Next() {
-			items = append(items, it.Key())
-			it.Value()
-		}
+		it := store.Iterator(begin, end)
+		it.Valid()
+		it.Key()
+		it.Value()
+		it.Next()
 		it.Close()
-		require.Equal(b, nItems, len(items))
 	}
 }
 
-func BenchmarkDeepCacheStack1(b *testing.B) {
-	DoBenchmarkDeepCacheStack(b, 1)
+func BenchmarkDeepContextStack1(b *testing.B) {
+	DoBenchmarkDeepContextStack(b, 1)
 }
 
-func BenchmarkDeepCacheStack3(b *testing.B) {
-	DoBenchmarkDeepCacheStack(b, 3)
+func BenchmarkDeepContextStack3(b *testing.B) {
+	DoBenchmarkDeepContextStack(b, 3)
+}
+
+func BenchmarkDeepContextStack10(b *testing.B) {
+	DoBenchmarkDeepContextStack(b, 10)
 }
 
-func BenchmarkDeepCacheStack10(b *testing.B) {
-	DoBenchmarkDeepCacheStack(b, 10)
+func BenchmarkDeepContextStack13(b *testing.B) {
+	DoBenchmarkDeepContextStack(b, 13)
 }
 
-func BenchmarkDeepCacheStack13(b *testing.B) {
-	DoBenchmarkDeepCacheStack(b, 13)
+// cachedContext is a pair of cache context and its corresponding commit method.
+// They are obtained from the return value of `context.CacheContext()`.
+type cachedContext struct {
+	ctx    sdk.Context
+	commit func()
 }
 
-// CacheStack manages a stack of nested cache store to
-// support the evm `StateDB`'s `Snapshot` and `RevertToSnapshot` methods.
-type CacheStack struct {
-	initialStore types.CacheKVStore
+// ContextStack manages the initial context and a stack of cached contexts,
+// to support the `StateDB.Snapshot` and `StateDB.RevertToSnapshot` methods.
+//
+// Copied from an old version of ethermint
+type ContextStack struct {
 	// Context of the initial state before transaction execution.
 	// It's the context used by `StateDB.CommitedState`.
-	cacheStores []types.CacheKVStore
+	initialCtx     sdk.Context
+	cachedContexts []cachedContext
 }
 
 // CurrentContext returns the top context of cached stack,
 // if the stack is empty, returns the initial context.
-func (cs *CacheStack) CurrentStore() types.CacheKVStore {
-	l := len(cs.cacheStores)
+func (cs *ContextStack) CurrentContext() sdk.Context {
+	l := len(cs.cachedContexts)
 	if l == 0 {
-		return cs.initialStore
+		return cs.initialCtx
 	}
-	return cs.cacheStores[l-1]
+	return cs.cachedContexts[l-1].ctx
 }
 
 // Reset sets the initial context and clear the cache context stack.
-func (cs *CacheStack) Reset(initialStore types.CacheKVStore) {
-	cs.initialStore = initialStore
-	cs.cacheStores = nil
+func (cs *ContextStack) Reset(ctx sdk.Context) {
+	cs.initialCtx = ctx
+	if len(cs.cachedContexts) > 0 {
+		cs.cachedContexts = []cachedContext{}
+	}
 }
 
 // IsEmpty returns true if the cache context stack is empty.
-func (cs *CacheStack) IsEmpty() bool {
-	return len(cs.cacheStores) == 0
+func (cs *ContextStack) IsEmpty() bool {
+	return len(cs.cachedContexts) == 0
 }
 
 // Commit commits all the cached contexts from top to bottom in order and clears the stack by setting an empty slice of cache contexts.
-func (cs *CacheStack) Commit() {
+func (cs *ContextStack) Commit() {
 	// commit in order from top to bottom
-	for i := len(cs.cacheStores) - 1; i >= 0; i-- {
-		cs.cacheStores[i].Write()
+	for i := len(cs.cachedContexts) - 1; i >= 0; i-- {
+		if cs.cachedContexts[i].commit == nil {
+			panic(fmt.Sprintf("commit function at index %d should not be nil", i))
+		} else {
+			cs.cachedContexts[i].commit()
+		}
 	}
-	cs.cacheStores = nil
+	cs.cachedContexts = []cachedContext{}
 }
 
 // CommitToRevision commit the cache after the target revision,
 // to improve efficiency of db operations.
-func (cs *CacheStack) CommitToRevision(target int) error {
-	if target < 0 || target >= len(cs.cacheStores) {
-		return fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cacheStores))
+func (cs *ContextStack) CommitToRevision(target int) error {
+	if target < 0 || target >= len(cs.cachedContexts) {
+		return fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cachedContexts))
 	}
 
 	// commit in order from top to bottom
-	for i := len(cs.cacheStores) - 1; i > target; i-- {
-		cs.cacheStores[i].Write()
+	for i := len(cs.cachedContexts) - 1; i > target; i-- {
+		if cs.cachedContexts[i].commit == nil {
+			return fmt.Errorf("commit function at index %d should not be nil", i)
+		}
+		cs.cachedContexts[i].commit()
 	}
-	cs.cacheStores = cs.cacheStores[0 : target+1]
+	cs.cachedContexts = cs.cachedContexts[0 : target+1]
 
 	return nil
 }
 
 // Snapshot pushes a new cached context to the stack,
 // and returns the index of it.
-func (cs *CacheStack) Snapshot() int {
-	cs.cacheStores = append(cs.cacheStores, cachekv.NewStore(cs.CurrentStore()))
-	return len(cs.cacheStores) - 1
+func (cs *ContextStack) Snapshot() int {
+	i := len(cs.cachedContexts)
+	ctx, commit := cs.CurrentContext().CacheContext()
+	cs.cachedContexts = append(cs.cachedContexts, cachedContext{ctx: ctx, commit: commit})
+	return i
 }
 
 // RevertToSnapshot pops all the cached contexts after the target index (inclusive).
 // the target should be snapshot index returned by `Snapshot`.
 // This function panics if the index is out of bounds.
-func (cs *CacheStack) RevertToSnapshot(target int) {
-	if target < 0 || target >= len(cs.cacheStores) {
-		panic(fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cacheStores)))
+func (cs *ContextStack) RevertToSnapshot(target int) {
+	if target < 0 || target >= len(cs.cachedContexts) {
+		panic(fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cachedContexts)))
+	}
+	cs.cachedContexts = cs.cachedContexts[:target]
+}
+
+// RevertAll discards all the cache contexts.
+func (cs *ContextStack) RevertAll() {
+	if len(cs.cachedContexts) > 0 {
+		cs.RevertToSnapshot(0)
 	}
-	cs.cacheStores = cs.cacheStores[:target]
 }
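The snapshot/revert flow reads more clearly outside a benchmark. A minimal sketch under the assumption that it lives next to the `ContextStack` above; the `applyReversibly` helper and its `fn` callback are illustrative, not part of the patch:

```go
// applyReversibly executes fn against a snapshot of ctx and keeps the
// writes only if fn succeeds; otherwise the cached layer is discarded.
func applyReversibly(ctx sdk.Context, fn func(sdk.Context) error) error {
	var stack ContextStack
	stack.Reset(ctx)

	rev := stack.Snapshot() // push a cached context and remember its index
	if err := fn(stack.CurrentContext()); err != nil {
		stack.RevertToSnapshot(rev) // drop the cached writes
		return err
	}
	stack.Commit() // flush every cached layer, top to bottom, back into ctx
	return nil
}
```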
diff --git a/store/cachekv/internal/btree.go b/store/cachekv/internal/btree.go
index 209f7e58c4dd..142f754bbd38 100644
--- a/store/cachekv/internal/btree.go
+++ b/store/cachekv/internal/btree.go
@@ -5,8 +5,6 @@ import (
 	"errors"
 
 	"github.com/tidwall/btree"
-
-	"cosmossdk.io/store/types"
 )
 
 const (
@@ -23,24 +21,23 @@ var errKeyEmpty = errors.New("key cannot be empty")
 //
 // We choose tidwall/btree over google/btree here because it provides API to implement step iterator directly.
 type BTree struct {
-	tree *btree.BTreeG[item]
+	tree btree.BTreeG[item]
 }
 
 // NewBTree creates a wrapper around `btree.BTreeG`.
-func NewBTree() BTree {
-	return BTree{
-		tree: btree.NewBTreeGOptions(byKeys, btree.Options{
-			Degree:  bTreeDegree,
-			NoLocks: false,
-		}),
-	}
+func NewBTree() *BTree {
+	return &BTree{tree: *btree.NewBTreeGOptions(byKeys, btree.Options{
+		Degree: bTreeDegree,
+		// Contract: cachekv store must not be called concurrently
+		NoLocks: true,
+	})}
 }
 
-func (bt BTree) Set(key, value []byte) {
+func (bt *BTree) Set(key, value []byte) {
 	bt.tree.Set(newItem(key, value))
 }
 
-func (bt BTree) Get(key []byte) []byte {
+func (bt *BTree) Get(key []byte) []byte {
 	i, found := bt.tree.Get(newItem(key, nil))
 	if !found {
 		return nil
@@ -48,30 +45,22 @@ func (bt BTree) Get(key []byte) []byte {
 	return i.value
 }
 
-func (bt BTree) Delete(key []byte) {
+func (bt *BTree) Delete(key []byte) {
 	bt.tree.Delete(newItem(key, nil))
 }
 
-func (bt BTree) Iterator(start, end []byte) (types.Iterator, error) {
+func (bt *BTree) Iterator(start, end []byte) (*memIterator, error) {
 	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
 		return nil, errKeyEmpty
 	}
-	return newMemIterator(start, end, bt, true), nil
+	return NewMemIterator(start, end, bt, make(map[string]struct{}), true), nil
 }
 
-func (bt BTree) ReverseIterator(start, end []byte) (types.Iterator, error) {
+func (bt *BTree) ReverseIterator(start, end []byte) (*memIterator, error) {
 	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
 		return nil, errKeyEmpty
 	}
-	return newMemIterator(start, end, bt, false), nil
-}
-
-// Copy the tree. This is a copy-on-write operation and is very fast because
-// it only performs a shadowed copy.
-func (bt BTree) Copy() BTree {
-	return BTree{
-		tree: bt.tree.Copy(),
-	}
+	return NewMemIterator(start, end, bt, make(map[string]struct{}), false), nil
 }
 
 // item is a btree item with byte slices as keys and values
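The wrapper keeps tm-db's half-open `[start, end)` iteration contract. A test-style sketch of that surface, assuming it sits in the same `internal` package as the code above (the example name and output values are illustrative):

```go
package internal

import "fmt"

func ExampleBTree_Iterator() {
	bt := NewBTree()
	bt.Set([]byte("a"), []byte{1})
	bt.Set([]byte("b"), []byte{2})
	bt.Set([]byte("c"), []byte{3})

	// Half-open domain [a, c): yields "a" and "b" but not "c".
	it, err := bt.Iterator([]byte("a"), []byte("c"))
	if err != nil {
		panic(err)
	}
	defer it.Close()
	for ; it.Valid(); it.Next() {
		fmt.Printf("%s=%v\n", it.Key(), it.Value())
	}
	// Output:
	// a=[1]
	// b=[2]
}
```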
diff --git a/store/cachekv/internal/btree_test.go b/store/cachekv/internal/btree_test.go
index 06437997f636..f85a8bbaf109 100644
--- a/store/cachekv/internal/btree_test.go
+++ b/store/cachekv/internal/btree_test.go
@@ -3,9 +3,8 @@ package internal
 import (
 	"testing"
 
+	sdk "github.com/cosmos/cosmos-sdk/types"
 	"github.com/stretchr/testify/require"
-
-	"cosmossdk.io/store/types"
 )
 
 func TestGetSetDelete(t *testing.T) {
@@ -182,8 +181,7 @@ func TestDBIterator(t *testing.T) {
 	verifyIterator(t, ritr, nil, "reverse iterator with empty db")
 }
 
-func verifyIterator(t *testing.T, itr types.Iterator, expected []int64, msg string) {
-	t.Helper()
+func verifyIterator(t *testing.T, itr *memIterator, expected []int64, msg string) {
 	i := 0
 	for itr.Valid() {
 		key := itr.Key()
@@ -196,9 +194,9 @@ func verifyIterator(t *testing.T, itr *memIterator, expected []int64, msg stri
 }
 
 func int642Bytes(i int64) []byte {
-	return types.Uint64ToBigEndian(uint64(i))
+	return sdk.Uint64ToBigEndian(uint64(i))
 }
 
 func bytes2Int64(buf []byte) int64 {
-	return int64(types.BigEndianToUint64(buf))
+	return int64(sdk.BigEndianToUint64(buf))
 }
diff --git a/store/cachekv/internal/memiterator.go b/store/cachekv/internal/memiterator.go
index 9dbba7587071..2bceb8bc77df 100644
--- a/store/cachekv/internal/memiterator.go
+++ b/store/cachekv/internal/memiterator.go
@@ -4,26 +4,27 @@ import (
 	"bytes"
 	"errors"
 
+	"github.com/cosmos/cosmos-sdk/store/types"
 	"github.com/tidwall/btree"
-
-	"cosmossdk.io/store/types"
 )
 
 var _ types.Iterator = (*memIterator)(nil)
 
 // memIterator iterates over iterKVCache items.
-// if value is nil, means it was deleted.
+// if key is nil, means it was deleted.
 // Implements Iterator.
 type memIterator struct {
-	iter btree.IterG[item]
+	iter btree.GenericIter[item]
 
 	start     []byte
 	end       []byte
 	ascending bool
+	lastKey   []byte
+	deleted   map[string]struct{}
 	valid     bool
 }
 
-func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator {
+func NewMemIterator(start, end []byte, items *BTree, deleted map[string]struct{}, ascending bool) *memIterator {
 	iter := items.tree.Iter()
 	var valid bool
 	if ascending {
@@ -51,6 +52,8 @@ func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator
 		start:     start,
 		end:       end,
 		ascending: ascending,
+		lastKey:   nil,
+		deleted:   deleted,
 		valid:     valid,
 	}
 
@@ -61,7 +64,7 @@ func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator
 	return mi
 }
 
-func (mi *memIterator) Domain() (start, end []byte) {
+func (mi *memIterator) Domain() (start []byte, end []byte) {
 	return mi.start, mi.end
 }
 
@@ -110,7 +113,21 @@ func (mi *memIterator) Key() []byte {
 }
 
 func (mi *memIterator) Value() []byte {
-	return mi.iter.Item().value
+	item := mi.iter.Item()
+	key := item.key
+	// We need to handle the case where deleted is modified and includes our current key
+	// We handle this by maintaining a lastKey object in the iterator.
+	// If the current key is the same as the last key (and last key is not nil / the start)
+	// then we are calling value on the same thing as last time.
+	// Therefore we don't check the mi.deleted to see if this key is included in there.
+	if _, ok := mi.deleted[string(key)]; ok {
+		if mi.lastKey == nil || !bytes.Equal(key, mi.lastKey) {
+			// not re-calling on old last key
+			return nil
+		}
+	}
+	mi.lastKey = key
+	return item.value
 }
 
 func (mi *memIterator) assertValid() {
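The `deleted` map is what lets a dirty cache shadow tree entries mid-iteration: a key still present in the BTree but marked deleted reports a nil value. A sketch of the observable behavior, again assuming an `internal`-package test file (the output lines follow from the `Value` logic above, they are not taken from the patch):

```go
func ExampleNewMemIterator() {
	bt := NewBTree()
	bt.Set([]byte("a"), []byte{1})
	bt.Set([]byte("b"), []byte{2})

	// "b" is marked deleted in the cache layer above the tree.
	deleted := map[string]struct{}{"b": {}}

	it := NewMemIterator(nil, nil, bt, deleted, true)
	defer it.Close()
	for ; it.Valid(); it.Next() {
		fmt.Printf("%s -> %v\n", it.Key(), it.Value())
	}
	// Output:
	// a -> [1]
	// b -> []
}
```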
diff --git a/store/cachekv/internal/mergeiterator.go b/store/cachekv/internal/mergeiterator.go
index 5ca05da5aa02..0d6c37ebacf9 100644
--- a/store/cachekv/internal/mergeiterator.go
+++ b/store/cachekv/internal/mergeiterator.go
@@ -24,7 +24,7 @@ type cacheMergeIterator struct {
 
 var _ types.Iterator = (*cacheMergeIterator)(nil)
 
-func NewCacheMergeIterator(parent, cache types.Iterator, ascending bool) types.Iterator {
+func NewCacheMergeIterator(parent, cache types.Iterator, ascending bool) *cacheMergeIterator {
 	iter := &cacheMergeIterator{
 		parent:    parent,
 		cache:     cache,
diff --git a/store/cachekv/search_benchmark_test.go b/store/cachekv/search_benchmark_test.go
index 921bff4e3864..4007c7cda202 100644
--- a/store/cachekv/search_benchmark_test.go
+++ b/store/cachekv/search_benchmark_test.go
@@ -4,7 +4,7 @@ import (
 	"strconv"
 	"testing"
 
-	db "github.com/tendermint/tm-db"
+	"github.com/cosmos/cosmos-sdk/store/cachekv/internal"
 )
 
 func BenchmarkLargeUnsortedMisses(b *testing.B) {
@@ -39,6 +39,6 @@ func generateStore() *Store {
 	return &Store{
 		cache:         cache,
 		unsortedCache: unsorted,
-		sortedCache:   db.NewMemDB(),
+		sortedCache:   internal.NewBTree(),
 	}
 }
diff --git a/store/cachekv/store.go b/store/cachekv/store.go
index 69f0baba6e32..471271cfde4f 100644
--- a/store/cachekv/store.go
+++ b/store/cachekv/store.go
@@ -9,6 +9,7 @@ import (
 	dbm "github.com/tendermint/tm-db"
 
 	"github.com/cosmos/cosmos-sdk/internal/conv"
+	"github.com/cosmos/cosmos-sdk/store/cachekv/internal"
 	"github.com/cosmos/cosmos-sdk/store/tracekv"
 	"github.com/cosmos/cosmos-sdk/store/types"
 	"github.com/cosmos/cosmos-sdk/telemetry"
@@ -29,7 +30,7 @@ type Store struct {
 	cache         map[string]*cValue
 	deleted       map[string]struct{}
 	unsortedCache map[string]struct{}
-	sortedCache   *dbm.MemDB // always ascending sorted
+	sortedCache   *internal.BTree // always ascending sorted
 	parent        types.KVStore
 }
 
@@ -41,7 +42,7 @@ func NewStore(parent types.KVStore) *Store {
 		cache:         make(map[string]*cValue),
 		deleted:       make(map[string]struct{}),
 		unsortedCache: make(map[string]struct{}),
-		sortedCache:   dbm.NewMemDB(),
+		sortedCache:   internal.NewBTree(),
 		parent:        parent,
 	}
 }
@@ -135,6 +136,11 @@ func (store *Store) Write() {
 		val *cValue
 	}
 
+	if len(store.cache) == 0 && len(store.deleted) == 0 && len(store.unsortedCache) == 0 {
+		store.sortedCache = internal.NewBTree()
+		return
+	}
+
 	// We need a copy of all of the keys.
 	// Not the best. To reduce RAM pressure, we copy the values as well
 	// and clear out the old caches right after the copy.
@@ -181,7 +187,7 @@ func (store *Store) Write() {
 	for key := range store.unsortedCache {
 		delete(store.unsortedCache, key)
 	}
-	store.sortedCache = dbm.NewMemDB()
+	store.sortedCache = internal.NewBTree()
 }
 
 // CacheWrap implements CacheWrapper.
@@ -231,40 +237,9 @@ func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator {
 	}
 
 	store.dirtyItems(start, end)
-	cache = newMemIterator(start, end, store.sortedCache, store.deleted, ascending)
+	cache = internal.NewMemIterator(start, end, store.sortedCache, store.deleted, ascending)
 
-	// Modified binary search to find the very first element > 1
-	midStr := strL[mid]
-	if midStr == endQ {
-		// Handle condition where there might be multiple values equal to startQ.
-		// We are looking for the very first value < midStL, that i+1 will be the first
-		// element >= midStr.
-		for i := mid - 1; i >= 0; i-- {
-			if strL[i] < midStr {
-				return i + 1
-			}
-		}
-		return 0
-	}
-	if midStr < endQ {
-		left = mid + 1
-	} else { // midStrL > startQ
-		right = mid - 1
-	}
-	}
-
-	// Binary search failed, now let's find a value less than endQ.
-	for i := right; i >= 0; i-- {
-		if strL[i] < endQ {
-			return i
-		}
-	}
-
-	return -1
+	return internal.NewCacheMergeIterator(parent, cache, ascending)
 }
 
 func findStartIndex(strL []string, startQ string) int {
@@ -352,7 +327,7 @@ const minSortSize = 1024
 // Constructs a slice of dirty items, to use w/ memIterator.
 func (store *Store) dirtyItems(start, end []byte) {
 	startStr, endStr := conv.UnsafeBytesToStr(start), conv.UnsafeBytesToStr(end)
-	if startStr > endStr {
+	if end != nil && startStr > endStr {
 		// Nothing to do here.
 		return
 	}
@@ -367,6 +342,7 @@ func (store *Store) dirtyItems(start, end []byte) {
 	// than just not having the cache.
 	if n < minSortSize {
 		for key := range store.unsortedCache {
+			// dbm.IsKeyInDomain is nil safe and returns true iff key is greater than start
 			if dbm.IsKeyInDomain(conv.UnsafeStrToBytes(key), start, end) {
 				cacheValue := store.cache[key]
 				unsorted = append(unsorted, &kv.Pair{Key: []byte(key), Value: cacheValue.value})
@@ -384,7 +360,7 @@ func (store *Store) dirtyItems(start, end []byte) {
 	}
 	sort.Strings(strL)
 
-	startIndex, endIndex := findStartEndIndex(strL, startStr, endStr)
+	startIndex, endIndex := findStartEndIndex(strL, startStr, endStr, end)
 
 	// Since we spent cycles to sort the values, we should process and remove a reasonable amount
 	// ensure start to end is at least minSortSize in size
@@ -408,18 +384,24 @@ func (store *Store) dirtyItems(start, end []byte) {
 	store.clearUnsortedCacheSubset(kvL, stateAlreadySorted)
 }
 
-func findStartEndIndex(strL []string, startStr, endStr string) (int, int) {
+func findStartEndIndex(strL []string, startStr, endStr string, end []byte) (int, int) {
 	// Now find the values within the domain
 	//  [start, end)
 	startIndex := findStartIndex(strL, startStr)
-	endIndex := findEndIndex(strL, endStr)
+	if startIndex < 0 {
+		startIndex = 0
+	}
 
-	if endIndex < 0 {
+	var endIndex int
+	if end == nil {
 		endIndex = len(strL) - 1
+	} else {
+		endIndex = findEndIndex(strL, endStr)
 	}
-	if startIndex < 0 {
-		startIndex = 0
+	if endIndex < 0 {
+		endIndex = len(strL) - 1
 	}
+
 	return startIndex, endIndex
 }
@@ -436,14 +418,11 @@ func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sort
 		if item.Value == nil {
 			// deleted element, tracked by store.deleted
 			// setting arbitrary value
-			// TODO: Don't ignore this error.
 			store.sortedCache.Set(item.Key, []byte{})
 			continue
 		}
-		err := store.sortedCache.Set(item.Key, item.Value)
-		if err != nil {
-			panic(err)
-		}
+
+		store.sortedCache.Set(item.Key, item.Value)
 	}
 }
diff --git a/store/cachekv/store_test.go b/store/cachekv/store_test.go
index 2885e7125476..7b80a8ca8842 100644
--- a/store/cachekv/store_test.go
+++ b/store/cachekv/store_test.go
@@ -434,7 +434,7 @@ func TestNilEndIterator(t *testing.T) {
 
 // TestIteratorDeadlock demonstrate the deadlock issue in cache store.
 func TestIteratorDeadlock(t *testing.T) {
-	mem := dbadapter.Store{DB: coretesting.NewMemDB()}
+	mem := dbadapter.Store{DB: dbm.NewMemDB()}
 	store := cachekv.NewStore(mem)
 	// the channel buffer is 64 and received once, so put at least 66 elements.
 	for i := 0; i < 66; i++ {
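The nil-end fix is easiest to see in isolation. With `end == nil`, `conv.UnsafeBytesToStr` yields `endStr == ""`, so the old `startStr > endStr` guard and the `findEndIndex` lookup both misfired; the new code guards with `end != nil` and pins the end index to the last sorted key. A test-style sketch, hypothetical in that it assumes an internal `package cachekv` test file (so the unexported helpers are reachable) plus the usual `testing`/`require` imports:

```go
func TestFindStartEndIndexNilEnd(t *testing.T) {
	strL := []string{"a", "b", "c"}

	// end == nil used to make endStr == "", tripping the startStr > endStr
	// guard; now the domain simply extends to the last sorted key.
	startIndex, endIndex := findStartEndIndex(strL, "b", "", nil)
	require.Equal(t, 1, startIndex) // first key >= "b"
	require.Equal(t, 2, endIndex)   // len(strL) - 1
}
```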