Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

MKVS fixes and improvements #2691

Merged
merged 5 commits into from
Feb 19, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .changelog/2691.bugfix.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
storage/mkvs: Fix iterator bug
1 change: 1 addition & 0 deletions .changelog/2691.internal.1.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
go/storage/mkvs: Make Tree an interface
1 change: 1 addition & 0 deletions .changelog/2691.internal.2.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
go/storage/mkvs: Add overlay tree to support rolling back state
2 changes: 1 addition & 1 deletion go/oasis-node/cmd/debug/byzantine/executor.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ type computeBatchContext struct {

ioTree *transaction.Tree
txs []*transaction.Transaction
stateTree *urkel.Tree
stateTree urkel.Tree

stateWriteLog writelog.WriteLog
newStateRoot hash.Hash
Expand Down
50 changes: 28 additions & 22 deletions go/oasis-test-runner/scenario/e2e/storage_sync.go
Original file line number Diff line number Diff line change
Expand Up @@ -114,38 +114,44 @@ func (sc *storageSyncImpl) Run(childEnv *env.Env) error {
"round", lastCheckpoint,
)

blk, err = ctrl.RuntimeClient.GetBlock(ctx, &runtimeClient.GetBlockRequest{
RuntimeID: runtimeID,
Round: lastCheckpoint,
})
if err != nil {
return fmt.Errorf("failed to get block %d: %w", lastCheckpoint, err)
}

// There should be at least two checkpoints. There may be more depending on the state of garbage
// collection process (which may be one checkpoint behind.)
if len(cps) < 2 {
return fmt.Errorf("incorrect number of checkpoints (expected: >=2 got: %d)", len(cps))
}

var validCps int
for _, cp := range cps {
if cp.Root.Round != blk.Header.Round {
continue
for checkpoint := rt.Storage.CheckpointInterval; checkpoint <= lastCheckpoint; checkpoint += rt.Storage.CheckpointInterval {
blk, err = ctrl.RuntimeClient.GetBlock(ctx, &runtimeClient.GetBlockRequest{
RuntimeID: runtimeID,
Round: checkpoint,
})
if err != nil {
return fmt.Errorf("failed to get block %d: %w", checkpoint, err)
}
var found bool
for _, root := range blk.Header.StorageRoots() {
if root.Equal(&cp.Root) {
found = true
break
for _, cp := range cps {
if cp.Root.Round != blk.Header.Round {
continue
}
var found bool
for _, root := range blk.Header.StorageRoots() {
if root.Equal(&cp.Root) {
found = true
break
}
}
if !found {
return fmt.Errorf("checkpoint for unexpected root %s", cp.Root)
}
sc.logger.Info("found valid checkpoint",
"round", checkpoint,
"root_hash", cp.Root.Hash,
)
validCps++
}
if !found {
return fmt.Errorf("checkpoint for unexpected root %s", cp.Root)
}
validCps++
}
if validCps != 2 {
return fmt.Errorf("incorrect number of valid checkpoints (expected: 2 got: %d)", validCps)
if validCps < 2 {
return fmt.Errorf("incorrect number of valid checkpoints (expected: >=2 got: %d)", validCps)
}

return nil
Expand Down
2 changes: 1 addition & 1 deletion go/runtime/transaction/transaction.go
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@ func (t Transaction) asOutputArtifacts() outputArtifacts {
// Tree is a Merkle tree containing transaction artifacts.
type Tree struct {
ioRoot node.Root
tree *urkel.Tree
tree urkel.Tree
}

// NewTree creates a new transaction artifacts tree.
Expand Down
2 changes: 1 addition & 1 deletion go/storage/api/root_cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ type RootCache struct {

// GetTree gets a tree entry from the cache by the root iff present, or creates
// a new tree with the specified root in the node database.
func (rc *RootCache) GetTree(ctx context.Context, root Root) (*urkel.Tree, error) {
func (rc *RootCache) GetTree(ctx context.Context, root Root) (urkel.Tree, error) {
return urkel.NewWithRoot(rc.remoteSyncer, rc.localDB, root, rc.persistEverything), nil
}

Expand Down
2 changes: 1 addition & 1 deletion go/storage/mkvs/urkel/checkpoint/chunk.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ import (

func createChunk(
ctx context.Context,
tree *urkel.Tree,
tree urkel.Tree,
root node.Root,
offset node.Key,
chunkSize uint64,
Expand Down
16 changes: 5 additions & 11 deletions go/storage/mkvs/urkel/commit.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,13 +10,8 @@ import (
"github.com/oasislabs/oasis-core/go/storage/mkvs/urkel/writelog"
)

// CommitKnown checks that the computed root matches a known root and
// if so, commits tree updates to the underlying database and returns
// the write log.
//
// In case the computed root doesn't match the known root, the update
// is NOT committed and ErrKnownRootMismatch is returned.
func (t *Tree) CommitKnown(ctx context.Context, root node.Root) (writelog.WriteLog, error) {
// Implements Tree.
func (t *tree) CommitKnown(ctx context.Context, root node.Root) (writelog.WriteLog, error) {
writeLog, _, err := t.commitWithHooks(ctx, root.Namespace, root.Round, func(rootHash hash.Hash) error {
if !rootHash.Equal(&root.Hash) {
return ErrKnownRootMismatch
Expand All @@ -27,13 +22,12 @@ func (t *Tree) CommitKnown(ctx context.Context, root node.Root) (writelog.WriteL
return writeLog, err
}

// Commit commits tree updates to the underlying database and returns
// the write log and new merkle root.
func (t *Tree) Commit(ctx context.Context, namespace common.Namespace, round uint64) (writelog.WriteLog, hash.Hash, error) {
// Commit implements Tree. It commits all pending tree updates to the
// underlying database and returns the resulting write log together with
// the new Merkle root hash. It delegates to commitWithHooks with no
// commit hook installed (unlike CommitKnown, no root check is performed).
func (t *tree) Commit(ctx context.Context, namespace common.Namespace, round uint64) (writelog.WriteLog, hash.Hash, error) {
	return t.commitWithHooks(ctx, namespace, round, nil)
}

func (t *Tree) commitWithHooks(
func (t *tree) commitWithHooks(
ctx context.Context,
namespace common.Namespace,
round uint64,
Expand Down
6 changes: 3 additions & 3 deletions go/storage/mkvs/urkel/debug.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,12 +9,12 @@ import (
"github.com/oasislabs/oasis-core/go/storage/mkvs/urkel/node"
)

// DumpLocal dumps the tree in the local memory into the given writer.
func (t *Tree) DumpLocal(ctx context.Context, w io.Writer, maxDepth node.Depth) {
// DumpLocal implements Tree. It dumps the part of the tree that is held
// in local memory (starting at the in-memory pending root) into the given
// writer, descending at most maxDepth levels.
func (t *tree) DumpLocal(ctx context.Context, w io.Writer, maxDepth node.Depth) {
	t.doDumpLocal(ctx, w, t.cache.pendingRoot, 0, maxDepth)
}

func (t *Tree) doDumpLocal(ctx context.Context, w io.Writer, ptr *node.Pointer, depth node.Depth, maxDepth node.Depth) {
func (t *tree) doDumpLocal(ctx context.Context, w io.Writer, ptr *node.Pointer, depth node.Depth, maxDepth node.Depth) {
prefix := strings.Repeat(" ", int(depth)*2)
if ptr == nil {
fmt.Fprint(w, prefix+"<nil>")
Expand Down
26 changes: 14 additions & 12 deletions go/storage/mkvs/urkel/insert.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@ import (
"github.com/oasislabs/oasis-core/go/storage/mkvs/urkel/node"
)

// Insert inserts a key/value pair into the tree.
func (t *Tree) Insert(ctx context.Context, key []byte, value []byte) error {
// Implements Tree.
func (t *tree) Insert(ctx context.Context, key []byte, value []byte) error {
t.cache.Lock()
defer t.cache.Unlock()

Expand All @@ -27,16 +27,18 @@ func (t *Tree) Insert(ctx context.Context, key []byte, value []byte) error {
}

// Update the pending write log.
entry := t.pendingWriteLog[node.ToMapKey(key)]
if entry == nil {
t.pendingWriteLog[node.ToMapKey(key)] = &pendingEntry{
key: key,
value: value,
existed: result.existed,
insertedLeaf: result.insertedLeaf,
if !t.withoutWriteLog {
entry := t.pendingWriteLog[node.ToMapKey(key)]
if entry == nil {
t.pendingWriteLog[node.ToMapKey(key)] = &pendingEntry{
key: key,
value: value,
existed: result.existed,
insertedLeaf: result.insertedLeaf,
}
} else {
entry.value = value
}
} else {
entry.value = value
}

t.cache.setPendingRoot(result.newRoot)
Expand All @@ -49,7 +51,7 @@ type insertResult struct {
existed bool
}

func (t *Tree) doInsert(
func (t *tree) doInsert(
ctx context.Context,
ptr *node.Pointer,
bitDepth node.Depth,
Expand Down
32 changes: 19 additions & 13 deletions go/storage/mkvs/urkel/iterator.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,8 @@ import (

var errClosed = errors.New("iterator: use of closed iterator")

// SyncIterate seeks to a given key and then fetches the specified
// number of following items based on key iteration order.
func (t *Tree) SyncIterate(ctx context.Context, request *syncer.IterateRequest) (*syncer.ProofResponse, error) {
// Implements syncer.ReadSyncer.
func (t *tree) SyncIterate(ctx context.Context, request *syncer.IterateRequest) (*syncer.ProofResponse, error) {
t.cache.Lock()
defer t.cache.Unlock()

Expand Down Expand Up @@ -58,7 +57,7 @@ func (t *Tree) SyncIterate(ctx context.Context, request *syncer.IterateRequest)
}, nil
}

func (t *Tree) newFetcherSyncIterate(key node.Key, prefetch uint16) readSyncFetcher {
func (t *tree) newFetcherSyncIterate(key node.Key, prefetch uint16) readSyncFetcher {
return func(ctx context.Context, ptr *node.Pointer, rs syncer.ReadSyncer) (*syncer.Proof, error) {
rsp, err := rs.SyncIterate(ctx, &syncer.IterateRequest{
Tree: syncer.TreeID{
Expand Down Expand Up @@ -125,7 +124,7 @@ type pathAtom struct {

type treeIterator struct {
ctx context.Context
tree *Tree
tree *tree
prefetch uint16
err error
pos []pathAtom
Expand Down Expand Up @@ -155,8 +154,7 @@ func WithProof(root hash.Hash) IteratorOption {
}
}

// NewIterator creates a new iterator over the given tree.
func NewIterator(ctx context.Context, tree *Tree, options ...IteratorOption) Iterator {
func newTreeIterator(ctx context.Context, tree *tree, options ...IteratorOption) Iterator {
it := &treeIterator{
ctx: ctx,
tree: tree,
Expand Down Expand Up @@ -244,7 +242,7 @@ func (it *treeIterator) Next() {
it.value = nil
}

func (it *treeIterator) doNext(ptr *node.Pointer, bitDepth node.Depth, path node.Key, key node.Key, state visitState) error {
func (it *treeIterator) doNext(ptr *node.Pointer, bitDepth node.Depth, path node.Key, key node.Key, state visitState) error { // nolint: gocyclo
// Dereference the node, possibly making a remote request.
nd, err := it.tree.cache.derefNodePtr(it.ctx, ptr, it.tree.newFetcherSyncIterate(key, it.prefetch))
if err != nil {
Expand All @@ -266,9 +264,17 @@ func (it *treeIterator) doNext(ptr *node.Pointer, bitDepth node.Depth, path node
case *node.InternalNode:
// Internal node.
bitLength := bitDepth + n.LabelBitLength
newPath := path.Merge(bitDepth, n.Label, n.LabelBitLength)

// Check if the key is longer than the current path but lexicographically smaller. In this
// case everything in this subtree will be larger so we need to take the first value.
var takeFirst bool
if bitLength > 0 && key.BitLength() >= bitLength && key.Compare(newPath) < 0 {
takeFirst = true
}

// Does lookup key end here? Look into LeafNode.
if (state == visitBefore && key.BitLength() <= bitLength) || state == visitAt {
if (state == visitBefore && (key.BitLength() <= bitLength || takeFirst)) || state == visitAt {
if state == visitBefore {
err := it.doNext(n.LeafNode, bitLength, path, key, visitBefore)
if err != nil {
Expand All @@ -281,17 +287,17 @@ func (it *treeIterator) doNext(ptr *node.Pointer, bitDepth node.Depth, path node
}
}
// Key has not been found, continue with search for next key.
key = key.AppendBit(bitLength, false)
if key.BitLength() <= bitLength {
key = key.AppendBit(bitLength, false)
}
}

if state == visitBefore {
state = visitAt
}

newPath := path.Merge(bitDepth, n.Label, n.LabelBitLength)

// Continue recursively based on a bit value.
if (state == visitAt && !key.GetBit(bitLength)) || state == visitAtLeft {
if (state == visitAt && (!key.GetBit(bitLength) || takeFirst)) || state == visitAtLeft {
if state == visitAt {
err := it.doNext(n.Left, bitLength, newPath.AppendBit(bitLength, false), key, visitBefore)
if err != nil {
Expand Down
49 changes: 49 additions & 0 deletions go/storage/mkvs/urkel/iterator_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package urkel

import (
"context"
"encoding/hex"
"testing"

"github.com/stretchr/testify/require"
Expand Down Expand Up @@ -136,6 +137,54 @@ func TestIterator(t *testing.T) {
})
}

// TestIteratorCase1 is a regression test for the iterator seek bug: seeking
// to a key lexicographically smaller than every key stored in the tree must
// position the iterator on the first element.
func TestIteratorCase1(t *testing.T) {
	ctx := context.Background()
	tr := New(nil, nil)
	defer tr.Close()

	entries := writelog.WriteLog{
		writelog.LogEntry{Key: []byte("key 5"), Value: []byte("fivey")},
		writelog.LogEntry{Key: []byte("key 7"), Value: []byte("seven")},
	}

	// Seeking to "key 3" (before all stored keys) should land on position 0.
	cases := []testCase{
		{seek: node.Key("key 3"), pos: 0},
	}

	err := tr.ApplyWriteLog(ctx, writelog.NewStaticIterator(entries))
	require.NoError(t, err, "ApplyWriteLog")

	iter := tr.NewIterator(ctx)
	defer iter.Close()

	testIterator(t, entries, iter, cases)
}

// TestIteratorCase2 is a regression test for the iterator seek bug: seeking
// to a key that shares a prefix with stored keys but is absent (and smaller
// than both) must yield the next larger stored key instead of an invalid
// iterator.
func TestIteratorCase2(t *testing.T) {
	ctx := context.Background()
	tr := New(nil, nil)
	defer tr.Close()

	storedKeys := []string{
		"54dcb497eb46bc7cb1a1a29d143d5d41f1a684c97e12f2ae536eceb828c15fc34c02",
		"54dd32a01981671e87f0ef72cd601a1323f6804ace91bd77cd719473ce40ff6c2b01",
	}
	for _, hexKey := range storedKeys {
		raw, err := hex.DecodeString(hexKey)
		require.NoError(t, err, "DecodeString")
		require.NoError(t, tr.Insert(ctx, raw, []byte("value")), "Insert")
	}

	iter := tr.NewIterator(ctx)
	defer iter.Close()

	missingKey, err := hex.DecodeString("54da85be3251772db943cba67341d402117c87ada2a9e8aad7171d40b6b4dc9fbc")
	require.NoError(t, err, "DecodeString")
	iter.Seek(missingKey)
	require.True(t, iter.Valid(), "iterator should be valid")
	require.EqualValues(t, []byte("value"), iter.Value(), "value should be correct")
}

func TestIteratorEviction(t *testing.T) {
ctx := context.Background()
tree := New(nil, nil, Capacity(0, 0))
Expand Down
12 changes: 6 additions & 6 deletions go/storage/mkvs/urkel/lookup.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@ import (
"github.com/oasislabs/oasis-core/go/storage/mkvs/urkel/syncer"
)

// Get looks up an existing key.
func (t *Tree) Get(ctx context.Context, key []byte) ([]byte, error) {
// Implements Tree.
func (t *tree) Get(ctx context.Context, key []byte) ([]byte, error) {
t.cache.Lock()
defer t.cache.Unlock()

Expand All @@ -23,8 +23,8 @@ func (t *Tree) Get(ctx context.Context, key []byte) ([]byte, error) {
return t.doGet(ctx, t.cache.pendingRoot, 0, key, doGetOptions{}, false)
}

// SyncGet fetches a single key and returns the corresponding proof.
func (t *Tree) SyncGet(ctx context.Context, request *syncer.GetRequest) (*syncer.ProofResponse, error) {
// Implements syncer.ReadSyncer.
func (t *tree) SyncGet(ctx context.Context, request *syncer.GetRequest) (*syncer.ProofResponse, error) {
t.cache.Lock()
defer t.cache.Unlock()

Expand Down Expand Up @@ -59,7 +59,7 @@ func (t *Tree) SyncGet(ctx context.Context, request *syncer.GetRequest) (*syncer
}, nil
}

func (t *Tree) newFetcherSyncGet(key node.Key, includeSiblings bool) readSyncFetcher {
func (t *tree) newFetcherSyncGet(key node.Key, includeSiblings bool) readSyncFetcher {
return func(ctx context.Context, ptr *node.Pointer, rs syncer.ReadSyncer) (*syncer.Proof, error) {
rsp, err := rs.SyncGet(ctx, &syncer.GetRequest{
Tree: syncer.TreeID{
Expand All @@ -81,7 +81,7 @@ type doGetOptions struct {
includeSiblings bool
}

func (t *Tree) doGet(
func (t *tree) doGet(
ctx context.Context,
ptr *node.Pointer,
bitDepth node.Depth,
Expand Down
Loading