Support node hash deleted in multiple blocks
qdm12 committed Jan 6, 2023
1 parent 897b8fa commit b6d7970
Showing 5 changed files with 133 additions and 41 deletions.
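At a glance: the journal previously stored, for each deleted trie node hash, a single scale-encoded journalKey naming the one block that deleted it, so a second block (for example on a sibling fork) deleting the same node hash would overwrite the record. This commit changes the stored value to a scale-encoded []journalKey. A minimal sketch of the before/after value shape, using simplified stand-in types rather than gossamer's actual journalKey and SCALE codec:

```go
package main

import "fmt"

// journalKey is a simplified stand-in for the pruner's journal key:
// the block that deleted a given trie node hash.
type journalKey struct {
	BlockNumber uint32
	BlockHash   string
}

func main() {
	// Before: journal_deleted_<nodeHash> -> one encoded journalKey.
	before := journalKey{BlockNumber: 1, BlockHash: "0x65"}

	// After: journal_deleted_<nodeHash> -> an encoded []journalKey, so every
	// block (including forks) that deleted the node hash stays recorded.
	after := []journalKey{
		{BlockNumber: 1, BlockHash: "0x65"},
		{BlockNumber: 1, BlockHash: "0x66"}, // sibling fork deleting the same node hash
	}

	fmt.Println(before, after)
}
```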
27 changes: 19 additions & 8 deletions internal/pruner/full/journal.go
@@ -26,10 +26,6 @@ func storeDeletedNodeHashes(journalDatabase Getter, batch Putter,
         BlockNumber: blockNumber,
         BlockHash:   blockHash,
     }
-    encodedKey, err := scale.Marshal(key)
-    if err != nil {
-        return fmt.Errorf("scale encoding journal key: %w", err)
-    }
 
     deletedNodeHashesSlice := make([]common.Hash, 0, len(deletedNodeHashes))
     for deletedNodeHash := range deletedNodeHashes {
@@ -46,15 +42,26 @@ func storeDeletedNodeHashes(journalDatabase Getter, batch Putter,
                 "from journal database: %w", err)
         }
 
-        var journalKeys []journalKey
+        var keys []journalKey
         if len(encodedJournalKeys) > 0 {
-            err = scale.Unmarshal(encodedJournalKeys, &journalKeys)
+            // one or more other blocks deleted the same node hash that the
+            // current block deleted as well.
+            err = scale.Unmarshal(encodedJournalKeys, &keys)
             if err != nil {
                 return fmt.Errorf("scale decoding journal keys for deleted node hash "+
                     "from journal database: %w", err)
             }
         }
-        err = batch.Put(databaseKey, encodedKey)
+        keys = append(keys, key)
+
+        encodedKeys, err := scale.Marshal(keys)
+        if err != nil {
+            return fmt.Errorf("scale encoding journal keys: %w", err)
+        }
+
+        err = batch.Put(databaseKey, encodedKeys)
         if err != nil {
-            return fmt.Errorf("putting journal key in database batch: %w", err)
+            return fmt.Errorf("putting journal keys in database batch: %w", err)
         }
     }
 
@@ -69,6 +76,10 @@ func storeDeletedNodeHashes(journalDatabase Getter, batch Putter,
 
     // We store the deleted node hashes in the journal database
     // at the key (block hash + block number)
+    encodedKey, err := scale.Marshal(key)
+    if err != nil {
+        return fmt.Errorf("scale encoding journal key: %w", err)
+    }
     err = batch.Put(encodedKey, encodedDeletedNodeHashes)
     if err != nil {
         return fmt.Errorf("putting deleted node hashes in database batch: %w", err)
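The middle hunk turns a blind overwrite into a read-append-write: fetch whatever journal keys are already stored for the deleted node hash, append the current block's key, and put the whole list back. A self-contained sketch of that pattern over an in-memory map, with encoding/json standing in for gossamer's SCALE encoding:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type journalKey struct {
	BlockNumber uint32
	BlockHash   string
}

// upsertJournalKey appends key to the list of journal keys already stored
// under databaseKey, instead of overwriting the previous single value.
func upsertJournalKey(store map[string][]byte, databaseKey string, key journalKey) error {
	var keys []journalKey
	if encoded, ok := store[databaseKey]; ok {
		// One or more other blocks already deleted this node hash.
		if err := json.Unmarshal(encoded, &keys); err != nil {
			return fmt.Errorf("decoding journal keys: %w", err)
		}
	}

	keys = append(keys, key)

	encoded, err := json.Marshal(keys)
	if err != nil {
		return fmt.Errorf("encoding journal keys: %w", err)
	}
	store[databaseKey] = encoded
	return nil
}

func main() {
	store := map[string][]byte{}
	_ = upsertJournalKey(store, "journal_deleted_0x01", journalKey{1, "0x65"})
	_ = upsertJournalKey(store, "journal_deleted_0x01", journalKey{1, "0x66"}) // sibling fork

	fmt.Println(string(store["journal_deleted_0x01"])) // both deleting blocks are kept
}
```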
64 changes: 59 additions & 5 deletions internal/pruner/full/journal_test.go
@@ -22,34 +22,75 @@ func Test_storeDeletedNodeHashes(t *testing.T) {
     errTest := errors.New("test error")
 
     testCases := map[string]struct {
+        databaseBuilder   func(ctrl *gomock.Controller) Getter
         batchBuilder      func(ctrl *gomock.Controller) Putter
         blockNumber       uint32
         blockHash         common.Hash
         deletedNodeHashes map[common.Hash]struct{}
         errWrapped        error
         errMessage        string
     }{
+        "get encoded journal keys error": {
+            databaseBuilder: func(ctrl *gomock.Controller) Getter {
+                database := NewMockGetter(ctrl)
+                database.EXPECT().Get(makeDeletedKey(common.Hash{3})).
+                    Return(nil, errTest)
+                return database
+            },
+            batchBuilder:      func(ctrl *gomock.Controller) Putter { return nil },
+            blockHash:         common.Hash{2},
+            deletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
+            errWrapped:        errTest,
+            errMessage: "getting journal keys for deleted node hash " +
+                "from journal database: test error",
+        },
+        "decode journal keys error": {
+            databaseBuilder: func(ctrl *gomock.Controller) Getter {
+                database := NewMockGetter(ctrl)
+                database.EXPECT().Get(makeDeletedKey(common.Hash{3})).
+                    Return([]byte{99}, nil)
+                return database
+            },
+            batchBuilder:      func(ctrl *gomock.Controller) Putter { return nil },
+            blockHash:         common.Hash{2},
+            deletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
+            errWrapped:        io.EOF,
+            errMessage: "scale decoding journal keys for deleted node hash " +
+                "from journal database: reading bytes: EOF",
+        },
         "deleted node hash put error": {
+            databaseBuilder: func(ctrl *gomock.Controller) Getter {
+                database := NewMockGetter(ctrl)
+                database.EXPECT().Get(makeDeletedKey(common.Hash{3})).
+                    Return(nil, nil)
+                return database
+            },
             batchBuilder: func(ctrl *gomock.Controller) Putter {
                 database := NewMockPutDeleter(ctrl)
                 database.EXPECT().Put(
                     makeDeletedKey(common.Hash{3}),
-                    scaleEncodeJournalKey(1, common.Hash{2}),
+                    scale.MustMarshal([]journalKey{{BlockNumber: 1, BlockHash: common.Hash{2}}}),
                 ).Return(errTest)
                 return database
             },
             blockNumber:       1,
             blockHash:         common.Hash{2},
             deletedNodeHashes: map[common.Hash]struct{}{{3}: {}},
             errWrapped:        errTest,
-            errMessage:        "putting journal key in database batch: test error",
+            errMessage:        "putting journal keys in database batch: test error",
         },
         "encoded deleted node hashes put error": {
+            databaseBuilder: func(ctrl *gomock.Controller) Getter {
+                database := NewMockGetter(ctrl)
+                database.EXPECT().Get(makeDeletedKey(common.Hash{3})).
+                    Return(nil, nil)
+                return database
+            },
             batchBuilder: func(ctrl *gomock.Controller) Putter {
                 database := NewMockPutDeleter(ctrl)
                 database.EXPECT().Put(
                     makeDeletedKey(common.Hash{3}),
-                    scaleEncodeJournalKey(1, common.Hash{2}),
+                    scale.MustMarshal([]journalKey{{BlockNumber: 1, BlockHash: common.Hash{2}}}),
                 ).Return(nil)
                 database.EXPECT().Put(
                     scaleEncodeJournalKey(1, common.Hash{2}),
@@ -64,11 +105,23 @@ func Test_storeDeletedNodeHashes(t *testing.T) {
             errMessage: "putting deleted node hashes in database batch: test error",
         },
         "success": {
+            databaseBuilder: func(ctrl *gomock.Controller) Getter {
+                database := NewMockGetter(ctrl)
+                database.EXPECT().Get(makeDeletedKey(common.Hash{3})).
+                    Return(
+                        scale.MustMarshal([]journalKey{{BlockNumber: 5, BlockHash: common.Hash{5}}}),
+                        nil,
+                    )
+                return database
+            },
             batchBuilder: func(ctrl *gomock.Controller) Putter {
                 database := NewMockPutDeleter(ctrl)
                 database.EXPECT().Put(
                     makeDeletedKey(common.Hash{3}),
-                    scaleEncodeJournalKey(1, common.Hash{2}),
+                    scale.MustMarshal([]journalKey{
+                        {BlockNumber: 5, BlockHash: common.Hash{5}},
+                        {BlockNumber: 1, BlockHash: common.Hash{2}},
+                    }),
                 ).Return(nil)
                 database.EXPECT().Put(
                     scaleEncodeJournalKey(1, common.Hash{2}),
@@ -88,8 +141,9 @@ func Test_storeDeletedNodeHashes(t *testing.T) {
             t.Parallel()
             ctrl := gomock.NewController(t)
 
+            database := testCase.databaseBuilder(ctrl)
             batch := testCase.batchBuilder(ctrl)
-            err := storeDeletedNodeHashes(batch, testCase.blockNumber,
+            err := storeDeletedNodeHashes(database, batch, testCase.blockNumber,
                 testCase.blockHash, testCase.deletedNodeHashes)
 
             assert.ErrorIs(t, err, testCase.errWrapped)
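Each test case now also builds a Getter, because storeDeletedNodeHashes reads existing journal keys before writing. The tests use generated gomock types; a hand-rolled fake works the same way, as in this sketch (the names here are illustrative, not the gossamer test helpers):

```go
package main

import "fmt"

// fakeGetter is a hand-rolled stand-in for the generated MockGetter.
type fakeGetter struct {
	value []byte
	err   error
}

func (f fakeGetter) Get(key []byte) ([]byte, error) { return f.value, f.err }

func main() {
	// Equivalent of a databaseBuilder returning a mock that yields no prior keys:
	db := fakeGetter{value: nil, err: nil}
	value, err := db.Get([]byte("journal_deleted_..."))
	fmt.Println(value, err)
}
```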
3 changes: 2 additions & 1 deletion internal/pruner/full/pruner.go
@@ -147,7 +147,8 @@ func (p *Pruner) StoreJournalRecord(deletedNodeHashes, insertedNodeHashes map[co
         return fmt.Errorf("recording block hash in journal database: %w", err)
     }
 
-    err = storeDeletedNodeHashes(journalDBBatch, blockNumber, blockHash, deletedNodeHashes)
+    err = storeDeletedNodeHashes(p.journalDatabase, journalDBBatch, blockNumber,
+        blockHash, deletedNodeHashes)
     if err != nil {
         journalDBBatch.Reset()
         return fmt.Errorf("storing deleted node hashes for block number %d: %w", blockNumber, err)
24 changes: 16 additions & 8 deletions internal/pruner/full/pruner_test.go
@@ -74,7 +74,8 @@ func Test_Pruner(t *testing.T) {
         "journal_block_number_to_hash_1": common.Hash{101}.ToBytes(),
         "journal_" + string(scaleEncodeJournalKey(0, common.Hash{100})): scale.MustMarshal([]common.Hash(nil)),
         "journal_" + string(scaleEncodeJournalKey(1, common.Hash{101})): scale.MustMarshal([]common.Hash{{1}}),
-        "journal_deleted_" + string(common.Hash{1}.ToBytes()): scaleEncodeJournalKey(1, common.Hash{101}),
+        "journal_deleted_" + string(common.Hash{1}.ToBytes()): scale.MustMarshal(
+            []journalKey{{1, common.Hash{101}}}),
     }
     assertDatabaseContent(t, database, keyValuePairs)
 
@@ -102,8 +103,10 @@ func Test_Pruner(t *testing.T) {
         "journal_" + string(scaleEncodeJournalKey(0, common.Hash{100})): scale.MustMarshal([]common.Hash(nil)),
         "journal_" + string(scaleEncodeJournalKey(1, common.Hash{101})): scale.MustMarshal([]common.Hash{{1}}),
         "journal_" + string(scaleEncodeJournalKey(1, common.Hash{102})): scale.MustMarshal([]common.Hash{{3}}),
-        "journal_deleted_" + string(common.Hash{1}.ToBytes()): scaleEncodeJournalKey(1, common.Hash{101}),
-        "journal_deleted_" + string(common.Hash{3}.ToBytes()): scaleEncodeJournalKey(1, common.Hash{102}),
+        "journal_deleted_" + string(common.Hash{1}.ToBytes()): scale.MustMarshal(
+            []journalKey{{1, common.Hash{101}}}),
+        "journal_deleted_" + string(common.Hash{3}.ToBytes()): scale.MustMarshal(
+            []journalKey{{1, common.Hash{102}}}),
     }
     assertDatabaseContent(t, database, keyValuePairs)
 
@@ -135,9 +138,12 @@ func Test_Pruner(t *testing.T) {
         "journal_" + string(scaleEncodeJournalKey(1, common.Hash{101})): scale.MustMarshal([]common.Hash{{1}}),
         "journal_" + string(scaleEncodeJournalKey(1, common.Hash{102})): scale.MustMarshal([]common.Hash{{3}}),
         "journal_" + string(scaleEncodeJournalKey(2, common.Hash{103})): scale.MustMarshal([]common.Hash{{5}}),
-        "journal_deleted_" + string(common.Hash{1}.ToBytes()): scaleEncodeJournalKey(1, common.Hash{101}),
-        "journal_deleted_" + string(common.Hash{3}.ToBytes()): scaleEncodeJournalKey(1, common.Hash{102}),
-        "journal_deleted_" + string(common.Hash{5}.ToBytes()): scaleEncodeJournalKey(2, common.Hash{103}),
+        "journal_deleted_" + string(common.Hash{1}.ToBytes()): scale.MustMarshal(
+            []journalKey{{1, common.Hash{101}}}),
+        "journal_deleted_" + string(common.Hash{3}.ToBytes()): scale.MustMarshal(
+            []journalKey{{1, common.Hash{102}}}),
+        "journal_deleted_" + string(common.Hash{5}.ToBytes()): scale.MustMarshal(
+            []journalKey{{2, common.Hash{103}}}),
     }
     assertDatabaseContent(t, database, keyValuePairs)
 
@@ -168,8 +174,10 @@ func Test_Pruner(t *testing.T) {
         "journal_block_number_to_hash_3": common.Hash{104}.ToBytes(),
         "journal_" + string(scaleEncodeJournalKey(2, common.Hash{103})): scale.MustMarshal([]common.Hash{{5}}),
         "journal_" + string(scaleEncodeJournalKey(3, common.Hash{104})): scale.MustMarshal([]common.Hash{{7}}),
-        "journal_deleted_" + string(common.Hash{5}.ToBytes()): scaleEncodeJournalKey(2, common.Hash{103}),
-        "journal_deleted_" + string(common.Hash{7}.ToBytes()): scaleEncodeJournalKey(3, common.Hash{104}),
+        "journal_deleted_" + string(common.Hash{5}.ToBytes()): scale.MustMarshal(
+            []journalKey{{2, common.Hash{103}}}),
+        "journal_deleted_" + string(common.Hash{7}.ToBytes()): scale.MustMarshal(
+            []journalKey{{3, common.Hash{104}}}),
     }
     assertDatabaseContent(t, database, keyValuePairs)
 }
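For readers decoding the assertions: three key families appear in the journal database, and only the last one changes shape in this commit. A short summary with the layout inferred from the expected key-value pairs (the helper below is hypothetical, not a gossamer function):

```go
package main

import "fmt"

// Journal layout inferred from the assertions above:
//
//	journal_block_number_to_hash_<n>      -> hash of the block recorded at number n
//	journal_<scale(journalKey{n, hash})>  -> []common.Hash deleted by that block
//	journal_deleted_<nodeHash>            -> []journalKey of every block that
//	                                         deleted nodeHash (was one journalKey)
func deletedIndexKey(nodeHash [32]byte) string { // hypothetical helper
	return "journal_deleted_" + string(nodeHash[:])
}

func main() {
	fmt.Printf("%q\n", deletedIndexKey([32]byte{1})[:16])
}
```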
56 changes: 37 additions & 19 deletions internal/pruner/full/reinserted.go
@@ -30,37 +30,55 @@ func (p *Pruner) handleInsertedKey(insertedNodeHash common.Hash, blockNumber uin
     // Try to find if the node hash was deleted in another block before
     // since we no longer want to prune it, as it was re-inserted.
     deletedNodeHashKey := makeDeletedKey(insertedNodeHash)
-    journalKeyDeletedAt, err := p.journalDatabase.Get(deletedNodeHashKey)
+    encodedJournalKeysDeletedAt, err := p.journalDatabase.Get(deletedNodeHashKey)
     nodeHashDeletedInAnotherBlock := !errors.Is(err, chaindb.ErrKeyNotFound)
     if !nodeHashDeletedInAnotherBlock {
         return nil
     } else if err != nil {
-        return fmt.Errorf("getting journal key for node hash from journal database: %w", err)
+        return fmt.Errorf("getting journal keys for node hash from journal database: %w", err)
     }
 
-    var key journalKey
-    err = scale.Unmarshal(journalKeyDeletedAt, &key)
+    var journalKeysDeletedAt []journalKey
+    err = scale.Unmarshal(encodedJournalKeysDeletedAt, &journalKeysDeletedAt)
     if err != nil {
-        return fmt.Errorf("decoding journal key: %w", err)
+        return fmt.Errorf("decoding journal keys: %w", err)
     }
 
-    deletedInUncleBlock := key.BlockNumber >= blockNumber
-    if deletedInUncleBlock {
-        return nil
+    for _, journalKeyDeletedAt := range journalKeysDeletedAt {
+        deletedInUncleBlock := journalKeyDeletedAt.BlockNumber >= blockNumber
+        if deletedInUncleBlock {
+            // do not remove the deleted node hash from the uncle block journal data
+            continue
+        }
+
+        isDescendant, err := p.blockState.IsDescendantOf(journalKeyDeletedAt.BlockHash, blockHash)
+        if err != nil {
+            return fmt.Errorf("checking if block %s is descendant of block %s: %w",
+                journalKeyDeletedAt.BlockHash, blockHash, err)
+        }
+        if !isDescendant {
+            // do not remove the deleted node hash from the non-ancestor block journal data
+            continue
+        }
+
+        // Remove node hash from the deleted node hashes of the ancestor block it was deleted in.
+        err = handleReInsertedKey(insertedNodeHash, journalKeyDeletedAt, p.journalDatabase, journalDBBatch)
+        if err != nil {
+            return fmt.Errorf("handling re-inserted key %s: %w", insertedNodeHash, err)
+        }
     }
 
-    isDescendant, err := p.blockState.IsDescendantOf(key.BlockHash, blockHash)
+    return nil
+}
+
+func handleReInsertedKey(reInsertedNodeHash common.Hash, journalKeyDeletedAt journalKey,
+    journalDatabase Getter, journalDBBatch Putter) (err error) {
+    encodedJournalKeyDeletedAt, err := scale.Marshal(journalKeyDeletedAt)
     if err != nil {
-        return fmt.Errorf("checking if block %s is descendant of block %s: %w",
-            key.BlockHash, blockHash, err)
-    }
-    deletedInUncleBlock = !isDescendant
-    if deletedInUncleBlock {
-        return nil
+        return fmt.Errorf("encoding journal key: %w", err)
     }
 
     // Remove node hash from the deleted node hashes of the block it was deleted in.
-    encodedDeletedNodeHashes, err := p.journalDatabase.Get(journalKeyDeletedAt)
+    encodedDeletedNodeHashes, err := journalDatabase.Get(encodedJournalKeyDeletedAt)
     if err != nil {
         return fmt.Errorf("getting deleted node hashes from journal database: %w", err)
     }
@@ -71,7 +89,7 @@ func (p *Pruner) handleInsertedKey(insertedNodeHash common.Hash, blockNumber uin
         return fmt.Errorf("decoding deleted node hashes: %w", err)
     }
     for i, deletedNodeHash := range deletedNodeHashes {
-        if deletedNodeHash != insertedNodeHash {
+        if deletedNodeHash != reInsertedNodeHash {
             continue
         }
         lastIndex := len(deletedNodeHashes) - 1
@@ -86,7 +104,7 @@ func (p *Pruner) handleInsertedKey(insertedNodeHash common.Hash, blockNumber uin
         return fmt.Errorf("encoding updated deleted node hashes: %w", err)
     }
 
-    err = journalDBBatch.Put(journalKeyDeletedAt, encodedDeletedNodeHashes)
+    err = journalDBBatch.Put(encodedJournalKeyDeletedAt, encodedDeletedNodeHashes)
     if err != nil {
         return fmt.Errorf("putting updated deleted node hashes in journal database batch: %w", err)
     }
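The rewritten handleInsertedKey now iterates over every journal key recorded for the re-inserted node hash, skips entries written by uncle blocks (block number greater than or equal to the inserting block's) and by non-ancestor forks, and rewrites journal data only for true ancestors via the new handleReInsertedKey helper. A condensed, self-contained sketch of that filtering, with a toy ancestry predicate standing in for blockState.IsDescendantOf:

```go
package main

import "fmt"

type journalKey struct {
	BlockNumber uint32
	BlockHash   string
}

// ancestorsToUpdate returns the journal keys whose deleted-node-hash sets
// should drop a re-inserted hash: only blocks that are strict ancestors of
// the block doing the re-insertion.
func ancestorsToUpdate(deletedAt []journalKey, blockNumber uint32, blockHash string,
	isDescendantOf func(ancestor, descendant string) bool) (toUpdate []journalKey) {
	for _, key := range deletedAt {
		if key.BlockNumber >= blockNumber {
			continue // uncle block: leave its journal data intact
		}
		if !isDescendantOf(key.BlockHash, blockHash) {
			continue // deleted on another fork, not an ancestor
		}
		toUpdate = append(toUpdate, key)
	}
	return toUpdate
}

func main() {
	// Block 0x67 at number 2 re-inserts a node hash previously deleted at:
	deletedAt := []journalKey{
		{BlockNumber: 1, BlockHash: "0x65"}, // ancestor: journal entry gets updated
		{BlockNumber: 1, BlockHash: "0x66"}, // sibling fork: left untouched
		{BlockNumber: 2, BlockHash: "0x68"}, // uncle at the same number: left untouched
	}
	isDescendantOf := func(ancestor, descendant string) bool {
		return ancestor == "0x65" && descendant == "0x67" // toy chain 0x65 -> 0x67
	}

	fmt.Println(ancestorsToUpdate(deletedAt, 2, "0x67", isDescendantOf))
}
```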
