Cleanup proposervm ancestors packing #1446

Merged 1 commit on Apr 28, 2023
10 changes: 4 additions & 6 deletions vms/proposervm/batched_vm.go
@@ -24,7 +24,7 @@ func (vm *VM) GetAncestors(
blkID ids.ID,
maxBlocksNum int,
maxBlocksSize int,
maxBlocksRetrivalTime time.Duration,
maxBlocksRetrievalTime time.Duration,
) ([][]byte, error) {
if vm.batchedVM == nil {
return nil, block.ErrRemoteVMNotImplemented
@@ -50,23 +50,21 @@ func (vm *VM) GetAncestors(
// is repr. by an int.
currentByteLength += wrappers.IntLen + len(blkBytes)
elapsedTime := vm.Clock.Time().Sub(startTime)
if len(res) > 0 && (currentByteLength >= maxBlocksSize || maxBlocksRetrivalTime <= elapsedTime) {
if len(res) > 0 && (currentByteLength >= maxBlocksSize || maxBlocksRetrievalTime <= elapsedTime) {
return res, nil // reached maximum size or ran out of time
}

res = append(res, blkBytes)
blkID = blk.ParentID()
maxBlocksNum--

if maxBlocksNum <= 0 {
if len(res) >= maxBlocksNum {
return res, nil
}
}

// snowman++ fork may have been hit.
preMaxBlocksNum := maxBlocksNum - len(res)
preMaxBlocksSize := maxBlocksSize - currentByteLength
preMaxBlocksRetrivalTime := maxBlocksRetrivalTime - time.Since(startTime)
preMaxBlocksRetrivalTime := maxBlocksRetrievalTime - time.Since(startTime)
innerBytes, err := vm.batchedVM.GetAncestors(
ctx,
blkID,
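The hunk above makes two changes: the maxBlocksRetrivalTime parameter is renamed to maxBlocksRetrievalTime, and the block-count cutoff stops decrementing maxBlocksNum in place, comparing len(res) >= maxBlocksNum instead. Keeping maxBlocksNum untouched matters because the leftover budget forwarded to the inner VM is computed as maxBlocksNum - len(res); with the decrementing counter that subtraction undercounted the remaining allowance. The sketch below shows the packing pattern in isolation, assuming hypothetical fetchBlock and innerGetAncestors helpers in place of the proposervm's block lookup and the wrapped VM's GetAncestors; it is an illustration, not the actual implementation.

package ancestorsketch

import "time"

// ancestorBlock is a minimal stand-in for the proposervm block interface.
type ancestorBlock interface {
	Bytes() []byte
	ParentID() string
}

// packAncestors illustrates the budgeting pattern from the diff above: pack
// post-fork blocks until the size, time, or count budget is spent, then hand
// whatever budget remains to the inner VM for the pre-fork ancestors.
func packAncestors(
	blkID string,
	maxBlocksNum int,
	maxBlocksSize int,
	maxBlocksRetrievalTime time.Duration,
	fetchBlock func(id string) (ancestorBlock, bool), // hypothetical post-fork block lookup
	innerGetAncestors func(id string, num, size int, timeout time.Duration) [][]byte, // hypothetical inner VM call
) [][]byte {
	startTime := time.Now()
	res := [][]byte{}
	currentByteLength := 0

	for {
		blk, found := fetchBlock(blkID)
		if !found {
			break // snowman++ fork may have been hit; fall through to the inner VM
		}

		// Account for the block bytes plus the length prefix (an int) used on the wire.
		blkBytes := blk.Bytes()
		currentByteLength += 4 + len(blkBytes)
		elapsedTime := time.Since(startTime)
		if len(res) > 0 && (currentByteLength >= maxBlocksSize || maxBlocksRetrievalTime <= elapsedTime) {
			return res // reached maximum size or ran out of time
		}

		res = append(res, blkBytes)
		blkID = blk.ParentID()

		if len(res) >= maxBlocksNum {
			return res
		}
	}

	// The remaining budget is computed from the original, undecremented limits.
	innerBytes := innerGetAncestors(
		blkID,
		maxBlocksNum-len(res),
		maxBlocksSize-currentByteLength,
		maxBlocksRetrievalTime-time.Since(startTime),
	)
	return append(res, innerBytes...)
}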
65 changes: 42 additions & 23 deletions vms/proposervm/batched_vm_test.go
@@ -20,6 +20,7 @@ import (
"github.com/ava-labs/avalanchego/snow/engine/common"
"github.com/ava-labs/avalanchego/snow/engine/snowman/block"
"github.com/ava-labs/avalanchego/snow/validators"
"github.com/ava-labs/avalanchego/utils/math"
"github.com/ava-labs/avalanchego/utils/timer/mockable"
"github.com/ava-labs/avalanchego/version"
"github.com/ava-labs/avalanchego/vms/proposervm/proposer"
@@ -456,32 +457,32 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) {
// ...Call GetAncestors on them ...
// Note: we assumed that if blkID is not known, that's NOT an error.
// Simply return an empty result
coreVM.GetAncestorsF = func(_ context.Context, blkID ids.ID, _, _ int, _ time.Duration) ([][]byte, error) {
res := make([][]byte, 0, 3)
coreVM.GetAncestorsF = func(_ context.Context, blkID ids.ID, maxBlocksNum, _ int, _ time.Duration) ([][]byte, error) {
sortedBlocks := [][]byte{
coreBlk4.Bytes(),
coreBlk3.Bytes(),
coreBlk2.Bytes(),
coreBlk1.Bytes(),
}
var startIndex int
switch blkID {
case coreBlk4.ID():
res = append(res, coreBlk4.Bytes())
res = append(res, coreBlk3.Bytes())
res = append(res, coreBlk2.Bytes())
res = append(res, coreBlk1.Bytes())
return res, nil
startIndex = 0
case coreBlk3.ID():
res = append(res, coreBlk3.Bytes())
res = append(res, coreBlk2.Bytes())
res = append(res, coreBlk1.Bytes())
return res, nil
startIndex = 1
case coreBlk2.ID():
res = append(res, coreBlk2.Bytes())
res = append(res, coreBlk1.Bytes())
return res, nil
startIndex = 2
case coreBlk1.ID():
res = append(res, coreBlk1.Bytes())
return res, nil
startIndex = 3
default:
return res, nil
return nil, nil // unknown blockID
}

endIndex := math.Min(startIndex+maxBlocksNum, len(sortedBlocks))
return sortedBlocks[startIndex:endIndex], nil
}

// load all known blocks
reqBlkID := builtBlk4.ID()
maxBlocksNum := 1000 // an high value to get all built blocks
maxBlocksSize := 1000000 // an high value to get all built blocks
@@ -495,13 +496,31 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) {
)

// ... and check returned values are as expected
require.NoError(err, "Error calling GetAncestors: %v", err)
require.Len(res, 4, "GetAncestor returned %v entries instead of %v", len(res), 4)
require.NoError(err, "Error calling GetAncestors")
require.Len(res, 4, "Wrong GetAncestor response")
require.EqualValues(res[0], builtBlk4.Bytes())
require.EqualValues(res[1], builtBlk3.Bytes())
require.EqualValues(res[2], builtBlk2.Bytes())
require.EqualValues(res[3], builtBlk1.Bytes())

// Regression case: load some prefork and some postfork blocks.
reqBlkID = builtBlk4.ID()
maxBlocksNum = 3
res, err = proRemoteVM.GetAncestors(
context.Background(),
reqBlkID,
maxBlocksNum,
maxBlocksSize,
maxBlocksRetrivalTime,
)

// ... and check returned values are as expected
require.NoError(err, "Error calling GetAncestors")
require.Len(res, 3, "Wrong GetAncestor response")
require.EqualValues(res[0], builtBlk4.Bytes())
require.EqualValues(res[1], builtBlk3.Bytes())
require.EqualValues(res[2], builtBlk2.Bytes())

// another good call
reqBlkID = builtBlk1.ID()
res, err = proRemoteVM.GetAncestors(
@@ -511,8 +530,8 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) {
maxBlocksSize,
maxBlocksRetrivalTime,
)
require.NoError(err, "Error calling GetAncestors: %v", err)
require.Len(res, 1, "GetAncestor returned %v entries instead of %v", len(res), 1)
require.NoError(err, "Error calling GetAncestors")
require.Len(res, 1, "Wrong GetAncestor response")
require.EqualValues(res[0], builtBlk1.Bytes())

// a faulty call
@@ -524,8 +543,8 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) {
maxBlocksSize,
maxBlocksRetrivalTime,
)
require.NoError(err, "Error calling GetAncestors: %v", err)
require.Len(res, 0, "GetAncestor returned %v entries instead of %v", len(res), 0)
require.NoError(err, "Error calling GetAncestors")
require.Empty(res, "Wrong GetAncestor response")
}

func TestBatchedParseBlockPreForkOnly(t *testing.T) {
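The rewritten GetAncestorsF mock above maps each known block ID to a start index into a newest-to-oldest slice and truncates the result with math.Min from avalanchego's utils/math package, so the mock now honors the maxBlocksNum it receives instead of always returning every ancestor. A standalone version of that bounded-slice pattern, with a local minInt helper standing in for the imported one, might look like this:

package ancestorsketch

// minInt stands in for avalanchego's utils/math.Min helper.
func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// ancestorsFrom returns at most maxBlocksNum blocks from sortedBlocks
// (ordered newest to oldest), starting at startIndex. An out-of-range
// startIndex models an unknown block ID and yields an empty result
// rather than an error.
func ancestorsFrom(sortedBlocks [][]byte, startIndex, maxBlocksNum int) [][]byte {
	if startIndex < 0 || startIndex >= len(sortedBlocks) {
		return nil
	}
	endIndex := minInt(startIndex+maxBlocksNum, len(sortedBlocks))
	return sortedBlocks[startIndex:endIndex]
}

Honoring maxBlocksNum in the mock is what gives the new regression case its teeth: that request mixes pre-fork and post-fork blocks, and the proposervm forwards only the leftover count budget to the inner VM, so a mock that ignored the limit could return extra blocks and hide an incorrectly computed budget.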