diff --git a/api/admin/client.go b/api/admin/client.go
index 11ee2e5cef2c..d5a81a81b25e 100644
--- a/api/admin/client.go
+++ b/api/admin/client.go
@@ -7,7 +7,9 @@ import (
     "context"

     "github.com/ava-labs/avalanchego/api"
+    "github.com/ava-labs/avalanchego/database/rpcdb"
     "github.com/ava-labs/avalanchego/ids"
+    "github.com/ava-labs/avalanchego/utils/formatting"
     "github.com/ava-labs/avalanchego/utils/logging"
     "github.com/ava-labs/avalanchego/utils/rpc"
 )
@@ -28,6 +30,7 @@ type Client interface {
     SetLoggerLevel(ctx context.Context, loggerName, logLevel, displayLevel string, options ...rpc.Option) (map[string]LogAndDisplayLevels, error)
     GetLoggerLevel(ctx context.Context, loggerName string, options ...rpc.Option) (map[string]LogAndDisplayLevels, error)
     GetConfig(ctx context.Context, options ...rpc.Option) (interface{}, error)
+    DBGet(ctx context.Context, key []byte, options ...rpc.Option) ([]byte, error)
 }

 // Client implementation for the Avalanche Platform Info API Endpoint
@@ -140,3 +143,23 @@ func (c *client) GetConfig(ctx context.Context, options ...rpc.Option) (interfac
     err := c.requester.SendRequest(ctx, "admin.getConfig", struct{}{}, &res, options...)
     return res, err
 }
+
+func (c *client) DBGet(ctx context.Context, key []byte, options ...rpc.Option) ([]byte, error) {
+    keyStr, err := formatting.Encode(formatting.HexNC, key)
+    if err != nil {
+        return nil, err
+    }
+
+    res := &DBGetReply{}
+    err = c.requester.SendRequest(ctx, "admin.dbGet", &DBGetArgs{
+        Key: keyStr,
+    }, res, options...)
+    if err != nil {
+        return nil, err
+    }
+
+    if err := rpcdb.ErrEnumToError[res.ErrorCode]; err != nil {
+        return nil, err
+    }
+    return formatting.Decode(formatting.HexNC, res.Value)
+}
diff --git a/api/admin/key_value_reader.go b/api/admin/key_value_reader.go
new file mode 100644
index 000000000000..bfc7b2cced06
--- /dev/null
+++ b/api/admin/key_value_reader.go
@@ -0,0 +1,34 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package admin
+
+import (
+    "context"
+
+    "github.com/ava-labs/avalanchego/database"
+)
+
+var _ database.KeyValueReader = (*KeyValueReader)(nil)
+
+type KeyValueReader struct {
+    client Client
+}
+
+func NewKeyValueReader(client Client) *KeyValueReader {
+    return &KeyValueReader{
+        client: client,
+    }
+}
+
+func (r *KeyValueReader) Has(key []byte) (bool, error) {
+    _, err := r.client.DBGet(context.Background(), key)
+    if err == database.ErrNotFound {
+        return false, nil
+    }
+    return err == nil, err
+}
+
+func (r *KeyValueReader) Get(key []byte) ([]byte, error) {
+    return r.client.DBGet(context.Background(), key)
+}
diff --git a/api/admin/service.go b/api/admin/service.go
index cf57a28264e7..4229fcf7221a 100644
--- a/api/admin/service.go
+++ b/api/admin/service.go
@@ -16,15 +16,20 @@ import (
     "github.com/ava-labs/avalanchego/api"
     "github.com/ava-labs/avalanchego/api/server"
     "github.com/ava-labs/avalanchego/chains"
+    "github.com/ava-labs/avalanchego/database"
+    "github.com/ava-labs/avalanchego/database/rpcdb"
     "github.com/ava-labs/avalanchego/ids"
     "github.com/ava-labs/avalanchego/utils"
     "github.com/ava-labs/avalanchego/utils/constants"
+    "github.com/ava-labs/avalanchego/utils/formatting"
     "github.com/ava-labs/avalanchego/utils/json"
     "github.com/ava-labs/avalanchego/utils/logging"
     "github.com/ava-labs/avalanchego/utils/perms"
     "github.com/ava-labs/avalanchego/utils/profiler"
     "github.com/ava-labs/avalanchego/vms"
     "github.com/ava-labs/avalanchego/vms/registry"
+
+    rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb"
 )
@@ -44,6 +49,7 @@ type Config struct {
     ProfileDir   string
     LogFactory   logging.Factory
     NodeConfig   interface{}
+    DB           database.Database
     ChainManager chains.Manager
     HTTPServer   server.PathAdderWithReadLock
     VMRegistry   registry.VMRegistry
@@ -376,3 +382,35 @@ func (a *Admin) getLogLevels(loggerNames []string) (map[string]LogAndDisplayLeve
     }
     return loggerLevels, nil
 }
+
+type DBGetArgs struct {
+    Key string `json:"key"`
+}
+
+type DBGetReply struct {
+    Value     string        `json:"value"`
+    ErrorCode rpcdbpb.Error `json:"errorCode"`
+}
+
+//nolint:stylecheck // renaming this method to DBGet would change the API method from "dbGet" to "dBGet"
+func (a *Admin) DbGet(_ *http.Request, args *DBGetArgs, reply *DBGetReply) error {
+    a.Log.Debug("API called",
+        zap.String("service", "admin"),
+        zap.String("method", "dbGet"),
+        logging.UserString("key", args.Key),
+    )
+
+    key, err := formatting.Decode(formatting.HexNC, args.Key)
+    if err != nil {
+        return err
+    }
+
+    value, err := a.DB.Get(key)
+    if err != nil {
+        reply.ErrorCode = rpcdb.ErrorToErrEnum[err]
+        return rpcdb.ErrorToRPCError(err)
+    }
+
+    reply.Value, err = formatting.Encode(formatting.HexNC, value)
+    return err
+}
diff --git a/api/admin/service_test.go b/api/admin/service_test.go
index ea159c655c63..a1309a213f60 100644
--- a/api/admin/service_test.go
+++ b/api/admin/service_test.go
@@ -11,10 +11,14 @@ import (

     "go.uber.org/mock/gomock"

+    "github.com/ava-labs/avalanchego/database/memdb"
     "github.com/ava-labs/avalanchego/ids"
+    "github.com/ava-labs/avalanchego/utils/formatting"
     "github.com/ava-labs/avalanchego/utils/logging"
     "github.com/ava-labs/avalanchego/vms"
     "github.com/ava-labs/avalanchego/vms/registry"
+
+    rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb"
 )

 type loadVMsTest struct {
@@ -111,3 +115,56 @@ func TestLoadVMsGetAliasesFails(t *testing.T) {
     err := resources.admin.LoadVMs(&http.Request{}, nil, &reply)
     require.ErrorIs(err, errTest)
 }
+
+func TestServiceDBGet(t *testing.T) {
+    a := &Admin{Config: Config{
+        Log: logging.NoLog{},
+        DB:  memdb.New(),
+    }}
+
+    helloBytes := []byte("hello")
+    helloHex, err := formatting.Encode(formatting.HexNC, helloBytes)
+    require.NoError(t, err)
+
+    worldBytes := []byte("world")
+    worldHex, err := formatting.Encode(formatting.HexNC, worldBytes)
+    require.NoError(t, err)
+
+    require.NoError(t, a.DB.Put(helloBytes, worldBytes))
+
+    tests := []struct {
+        name              string
+        key               string
+        expectedValue     string
+        expectedErrorCode rpcdbpb.Error
+    }{
+        {
+            name:              "key exists",
+            key:               helloHex,
+            expectedValue:     worldHex,
+            expectedErrorCode: rpcdbpb.Error_ERROR_UNSPECIFIED,
+        },
+        {
+            name:              "key doesn't exist",
+            key:               "",
+            expectedValue:     "",
+            expectedErrorCode: rpcdbpb.Error_ERROR_NOT_FOUND,
+        },
+    }
+    for _, test := range tests {
+        t.Run(test.name, func(t *testing.T) {
+            require := require.New(t)
+
+            reply := &DBGetReply{}
+            require.NoError(a.DbGet(
+                nil,
+                &DBGetArgs{
+                    Key: test.key,
+                },
+                reply,
+            ))
+            require.Equal(test.expectedValue, reply.Value)
+            require.Equal(test.expectedErrorCode, reply.ErrorCode)
+        })
+    }
+}
diff --git a/chains/manager.go b/chains/manager.go
index 50b278361664..19b08267c6c6 100644
--- a/chains/manager.go
+++ b/chains/manager.go
@@ -83,16 +83,16 @@ const (

 var (
     // Commonly shared VM DB prefix
-    vmDBPrefix = []byte("vm")
+    VMDBPrefix = []byte("vm")

     // Bootstrapping prefixes for LinearizableVMs
-    vertexDBPrefix              = []byte("vertex")
-    vertexBootstrappingDBPrefix = []byte("vertex_bs")
-    txBootstrappingDBPrefix     = []byte("tx_bs")
-    blockBootstrappingDBPrefix  = []byte("block_bs")
+    VertexDBPrefix              = []byte("vertex")
+    VertexBootstrappingDBPrefix = []byte("vertex_bs")
+    TxBootstrappingDBPrefix     = []byte("tx_bs")
+    BlockBootstrappingDBPrefix  = []byte("block_bs")

     // Bootstrapping prefixes for ChainVMs
-    bootstrappingDB = []byte("bs")
+    ChainBootstrappingDBPrefix = []byte("bs")

     errUnknownVMType    = errors.New("the vm should have type avalanche.DAGVM or snowman.ChainVM")
     errCreatePlatformVM = errors.New("attempted to create a chain running the PlatformVM")
@@ -596,11 +596,11 @@ func (m *manager) createAvalancheChain(
         return nil, err
     }
     prefixDB := prefixdb.New(ctx.ChainID[:], meterDB)
-    vmDB := prefixdb.New(vmDBPrefix, prefixDB)
-    vertexDB := prefixdb.New(vertexDBPrefix, prefixDB)
-    vertexBootstrappingDB := prefixdb.New(vertexBootstrappingDBPrefix, prefixDB)
-    txBootstrappingDB := prefixdb.New(txBootstrappingDBPrefix, prefixDB)
-    blockBootstrappingDB := prefixdb.New(blockBootstrappingDBPrefix, prefixDB)
+    vmDB := prefixdb.New(VMDBPrefix, prefixDB)
+    vertexDB := prefixdb.New(VertexDBPrefix, prefixDB)
+    vertexBootstrappingDB := prefixdb.New(VertexBootstrappingDBPrefix, prefixDB)
+    txBootstrappingDB := prefixdb.New(TxBootstrappingDBPrefix, prefixDB)
+    blockBootstrappingDB := prefixdb.New(BlockBootstrappingDBPrefix, prefixDB)

     vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", ctx.AvalancheRegisterer)
     if err != nil {
@@ -999,8 +999,8 @@ func (m *manager) createSnowmanChain(
         return nil, err
     }
     prefixDB := prefixdb.New(ctx.ChainID[:], meterDB)
-    vmDB := prefixdb.New(vmDBPrefix, prefixDB)
-    bootstrappingDB := prefixdb.New(bootstrappingDB, prefixDB)
+    vmDB := prefixdb.New(VMDBPrefix, prefixDB)
+    bootstrappingDB := prefixdb.New(ChainBootstrappingDBPrefix, prefixDB)

     blocked, err := queue.NewWithMissing(bootstrappingDB, "block", ctx.Registerer)
     if err != nil {
diff --git a/database/prefixdb/db.go b/database/prefixdb/db.go
index 64a644918241..d8af4b101900 100644
--- a/database/prefixdb/db.go
+++ b/database/prefixdb/db.go
@@ -39,29 +39,57 @@ type Database struct {
     closed bool
 }

+func newDB(prefix []byte, db database.Database) *Database {
+    return &Database{
+        dbPrefix: prefix,
+        db:       db,
+        bufferPool: sync.Pool{
+            New: func() interface{} {
+                return make([]byte, 0, defaultBufCap)
+            },
+        },
+    }
+}
+
 // New returns a new prefixed database
 func New(prefix []byte, db database.Database) *Database {
     if prefixDB, ok := db.(*Database); ok {
-        simplePrefix := make([]byte, len(prefixDB.dbPrefix)+len(prefix))
-        copy(simplePrefix, prefixDB.dbPrefix)
-        copy(simplePrefix[len(prefixDB.dbPrefix):], prefix)
-        return NewNested(simplePrefix, prefixDB.db)
+        return newDB(
+            JoinPrefixes(prefixDB.dbPrefix, prefix),
+            prefixDB.db,
+        )
     }
-    return NewNested(prefix, db)
+    return newDB(
+        MakePrefix(prefix),
+        db,
+    )
 }

 // NewNested returns a new prefixed database without attempting to compress
 // prefixes.
 func NewNested(prefix []byte, db database.Database) *Database {
-    return &Database{
-        dbPrefix: hashing.ComputeHash256(prefix),
-        db:       db,
-        bufferPool: sync.Pool{
-            New: func() interface{} {
-                return make([]byte, 0, defaultBufCap)
-            },
-        },
-    }
+    return newDB(
+        MakePrefix(prefix),
+        db,
+    )
+}
+
+func MakePrefix(prefix []byte) []byte {
+    return hashing.ComputeHash256(prefix)
+}
+
+func JoinPrefixes(firstPrefix, secondPrefix []byte) []byte {
+    simplePrefix := make([]byte, len(firstPrefix)+len(secondPrefix))
+    copy(simplePrefix, firstPrefix)
+    copy(simplePrefix[len(firstPrefix):], secondPrefix)
+    return MakePrefix(simplePrefix)
+}
+
+func PrefixKey(prefix, key []byte) []byte {
+    prefixedKey := make([]byte, len(prefix)+len(key))
+    copy(prefixedKey, prefix)
+    copy(prefixedKey[len(prefix):], key)
+    return prefixedKey
 }

 // Assumes that it is OK for the argument to db.db.Has
diff --git a/database/rpcdb/db_client.go b/database/rpcdb/db_client.go
index 9f91667b41b6..c71ccd0603e6 100644
--- a/database/rpcdb/db_client.go
+++ b/database/rpcdb/db_client.go
@@ -43,7 +43,7 @@ func (db *DatabaseClient) Has(key []byte) (bool, error) {
     if err != nil {
         return false, err
     }
-    return resp.Has, errEnumToError[resp.Err]
+    return resp.Has, ErrEnumToError[resp.Err]
 }

 // Get attempts to return the value that was mapped to the key that was provided
@@ -54,7 +54,7 @@ func (db *DatabaseClient) Get(key []byte) ([]byte, error) {
     if err != nil {
         return nil, err
     }
-    return resp.Value, errEnumToError[resp.Err]
+    return resp.Value, ErrEnumToError[resp.Err]
 }

 // Put attempts to set the value this key maps to
@@ -66,7 +66,7 @@ func (db *DatabaseClient) Put(key, value []byte) error {
     if err != nil {
         return err
     }
-    return errEnumToError[resp.Err]
+    return ErrEnumToError[resp.Err]
 }

 // Delete attempts to remove any mapping from the key
@@ -77,7 +77,7 @@ func (db *DatabaseClient) Delete(key []byte) error {
     if err != nil {
         return err
     }
-    return errEnumToError[resp.Err]
+    return ErrEnumToError[resp.Err]
 }

 // NewBatch returns a new batch
@@ -120,7 +120,7 @@ func (db *DatabaseClient) Compact(start, limit []byte) error {
     if err != nil {
         return err
     }
-    return errEnumToError[resp.Err]
+    return ErrEnumToError[resp.Err]
 }

 // Close attempts to close the database
@@ -130,7 +130,7 @@ func (db *DatabaseClient) Close() error {
     if err != nil {
         return err
     }
-    return errEnumToError[resp.Err]
+    return ErrEnumToError[resp.Err]
 }

 func (db *DatabaseClient) HealthCheck(ctx context.Context) (interface{}, error) {
@@ -175,7 +175,7 @@ func (b *batch) Write() error {
     if err != nil {
         return err
     }
-    return errEnumToError[resp.Err]
+    return ErrEnumToError[resp.Err]
 }

 func (b *batch) Inner() database.Batch {
@@ -224,7 +224,7 @@ func (it *iterator) fetch() {
         if err != nil {
             it.setError(err)
         } else {
-            it.setError(errEnumToError[resp.Err])
+            it.setError(ErrEnumToError[resp.Err])
         }

         close(it.fetchedData)
@@ -324,7 +324,7 @@ func (it *iterator) updateError() {
     if err != nil {
         it.setError(err)
     } else {
-        it.setError(errEnumToError[resp.Err])
+        it.setError(ErrEnumToError[resp.Err])
     }
 }
diff --git a/database/rpcdb/db_server.go b/database/rpcdb/db_server.go
index 6bcbd4e0276b..8a07a672d070 100644
--- a/database/rpcdb/db_server.go
+++ b/database/rpcdb/db_server.go
@@ -50,8 +50,8 @@ func (db *DatabaseServer) Has(_ context.Context, req *rpcdbpb.HasRequest) (*rpcd
     has, err := db.db.Has(req.Key)
     return &rpcdbpb.HasResponse{
         Has: has,
-        Err: errorToErrEnum[err],
-    }, errorToRPCError(err)
+        Err: ErrorToErrEnum[err],
+    }, ErrorToRPCError(err)
 }

 // Get delegates the Get call to the managed database and returns the result
@@ -59,34 +59,34 @@ func (db *DatabaseServer) Get(_ context.Context, req *rpcdbpb.GetRequest) (*rpcd
     value, err := db.db.Get(req.Key)
     return &rpcdbpb.GetResponse{
         Value: value,
-        Err:   errorToErrEnum[err],
-    }, errorToRPCError(err)
+        Err:   ErrorToErrEnum[err],
+    }, ErrorToRPCError(err)
 }

 // Put delegates the Put call to the managed database and returns the result
 func (db *DatabaseServer) Put(_ context.Context, req *rpcdbpb.PutRequest) (*rpcdbpb.PutResponse, error) {
     err := db.db.Put(req.Key, req.Value)
-    return &rpcdbpb.PutResponse{Err: errorToErrEnum[err]}, errorToRPCError(err)
+    return &rpcdbpb.PutResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err)
 }

 // Delete delegates the Delete call to the managed database and returns the
 // result
 func (db *DatabaseServer) Delete(_ context.Context, req *rpcdbpb.DeleteRequest) (*rpcdbpb.DeleteResponse, error) {
     err := db.db.Delete(req.Key)
-    return &rpcdbpb.DeleteResponse{Err: errorToErrEnum[err]}, errorToRPCError(err)
+    return &rpcdbpb.DeleteResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err)
 }

 // Compact delegates the Compact call to the managed database and returns the
 // result
 func (db *DatabaseServer) Compact(_ context.Context, req *rpcdbpb.CompactRequest) (*rpcdbpb.CompactResponse, error) {
     err := db.db.Compact(req.Start, req.Limit)
-    return &rpcdbpb.CompactResponse{Err: errorToErrEnum[err]}, errorToRPCError(err)
+    return &rpcdbpb.CompactResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err)
 }

 // Close delegates the Close call to the managed database and returns the result
 func (db *DatabaseServer) Close(context.Context, *rpcdbpb.CloseRequest) (*rpcdbpb.CloseResponse, error) {
     err := db.db.Close()
-    return &rpcdbpb.CloseResponse{Err: errorToErrEnum[err]}, errorToRPCError(err)
+    return &rpcdbpb.CloseResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err)
 }

 // HealthCheck performs a heath check against the underlying database.
@@ -109,22 +109,22 @@ func (db *DatabaseServer) WriteBatch(_ context.Context, req *rpcdbpb.WriteBatchR
     for _, put := range req.Puts {
         if err := batch.Put(put.Key, put.Value); err != nil {
             return &rpcdbpb.WriteBatchResponse{
-                Err: errorToErrEnum[err],
-            }, errorToRPCError(err)
+                Err: ErrorToErrEnum[err],
+            }, ErrorToRPCError(err)
         }
     }
     for _, del := range req.Deletes {
         if err := batch.Delete(del.Key); err != nil {
             return &rpcdbpb.WriteBatchResponse{
-                Err: errorToErrEnum[err],
-            }, errorToRPCError(err)
+                Err: ErrorToErrEnum[err],
+            }, ErrorToRPCError(err)
         }
     }

     err := batch.Write()
     return &rpcdbpb.WriteBatchResponse{
-        Err: errorToErrEnum[err],
-    }, errorToRPCError(err)
+        Err: ErrorToErrEnum[err],
+    }, ErrorToRPCError(err)
 }

 // NewIteratorWithStartAndPrefix allocates an iterator and returns the iterator
@@ -177,7 +177,7 @@ func (db *DatabaseServer) IteratorError(_ context.Context, req *rpcdbpb.Iterator
         return nil, errUnknownIterator
     }
     err := it.Error()
-    return &rpcdbpb.IteratorErrorResponse{Err: errorToErrEnum[err]}, errorToRPCError(err)
+    return &rpcdbpb.IteratorErrorResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err)
 }

 // IteratorRelease attempts to release the resources allocated to an iterator
@@ -193,5 +193,5 @@ func (db *DatabaseServer) IteratorRelease(_ context.Context, req *rpcdbpb.Iterat
     err := it.Error()
     it.Release()
-    return &rpcdbpb.IteratorReleaseResponse{Err: errorToErrEnum[err]}, errorToRPCError(err)
+    return &rpcdbpb.IteratorReleaseResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err)
 }
diff --git a/database/rpcdb/errors.go b/database/rpcdb/errors.go
index 2cd759b6d612..52788cc0a42a 100644
--- a/database/rpcdb/errors.go
+++ b/database/rpcdb/errors.go
@@ -10,18 +10,18 @@ import (
 )

 var (
-    errEnumToError = map[rpcdbpb.Error]error{
+    ErrEnumToError = map[rpcdbpb.Error]error{
         rpcdbpb.Error_ERROR_CLOSED:    database.ErrClosed,
         rpcdbpb.Error_ERROR_NOT_FOUND: database.ErrNotFound,
     }
-    errorToErrEnum = map[error]rpcdbpb.Error{
+    ErrorToErrEnum = map[error]rpcdbpb.Error{
         database.ErrClosed:   rpcdbpb.Error_ERROR_CLOSED,
         database.ErrNotFound: rpcdbpb.Error_ERROR_NOT_FOUND,
     }
 )

-func errorToRPCError(err error) error {
-    if _, ok := errorToErrEnum[err]; ok {
+func ErrorToRPCError(err error) error {
+    if _, ok := ErrorToErrEnum[err]; ok {
         return nil
     }
     return err
 }
diff --git a/node/node.go b/node/node.go
index 7842259c8a93..c7d9615c05b5 100644
--- a/node/node.go
+++ b/node/node.go
@@ -1325,6 +1325,7 @@ func (n *Node) initAdminAPI() error {
     service, err := admin.NewService(
         admin.Config{
             Log:          n.Log,
+            DB:           n.DB,
             ChainManager: n.chainManager,
             HTTPServer:   n.APIServer,
             ProfileDir:   n.Config.ProfilerConfig.Dir,
diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go
index 92e646ed6975..91cf32822e76 100644
--- a/vms/platformvm/state/state.go
+++ b/vms/platformvm/state/state.go
@@ -61,35 +61,35 @@ var (
     errValidatorSetAlreadyPopulated = errors.New("validator set already populated")
     errIsNotSubnet                  = errors.New("is not a subnet")

-    blockIDPrefix                       = []byte("blockID")
-    blockPrefix                         = []byte("block")
-    validatorsPrefix                    = []byte("validators")
-    currentPrefix                       = []byte("current")
-    pendingPrefix                       = []byte("pending")
-    validatorPrefix                     = []byte("validator")
-    delegatorPrefix                     = []byte("delegator")
-    subnetValidatorPrefix               = []byte("subnetValidator")
-    subnetDelegatorPrefix               = []byte("subnetDelegator")
-    nestedValidatorWeightDiffsPrefix    = []byte("validatorDiffs")
-    nestedValidatorPublicKeyDiffsPrefix = []byte("publicKeyDiffs")
-    flatValidatorWeightDiffsPrefix      = []byte("flatValidatorDiffs")
-    flatValidatorPublicKeyDiffsPrefix   = []byte("flatPublicKeyDiffs")
-    txPrefix                            = []byte("tx")
-    rewardUTXOsPrefix                   = []byte("rewardUTXOs")
-    utxoPrefix                          = []byte("utxo")
-    subnetPrefix                        = []byte("subnet")
-    subnetOwnerPrefix                   = []byte("subnetOwner")
-    transformedSubnetPrefix             = []byte("transformedSubnet")
-    supplyPrefix                        = []byte("supply")
-    chainPrefix                         = []byte("chain")
-    singletonPrefix                     = []byte("singleton")
-
-    timestampKey      = []byte("timestamp")
-    currentSupplyKey  = []byte("current supply")
-    lastAcceptedKey   = []byte("last accepted")
-    heightsIndexedKey = []byte("heights indexed")
-    initializedKey    = []byte("initialized")
-    prunedKey         = []byte("pruned")
+    BlockIDPrefix                       = []byte("blockID")
+    BlockPrefix                         = []byte("block")
+    ValidatorsPrefix                    = []byte("validators")
+    CurrentPrefix                       = []byte("current")
+    PendingPrefix                       = []byte("pending")
+    ValidatorPrefix                     = []byte("validator")
+    DelegatorPrefix                     = []byte("delegator")
+    SubnetValidatorPrefix               = []byte("subnetValidator")
+    SubnetDelegatorPrefix               = []byte("subnetDelegator")
+    NestedValidatorWeightDiffsPrefix    = []byte("validatorDiffs")
+    NestedValidatorPublicKeyDiffsPrefix = []byte("publicKeyDiffs")
+    FlatValidatorWeightDiffsPrefix      = []byte("flatValidatorDiffs")
+    FlatValidatorPublicKeyDiffsPrefix   = []byte("flatPublicKeyDiffs")
+    TxPrefix                            = []byte("tx")
+    RewardUTXOsPrefix                   = []byte("rewardUTXOs")
+    UTXOPrefix                          = []byte("utxo")
+    SubnetPrefix                        = []byte("subnet")
+    SubnetOwnerPrefix                   = []byte("subnetOwner")
+    TransformedSubnetPrefix             = []byte("transformedSubnet")
+    SupplyPrefix                        = []byte("supply")
+    ChainPrefix                         = []byte("chain")
+    SingletonPrefix                     = []byte("singleton")
+
+    TimestampKey      = []byte("timestamp")
+    CurrentSupplyKey  = []byte("current supply")
+    LastAcceptedKey   = []byte("last accepted")
+    HeightsIndexedKey = []byte("heights indexed")
+    InitializedKey    = []byte("initialized")
+    PrunedKey         = []byte("pruned")
 )

 // Chain collects all methods to manage the state of the chain for block
@@ -491,7 +491,7 @@ func New(
         // If the pruned key is on disk, we must delete it to ensure our disk
         // can't get into a partially pruned state if the node restarts mid-way
         // through pruning.
-        if err := s.singletonDB.Delete(prunedKey); err != nil {
+        if err := s.singletonDB.Delete(PrunedKey); err != nil {
             return nil, fmt.Errorf("failed to remove prunedKey from singletonDB: %w", err)
         }

@@ -532,24 +532,24 @@ func newState(
     baseDB := versiondb.New(db)

-    validatorsDB := prefixdb.New(validatorsPrefix, baseDB)
+    validatorsDB := prefixdb.New(ValidatorsPrefix, baseDB)

-    currentValidatorsDB := prefixdb.New(currentPrefix, validatorsDB)
-    currentValidatorBaseDB := prefixdb.New(validatorPrefix, currentValidatorsDB)
-    currentDelegatorBaseDB := prefixdb.New(delegatorPrefix, currentValidatorsDB)
-    currentSubnetValidatorBaseDB := prefixdb.New(subnetValidatorPrefix, currentValidatorsDB)
-    currentSubnetDelegatorBaseDB := prefixdb.New(subnetDelegatorPrefix, currentValidatorsDB)
+    currentValidatorsDB := prefixdb.New(CurrentPrefix, validatorsDB)
+    currentValidatorBaseDB := prefixdb.New(ValidatorPrefix, currentValidatorsDB)
+    currentDelegatorBaseDB := prefixdb.New(DelegatorPrefix, currentValidatorsDB)
+    currentSubnetValidatorBaseDB := prefixdb.New(SubnetValidatorPrefix, currentValidatorsDB)
+    currentSubnetDelegatorBaseDB := prefixdb.New(SubnetDelegatorPrefix, currentValidatorsDB)

-    pendingValidatorsDB := prefixdb.New(pendingPrefix, validatorsDB)
-    pendingValidatorBaseDB := prefixdb.New(validatorPrefix, pendingValidatorsDB)
-    pendingDelegatorBaseDB := prefixdb.New(delegatorPrefix, pendingValidatorsDB)
-    pendingSubnetValidatorBaseDB := prefixdb.New(subnetValidatorPrefix, pendingValidatorsDB)
-    pendingSubnetDelegatorBaseDB := prefixdb.New(subnetDelegatorPrefix, pendingValidatorsDB)
+    pendingValidatorsDB := prefixdb.New(PendingPrefix, validatorsDB)
+    pendingValidatorBaseDB := prefixdb.New(ValidatorPrefix, pendingValidatorsDB)
+    pendingDelegatorBaseDB := prefixdb.New(DelegatorPrefix, pendingValidatorsDB)
+    pendingSubnetValidatorBaseDB := prefixdb.New(SubnetValidatorPrefix, pendingValidatorsDB)
+    pendingSubnetDelegatorBaseDB := prefixdb.New(SubnetDelegatorPrefix, pendingValidatorsDB)

-    nestedValidatorWeightDiffsDB := prefixdb.New(nestedValidatorWeightDiffsPrefix, validatorsDB)
-    nestedValidatorPublicKeyDiffsDB := prefixdb.New(nestedValidatorPublicKeyDiffsPrefix, validatorsDB)
-    flatValidatorWeightDiffsDB := prefixdb.New(flatValidatorWeightDiffsPrefix, validatorsDB)
-    flatValidatorPublicKeyDiffsDB := prefixdb.New(flatValidatorPublicKeyDiffsPrefix, validatorsDB)
+    nestedValidatorWeightDiffsDB := prefixdb.New(NestedValidatorWeightDiffsPrefix, validatorsDB)
+    nestedValidatorPublicKeyDiffsDB := prefixdb.New(NestedValidatorPublicKeyDiffsPrefix, validatorsDB)
+    flatValidatorWeightDiffsDB := prefixdb.New(FlatValidatorWeightDiffsPrefix, validatorsDB)
+    flatValidatorPublicKeyDiffsDB := prefixdb.New(FlatValidatorPublicKeyDiffsPrefix, validatorsDB)

     txCache, err := metercacher.New(
         "tx_cache",
@@ -560,7 +560,7 @@ func newState(
         return nil, err
     }

-    rewardUTXODB := prefixdb.New(rewardUTXOsPrefix, baseDB)
+    rewardUTXODB := prefixdb.New(RewardUTXOsPrefix, baseDB)
     rewardUTXOsCache, err := metercacher.New[ids.ID, []*avax.UTXO](
         "reward_utxos_cache",
         metricsReg,
@@ -570,15 +570,15 @@ func newState(
         return nil, err
     }

-    utxoDB := prefixdb.New(utxoPrefix, baseDB)
+    utxoDB := prefixdb.New(UTXOPrefix, baseDB)
     utxoState, err := avax.NewMeteredUTXOState(utxoDB, txs.GenesisCodec, metricsReg, execCfg.ChecksumsEnabled)
     if err != nil {
         return nil, err
     }

-    subnetBaseDB := prefixdb.New(subnetPrefix, baseDB)
+    subnetBaseDB := prefixdb.New(SubnetPrefix, baseDB)

-    subnetOwnerDB := prefixdb.New(subnetOwnerPrefix, baseDB)
+    subnetOwnerDB := prefixdb.New(SubnetOwnerPrefix, baseDB)
     subnetOwnerCache, err := metercacher.New[ids.ID, fxOwnerAndSize](
         "subnet_owner_cache",
         metricsReg,
@@ -638,11 +638,11 @@ func newState(
         addedBlockIDs: make(map[uint64]ids.ID),
         blockIDCache:  blockIDCache,
-        blockIDDB:     prefixdb.New(blockIDPrefix, baseDB),
+        blockIDDB:     prefixdb.New(BlockIDPrefix, baseDB),

         addedBlocks: make(map[ids.ID]block.Block),
         blockCache:  blockCache,
-        blockDB:     prefixdb.New(blockPrefix, baseDB),
+        blockDB:     prefixdb.New(BlockPrefix, baseDB),

         currentStakers: newBaseStakers(),
         pendingStakers: newBaseStakers(),
@@ -672,7 +672,7 @@ func newState(
         flatValidatorPublicKeyDiffsDB: flatValidatorPublicKeyDiffsDB,

         addedTxs: make(map[ids.ID]*txAndStatus),
-        txDB:     prefixdb.New(txPrefix, baseDB),
+        txDB:     prefixdb.New(TxPrefix, baseDB),
         txCache:  txCache,

         addedRewardUTXOs: make(map[ids.ID][]*avax.UTXO),
@@ -692,18 +692,18 @@ func newState(
         transformedSubnets:     make(map[ids.ID]*txs.Tx),
         transformedSubnetCache: transformedSubnetCache,
-        transformedSubnetDB:    prefixdb.New(transformedSubnetPrefix, baseDB),
+        transformedSubnetDB:    prefixdb.New(TransformedSubnetPrefix, baseDB),

         modifiedSupplies: make(map[ids.ID]uint64),
         supplyCache:      supplyCache,
-        supplyDB:         prefixdb.New(supplyPrefix, baseDB),
+        supplyDB:         prefixdb.New(SupplyPrefix, baseDB),

         addedChains:  make(map[ids.ID][]*txs.Tx),
-        chainDB:      prefixdb.New(chainPrefix, baseDB),
+        chainDB:      prefixdb.New(ChainPrefix, baseDB),
         chainCache:   chainCache,
         chainDBCache: chainDBCache,

-        singletonDB: prefixdb.New(singletonPrefix, baseDB),
+        singletonDB: prefixdb.New(SingletonPrefix, baseDB),
     }, nil
 }
@@ -764,16 +764,16 @@ func (s *state) GetPendingStakerIterator() (StakerIterator, error) {
 }

 func (s *state) shouldInit() (bool, error) {
-    has, err := s.singletonDB.Has(initializedKey)
+    has, err := s.singletonDB.Has(InitializedKey)
     return !has, err
 }

 func (s *state) doneInit() error {
-    return s.singletonDB.Put(initializedKey, nil)
+    return s.singletonDB.Put(InitializedKey, nil)
 }

 func (s *state) ShouldPrune() (bool, error) {
-    has, err := s.singletonDB.Has(prunedKey)
+    has, err := s.singletonDB.Has(PrunedKey)
     if err != nil {
         return true, err
     }
@@ -800,7 +800,7 @@ func (s *state) ShouldPrune() (bool, error) {
 }

 func (s *state) donePrune() error {
-    return s.singletonDB.Put(prunedKey, nil)
+    return s.singletonDB.Put(PrunedKey, nil)
 }

 func (s *state) GetSubnets() ([]*txs.Tx, error) {
@@ -1394,21 +1394,21 @@ func (s *state) load() error {
 }

 func (s *state) loadMetadata() error {
-    timestamp, err := database.GetTimestamp(s.singletonDB, timestampKey)
+    timestamp, err := database.GetTimestamp(s.singletonDB, TimestampKey)
     if err != nil {
         return err
     }
     s.persistedTimestamp = timestamp
     s.SetTimestamp(timestamp)

-    currentSupply, err := database.GetUInt64(s.singletonDB, currentSupplyKey)
+    currentSupply, err := database.GetUInt64(s.singletonDB, CurrentSupplyKey)
     if err != nil {
         return err
     }
     s.persistedCurrentSupply = currentSupply
     s.SetCurrentSupply(constants.PrimaryNetworkID, currentSupply)

-    lastAccepted, err := database.GetID(s.singletonDB, lastAcceptedKey)
+    lastAccepted, err := database.GetID(s.singletonDB, LastAcceptedKey)
     if err != nil {
         return err
     }
@@ -1417,7 +1417,7 @@ func (s *state) loadMetadata() error {

     // Lookup the most recently indexed range on disk. If we haven't started
     // indexing the weights, then we keep the indexed heights as nil.
-    indexedHeightsBytes, err := s.singletonDB.Get(heightsIndexedKey)
+    indexedHeightsBytes, err := s.singletonDB.Get(HeightsIndexedKey)
     if err == database.ErrNotFound {
         return nil
     }
@@ -2410,19 +2410,19 @@ func (s *state) writeChains() error {

 func (s *state) writeMetadata() error {
     if !s.persistedTimestamp.Equal(s.timestamp) {
-        if err := database.PutTimestamp(s.singletonDB, timestampKey, s.timestamp); err != nil {
+        if err := database.PutTimestamp(s.singletonDB, TimestampKey, s.timestamp); err != nil {
             return fmt.Errorf("failed to write timestamp: %w", err)
         }
         s.persistedTimestamp = s.timestamp
     }
     if s.persistedCurrentSupply != s.currentSupply {
-        if err := database.PutUInt64(s.singletonDB, currentSupplyKey, s.currentSupply); err != nil {
+        if err := database.PutUInt64(s.singletonDB, CurrentSupplyKey, s.currentSupply); err != nil {
             return fmt.Errorf("failed to write current supply: %w", err)
         }
         s.persistedCurrentSupply = s.currentSupply
     }
     if s.persistedLastAccepted != s.lastAccepted {
-        if err := database.PutID(s.singletonDB, lastAcceptedKey, s.lastAccepted); err != nil {
+        if err := database.PutID(s.singletonDB, LastAcceptedKey, s.lastAccepted); err != nil {
             return fmt.Errorf("failed to write last accepted: %w", err)
         }
         s.persistedLastAccepted = s.lastAccepted
@@ -2433,7 +2433,7 @@ func (s *state) writeMetadata() error {
         if err != nil {
             return err
         }
-        if err := s.singletonDB.Put(heightsIndexedKey, indexedHeightsBytes); err != nil {
+        if err := s.singletonDB.Put(HeightsIndexedKey, indexedHeightsBytes); err != nil {
             return fmt.Errorf("failed to write indexed range: %w", err)
         }
     }
diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go
index 23e88a646368..4e2962587b40 100644
--- a/vms/platformvm/vm_test.go
+++ b/vms/platformvm/vm_test.go
@@ -1257,8 +1257,8 @@ func TestBootstrapPartiallyAccepted(t *testing.T) {
     require := require.New(t)

     baseDB := memdb.New()
-    vmDB := prefixdb.New([]byte("vm"), baseDB)
-    bootstrappingDB := prefixdb.New([]byte("bootstrapping"), baseDB)
+    vmDB := prefixdb.New(chains.VMDBPrefix, baseDB)
+    bootstrappingDB := prefixdb.New(chains.ChainBootstrappingDBPrefix, baseDB)

     blocked, err := queue.NewWithMissing(bootstrappingDB, "", prometheus.NewRegistry())
     require.NoError(err)
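For reference, a minimal usage sketch of the admin API surface introduced above; it is not part of the patch. It assumes a locally running node with the admin API enabled at http://127.0.0.1:9650, that admin.NewClient takes the node URI as its only argument (as the existing admin client constructors do), and a hypothetical key "somekey". DBGet, NewKeyValueReader, and the error mapping to database.ErrNotFound come from the files changed in this diff.

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/ava-labs/avalanchego/api/admin"
    "github.com/ava-labs/avalanchego/database"
)

func main() {
    // Assumption: a node running locally with the admin API enabled, and an
    // admin.NewClient constructor that accepts the node URI.
    client := admin.NewClient("http://127.0.0.1:9650")

    // DBGet hex-encodes the key, calls the "admin.dbGet" RPC added in this
    // change, and maps the returned error code back to a database error.
    value, err := client.DBGet(context.Background(), []byte("somekey"))
    switch {
    case err == database.ErrNotFound:
        fmt.Println("key not present")
    case err != nil:
        log.Fatal(err)
    default:
        fmt.Printf("value: 0x%x\n", value)
    }

    // KeyValueReader wraps the client so it satisfies database.KeyValueReader,
    // letting read-only database helpers run against a remote node's DB.
    reader := admin.NewKeyValueReader(client)
    has, err := reader.Has([]byte("somekey"))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("has key:", has)
}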