diff --git a/chains/manager.go b/chains/manager.go
index b4f28b1290be..a1158e67716c 100644
--- a/chains/manager.go
+++ b/chains/manager.go
@@ -205,8 +205,8 @@ type ManagerConfig struct {
 	MeterVMEnabled bool // Should each VM be wrapped with a MeterVM
 	Metrics        metrics.MultiGatherer
 
-	AcceptedFrontierGossipFrequency time.Duration
-	ConsensusAppConcurrency         int
+	FrontierPollFrequency   time.Duration
+	ConsensusAppConcurrency int
 
 	// Max Time to spend fetching a container and its
 	// ancestors when responding to a GetAncestors
@@ -824,7 +824,7 @@ func (m *manager) createAvalancheChain(
 		ctx,
 		vdrs,
 		msgChan,
-		m.AcceptedFrontierGossipFrequency,
+		m.FrontierPollFrequency,
 		m.ConsensusAppConcurrency,
 		m.ResourceTracker,
 		validators.UnhandledSubnetConnector, // avalanche chains don't use subnet connector
@@ -1166,7 +1166,7 @@ func (m *manager) createSnowmanChain(
 		ctx,
 		vdrs,
 		msgChan,
-		m.AcceptedFrontierGossipFrequency,
+		m.FrontierPollFrequency,
 		m.ConsensusAppConcurrency,
 		m.ResourceTracker,
 		subnetConnector,
diff --git a/config/config.go b/config/config.go
index db080ec2514a..d894e832d889 100644
--- a/config/config.go
+++ b/config/config.go
@@ -60,8 +60,9 @@ const (
 	subnetConfigFileExt = ".json"
 
 	ipResolutionTimeout = 30 * time.Second
 
-	ipcDeprecationMsg      = "IPC API is deprecated"
-	keystoreDeprecationMsg = "keystore API is deprecated"
+	ipcDeprecationMsg                    = "IPC API is deprecated"
+	keystoreDeprecationMsg               = "keystore API is deprecated"
+	acceptedFrontierGossipDeprecationMsg = "push-based accepted frontier gossip is deprecated"
 )
 
 var (
@@ -72,6 +73,12 @@ var (
 	deprecatedKeys = map[string]string{
 		IpcsChainIDsKey:       ipcDeprecationMsg,
 		IpcsPathKey:           ipcDeprecationMsg,
 		KeystoreAPIEnabledKey: keystoreDeprecationMsg,
+		ConsensusGossipAcceptedFrontierValidatorSizeKey:    acceptedFrontierGossipDeprecationMsg,
+		ConsensusGossipAcceptedFrontierNonValidatorSizeKey: acceptedFrontierGossipDeprecationMsg,
+		ConsensusGossipAcceptedFrontierPeerSizeKey:         acceptedFrontierGossipDeprecationMsg,
+		ConsensusGossipOnAcceptValidatorSizeKey:            acceptedFrontierGossipDeprecationMsg,
+		ConsensusGossipOnAcceptNonValidatorSizeKey:         acceptedFrontierGossipDeprecationMsg,
+		ConsensusGossipOnAcceptPeerSizeKey:                 acceptedFrontierGossipDeprecationMsg,
 	}
 
 	errSybilProtectionDisabledStakerWeights = errors.New("sybil protection disabled weights must be positive")
@@ -1320,9 +1327,9 @@ func GetNodeConfig(v *viper.Viper) (node.Config, error) {
 	}
 
 	// Gossiping
-	nodeConfig.AcceptedFrontierGossipFrequency = v.GetDuration(ConsensusAcceptedFrontierGossipFrequencyKey)
-	if nodeConfig.AcceptedFrontierGossipFrequency < 0 {
-		return node.Config{}, fmt.Errorf("%s must be >= 0", ConsensusAcceptedFrontierGossipFrequencyKey)
+	nodeConfig.FrontierPollFrequency = v.GetDuration(ConsensusFrontierPollFrequencyKey)
+	if nodeConfig.FrontierPollFrequency < 0 {
+		return node.Config{}, fmt.Errorf("%s must be >= 0", ConsensusFrontierPollFrequencyKey)
 	}
 
 	// App handling
diff --git a/config/flags.go b/config/flags.go
index 20f42381320f..6e381e1c6a85 100644
--- a/config/flags.go
+++ b/config/flags.go
@@ -177,9 +177,9 @@ func addNodeFlags(fs *pflag.FlagSet) {
 	fs.Duration(BenchlistMinFailingDurationKey, constants.DefaultBenchlistMinFailingDuration, "Minimum amount of time messages to a peer must be failing before the peer is benched")
 
 	// Router
-	fs.Duration(ConsensusAcceptedFrontierGossipFrequencyKey, constants.DefaultAcceptedFrontierGossipFrequency, "Frequency of gossiping accepted frontiers")
 	fs.Uint(ConsensusAppConcurrencyKey, constants.DefaultConsensusAppConcurrency, "Maximum number of goroutines to use when handling App messages on a chain")
 	fs.Duration(ConsensusShutdownTimeoutKey, constants.DefaultConsensusShutdownTimeout, "Timeout before killing an unresponsive chain")
+	fs.Duration(ConsensusFrontierPollFrequencyKey, constants.DefaultFrontierPollFrequency, "Frequency of polling for new consensus frontiers")
 	fs.Uint(ConsensusGossipAcceptedFrontierValidatorSizeKey, constants.DefaultConsensusGossipAcceptedFrontierValidatorSize, "Number of validators to gossip to when gossiping accepted frontier")
 	fs.Uint(ConsensusGossipAcceptedFrontierNonValidatorSizeKey, constants.DefaultConsensusGossipAcceptedFrontierNonValidatorSize, "Number of non-validators to gossip to when gossiping accepted frontier")
 	fs.Uint(ConsensusGossipAcceptedFrontierPeerSizeKey, constants.DefaultConsensusGossipAcceptedFrontierPeerSize, "Number of peers to gossip to when gossiping accepted frontier")
diff --git a/config/keys.go b/config/keys.go
index 1fe19cd2424a..c627abfc1b57 100644
--- a/config/keys.go
+++ b/config/keys.go
@@ -143,8 +143,9 @@ const (
 	IpcsChainIDsKey    = "ipcs-chain-ids"
 	IpcsPathKey        = "ipcs-path"
 	MeterVMsEnabledKey = "meter-vms-enabled"
-	ConsensusAcceptedFrontierGossipFrequencyKey = "consensus-accepted-frontier-gossip-frequency"
 	ConsensusAppConcurrencyKey = "consensus-app-concurrency"
+	ConsensusShutdownTimeoutKey       = "consensus-shutdown-timeout"
+	ConsensusFrontierPollFrequencyKey = "consensus-frontier-poll-frequency"
 	ConsensusGossipAcceptedFrontierValidatorSizeKey    = "consensus-accepted-frontier-gossip-validator-size"
 	ConsensusGossipAcceptedFrontierNonValidatorSizeKey = "consensus-accepted-frontier-gossip-non-validator-size"
 	ConsensusGossipAcceptedFrontierPeerSizeKey         = "consensus-accepted-frontier-gossip-peer-size"
@@ -154,7 +155,6 @@ const (
 	AppGossipValidatorSizeKey    = "consensus-app-gossip-validator-size"
 	AppGossipNonValidatorSizeKey = "consensus-app-gossip-non-validator-size"
 	AppGossipPeerSizeKey         = "consensus-app-gossip-peer-size"
-	ConsensusShutdownTimeoutKey = "consensus-shutdown-timeout"
 	ProposerVMUseCurrentHeightKey = "proposervm-use-current-height"
 	FdLimitKey                    = "fd-limit"
 	IndexEnabledKey               = "index-enabled"
diff --git a/node/config.go b/node/config.go
index 87783bd9935b..5839da75960a 100644
--- a/node/config.go
+++ b/node/config.go
@@ -181,8 +181,8 @@ type Config struct {
 	ConsensusRouter          router.Router       `json:"-"`
 	RouterHealthConfig       router.HealthConfig `json:"routerHealthConfig"`
 	ConsensusShutdownTimeout time.Duration       `json:"consensusShutdownTimeout"`
-	// Gossip a container in the accepted frontier every [AcceptedFrontierGossipFrequency]
-	AcceptedFrontierGossipFrequency time.Duration `json:"consensusGossipFreq"`
+	// Poll for new frontiers every [FrontierPollFrequency]
+	FrontierPollFrequency time.Duration `json:"consensusGossipFreq"`
 	// ConsensusAppConcurrency defines the maximum number of goroutines to
 	// handle App messages per chain.
 	ConsensusAppConcurrency int `json:"consensusAppConcurrency"`
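Note (illustration, not part of the patch): the renamed Config field above deliberately keeps its old "consensusGossipFreq" JSON key, so serialized node configuration stays backwards compatible. A minimal sketch, assuming a hypothetical mirror of the struct:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// config mirrors the relevant slice of node.Config (hypothetical).
type config struct {
	// Go identifier renamed; JSON key unchanged.
	FrontierPollFrequency time.Duration `json:"consensusGossipFreq"`
}

func main() {
	out, _ := json.Marshal(config{FrontierPollFrequency: 100 * time.Millisecond})
	fmt.Println(string(out)) // {"consensusGossipFreq":100000000}
}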
diff --git a/node/node.go b/node/node.go
index 06544e8f9e6b..40eb8dc16bef 100644
--- a/node/node.go
+++ b/node/node.go
@@ -1014,7 +1014,7 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error {
 		Metrics:                             n.MetricsGatherer,
 		SubnetConfigs:                       n.Config.SubnetConfigs,
 		ChainConfigs:                        n.Config.ChainConfigs,
-		AcceptedFrontierGossipFrequency:     n.Config.AcceptedFrontierGossipFrequency,
+		FrontierPollFrequency:               n.Config.FrontierPollFrequency,
 		ConsensusAppConcurrency:             n.Config.ConsensusAppConcurrency,
 		BootstrapMaxTimeGetAncestors:        n.Config.BootstrapMaxTimeGetAncestors,
 		BootstrapAncestorsMaxContainersSent: n.Config.BootstrapAncestorsMaxContainersSent,
@@ -1084,7 +1084,7 @@ func (n *Node) initVMs() error {
 				ApricotPhase5Time: version.GetApricotPhase5Time(n.Config.NetworkID),
 				BanffTime:         version.GetBanffTime(n.Config.NetworkID),
 				CortinaTime:       version.GetCortinaTime(n.Config.NetworkID),
-				DTime:             version.GetDTime(n.Config.NetworkID),
+				DurangoTime:       version.GetDurangoTime(n.Config.NetworkID),
 				UseCurrentHeight:  n.Config.UseCurrentHeight,
 			},
 		}),
diff --git a/node/overridden_manager.go b/node/overridden_manager.go
index 91d8c198a4c3..80295f8636ea 100644
--- a/node/overridden_manager.go
+++ b/node/overridden_manager.go
@@ -68,6 +68,10 @@ func (o *overriddenManager) Sample(_ ids.ID, size int) ([]ids.NodeID, error) {
 	return o.manager.Sample(o.subnetID, size)
 }
 
+func (o *overriddenManager) UniformSample(_ ids.ID, size int) ([]ids.NodeID, error) {
+	return o.manager.UniformSample(o.subnetID, size)
+}
+
 func (o *overriddenManager) GetMap(ids.ID) map[ids.NodeID]*validators.GetValidatorOutput {
 	return o.manager.GetMap(o.subnetID)
 }
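Note (illustration, not part of the patch): the new UniformSample method follows the same delegation pattern as the surrounding methods, discarding the caller-supplied subnetID in favor of the manager's pinned one. A self-contained sketch with hypothetical types:

package main

import "fmt"

type sampler interface {
	UniformSample(subnetID string, size int) ([]string, error)
}

// baseManager stands in for the wrapped validators.Manager.
type baseManager struct{}

func (baseManager) UniformSample(subnetID string, size int) ([]string, error) {
	return []string{"validator-of-" + subnetID}, nil
}

// overriddenManager pins every call to one subnetID, mirroring
// node/overridden_manager.go above.
type overriddenManager struct {
	manager  sampler
	subnetID string
}

func (o overriddenManager) UniformSample(_ string, size int) ([]string, error) {
	return o.manager.UniformSample(o.subnetID, size)
}

func main() {
	o := overriddenManager{manager: baseManager{}, subnetID: "primary"}
	vdrs, _ := o.UniformSample("ignored-subnet", 1)
	fmt.Println(vdrs) // [validator-of-primary]
}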
diff --git a/snow/engine/snowman/issuer.go b/snow/engine/snowman/issuer.go
index ca69064105e1..3558d47360dc 100644
--- a/snow/engine/snowman/issuer.go
+++ b/snow/engine/snowman/issuer.go
@@ -6,6 +6,8 @@ package snowman
 import (
 	"context"
 
+	"github.com/prometheus/client_golang/prometheus"
+
 	"github.com/ava-labs/avalanchego/ids"
 	"github.com/ava-labs/avalanchego/snow/consensus/snowman"
 	"github.com/ava-labs/avalanchego/utils/set"
@@ -13,11 +15,13 @@
 
 // issuer issues [blk] into consensus after its dependencies are met.
 type issuer struct {
-	t         *Transitive
-	blk       snowman.Block
-	abandoned bool
-	deps      set.Set[ids.ID]
-	push      bool
+	t            *Transitive
+	nodeID       ids.NodeID // nodeID of the peer that provided this block
+	blk          snowman.Block
+	issuedMetric prometheus.Counter
+	abandoned    bool
+	deps         set.Set[ids.ID]
+	push         bool
 }
 
 func (i *issuer) Dependencies() set.Set[ids.ID] {
@@ -51,5 +55,5 @@ func (i *issuer) Update(ctx context.Context) {
 		return
 	}
 	// Issue the block into consensus
-	i.t.errs.Add(i.t.deliver(ctx, i.blk, i.push))
+	i.t.errs.Add(i.t.deliver(ctx, i.nodeID, i.blk, i.push, i.issuedMetric))
 }
diff --git a/snow/engine/snowman/metrics.go b/snow/engine/snowman/metrics.go
index ae7cc66cfbfb..dfdc92c636db 100644
--- a/snow/engine/snowman/metrics.go
+++ b/snow/engine/snowman/metrics.go
@@ -10,6 +10,14 @@ import (
 	"github.com/ava-labs/avalanchego/utils/wrappers"
 )
 
+const (
+	pullGossipSource = "pull_gossip"
+	pushGossipSource = "push_gossip"
+	putGossipSource  = "put_gossip"
+	builtSource      = "built"
+	unknownSource    = "unknown"
+)
+
 type metrics struct {
 	bootstrapFinished prometheus.Gauge
 	numRequests       prometheus.Gauge
@@ -27,6 +35,8 @@ type metrics struct {
 	numProcessingAncestorFetchesUnneeded prometheus.Counter
 	getAncestorsBlks                     metric.Averager
 	selectedVoteIndex                    metric.Averager
+	issuerStake                          metric.Averager
+	issued                               *prometheus.CounterVec
 }
 
 func (m *metrics) Initialize(namespace string, reg prometheus.Registerer) error {
@@ -115,6 +125,25 @@ func (m *metrics) Initialize(namespace string, reg prometheus.Registerer) error
 		reg,
 		&errs,
 	)
+	m.issuerStake = metric.NewAveragerWithErrs(
+		namespace,
+		"issuer_stake",
+		"stake weight of the peer who provided a block that was issued into consensus",
+		reg,
+		&errs,
+	)
+	m.issued = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: namespace,
+		Name:      "blks_issued",
+		Help:      "number of blocks that have been issued into consensus by discovery mechanism",
+	}, []string{"source"})
+
+	// Register the labels
+	m.issued.WithLabelValues(pullGossipSource)
+	m.issued.WithLabelValues(pushGossipSource)
+	m.issued.WithLabelValues(putGossipSource)
+	m.issued.WithLabelValues(builtSource)
+	m.issued.WithLabelValues(unknownSource)
 
 	errs.Add(
 		reg.Register(m.bootstrapFinished),
@@ -131,6 +160,7 @@ func (m *metrics) Initialize(namespace string, reg prometheus.Registerer) error
 		reg.Register(m.numProcessingAncestorFetchesDropped),
 		reg.Register(m.numProcessingAncestorFetchesSucceeded),
 		reg.Register(m.numProcessingAncestorFetchesUnneeded),
+		reg.Register(m.issued),
 	)
 	return errs.Err
 }
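Note (illustration, not part of the patch): calling WithLabelValues once per source before Register materializes each labeled series at zero, so dashboards and rate() queries see all five sources immediately rather than only after the first increment. A runnable sketch of the same pattern:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	issued := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "blks_issued",
		Help: "number of blocks issued into consensus by discovery mechanism",
	}, []string{"source"})

	// Pre-create one child counter per expected label value.
	for _, source := range []string{"pull_gossip", "push_gossip", "put_gossip", "built", "unknown"} {
		issued.WithLabelValues(source)
	}

	if err := reg.Register(issued); err != nil {
		panic(err)
	}
	issued.WithLabelValues("built").Inc()
	fmt.Println("all five series exist; built=1, the rest report 0")
}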
diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go
index f0ce42ecf912..7f06698cbab0 100644
--- a/snow/engine/snowman/transitive.go
+++ b/snow/engine/snowman/transitive.go
@@ -7,6 +7,8 @@ import (
 	"context"
 	"fmt"
 
+	"github.com/prometheus/client_golang/prometheus"
+
 	"go.uber.org/zap"
 
 	"github.com/ava-labs/avalanchego/cache"
@@ -32,7 +34,14 @@ import (
 	"github.com/ava-labs/avalanchego/utils/wrappers"
 )
 
-const nonVerifiedCacheSize = 64 * units.MiB
+const (
+	nonVerifiedCacheSize = 64 * units.MiB
+
+	// putGossipPeriod specifies the number of times Gossip will be called per
+	// Put gossip. This is done to avoid splitting Gossip into multiple
+	// functions and to allow more frequent pull gossip than push gossip.
+	putGossipPeriod = 10
+)
 
 var _ Engine = (*Transitive)(nil)
 
@@ -59,13 +68,16 @@ type Transitive struct {
 	common.AppHandler
 	validators.Connector
 
-	RequestID uint32
+	requestID uint32
+
+	gossipCounter int
 
 	// track outstanding preference requests
 	polls poll.Set
 
 	// blocks that we have sent get requests for but haven't yet received
-	blkReqs *bimap.BiMap[common.Request, ids.ID]
+	blkReqs            *bimap.BiMap[common.Request, ids.ID]
+	blkReqSourceMetric map[common.Request]prometheus.Counter
 
 	// blocks that are queued to be issued to consensus once missing dependencies are fetched
 	// Block ID --> Block
@@ -142,11 +154,75 @@ func newTransitive(config Config) (*Transitive, error) {
 		acceptedFrontiers:  acceptedFrontiers,
 		polls:              polls,
 		blkReqs:            bimap.New[common.Request, ids.ID](),
+		blkReqSourceMetric: make(map[common.Request]prometheus.Counter),
 	}
 
 	return t, t.metrics.Initialize("", config.Ctx.Registerer)
 }
 
+func (t *Transitive) Gossip(ctx context.Context) error {
+	lastAcceptedID, lastAcceptedHeight := t.Consensus.LastAccepted()
+	if numProcessing := t.Consensus.NumProcessing(); numProcessing == 0 {
+		t.Ctx.Log.Verbo("sampling from validators",
+			zap.Stringer("validators", t.Validators),
+		)
+
+		// Uniform sampling is used here to reduce bandwidth requirements of
+		// nodes with a large amount of stake weight.
+		vdrIDs, err := t.Validators.UniformSample(t.Ctx.SubnetID, 1)
+		if err != nil {
+			t.Ctx.Log.Error("skipping block gossip",
+				zap.String("reason", "no validators"),
+				zap.Error(err),
+			)
+			return nil
+		}
+
+		nextHeightToAccept, err := math.Add64(lastAcceptedHeight, 1)
+		if err != nil {
+			t.Ctx.Log.Error("skipping block gossip",
+				zap.String("reason", "block height overflow"),
+				zap.Stringer("blkID", lastAcceptedID),
+				zap.Uint64("lastAcceptedHeight", lastAcceptedHeight),
+				zap.Error(err),
+			)
+			return nil
+		}
+
+		t.requestID++
+		vdrSet := set.Of(vdrIDs...)
+		preferredID := t.Consensus.Preference()
+		t.Sender.SendPullQuery(ctx, vdrSet, t.requestID, preferredID, nextHeightToAccept)
+	} else {
+		t.Ctx.Log.Debug("skipping block gossip",
+			zap.String("reason", "blocks currently processing"),
+			zap.Int("numProcessing", numProcessing),
+		)
+	}
+
+	// TODO: Remove periodic push gossip after v1.11.x is activated
+	t.gossipCounter++
+	t.gossipCounter %= putGossipPeriod
+	if t.gossipCounter > 0 {
+		return nil
+	}
+
+	lastAccepted, err := t.GetBlock(ctx, lastAcceptedID)
+	if err != nil {
+		t.Ctx.Log.Warn("dropping gossip request",
+			zap.String("reason", "block couldn't be loaded"),
+			zap.Stringer("blkID", lastAcceptedID),
+			zap.Error(err),
+		)
+		return nil
+	}
+	t.Ctx.Log.Verbo("gossiping accepted block to the network",
+		zap.Stringer("blkID", lastAcceptedID),
+	)
+	t.Sender.SendGossip(ctx, lastAccepted.Bytes())
+	return nil
+}
+
 func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkBytes []byte) error {
 	blk, err := t.VM.ParseBlock(ctx, blkBytes)
 	if err != nil {
@@ -170,23 +246,39 @@ func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint3
 		return t.GetFailed(ctx, nodeID, requestID)
 	}
 
-	actualBlkID := blk.ID()
-	expectedBlkID, ok := t.blkReqs.GetValue(common.Request{
-		NodeID:    nodeID,
-		RequestID: requestID,
-	})
-	// If the provided block is not the requested block, we need to explicitly
-	// mark the request as failed to avoid having a dangling dependency.
-	if ok && actualBlkID != expectedBlkID {
-		t.Ctx.Log.Debug("incorrect block returned in Put",
-			zap.Stringer("nodeID", nodeID),
-			zap.Uint32("requestID", requestID),
-			zap.Stringer("blkID", actualBlkID),
-			zap.Stringer("expectedBlkID", expectedBlkID),
-		)
-		// We assume that [blk] is useless because it doesn't match what we
-		// expected.
-		return t.GetFailed(ctx, nodeID, requestID)
+	var (
+		req = common.Request{
+			NodeID:    nodeID,
+			RequestID: requestID,
+		}
+		issuedMetric prometheus.Counter
+	)
+	switch expectedBlkID, ok := t.blkReqs.GetValue(req); {
+	case ok:
+		actualBlkID := blk.ID()
+		if actualBlkID != expectedBlkID {
+			t.Ctx.Log.Debug("incorrect block returned in Put",
+				zap.Stringer("nodeID", nodeID),
+				zap.Uint32("requestID", requestID),
+				zap.Stringer("blkID", actualBlkID),
+				zap.Stringer("expectedBlkID", expectedBlkID),
+			)
+			// We assume that [blk] is useless because it doesn't match what we
+			// expected.
+			return t.GetFailed(ctx, nodeID, requestID)
+		}
+
+		issuedMetric = t.blkReqSourceMetric[req]
+	case requestID == constants.GossipMsgRequestID:
+		issuedMetric = t.metrics.issued.WithLabelValues(putGossipSource)
+	default:
+		// This can happen if this block was provided to this engine while a Get
+		// request was outstanding. For example, the block may have been locally
+		// built or the node may have received a PushQuery with this block.
+		//
+		// Note: It is still possible this block will be issued here, because
+		// the block may have previously failed verification.
+		issuedMetric = t.metrics.issued.WithLabelValues(unknownSource)
 	}
 
 	if t.wasIssued(blk) {
@@ -198,7 +290,7 @@ func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint3
 	// receive requests to fill the ancestry. dependencies that have already
 	// been fetched, but with missing dependencies themselves won't be requested
 	// from the vdr.
-	if _, err := t.issueFrom(ctx, nodeID, blk); err != nil {
+	if _, err := t.issueFrom(ctx, nodeID, blk, issuedMetric); err != nil {
 		return err
 	}
 	return t.buildBlocks(ctx)
@@ -206,11 +298,13 @@ func (t *Transitive) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID
 	// We don't assume that this function is called after a failed Get message.
-	// Check to see if we have an outstanding request and also get what the request was for if it exists.
-	blkID, ok := t.blkReqs.DeleteKey(common.Request{
+	// Check to see if we have an outstanding request and also get what the
+	// request was for if it exists.
+	req := common.Request{
 		NodeID:    nodeID,
 		RequestID: requestID,
-	})
+	}
+	blkID, ok := t.blkReqs.DeleteKey(req)
 	if !ok {
 		t.Ctx.Log.Debug("unexpected GetFailed",
 			zap.Stringer("nodeID", nodeID),
@@ -218,6 +312,7 @@ func (t *Transitive) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID
 		)
 		return nil
 	}
+	delete(t.blkReqSourceMetric, req)
 
 	// Because the get request was dropped, we no longer expect blkID to be issued.
 	t.blocked.Abandon(ctx, blkID)
@@ -229,9 +324,11 @@ func (t *Transitive) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID
 	t.sendChits(ctx, nodeID, requestID, requestedHeight)
 
+	issuedMetric := t.metrics.issued.WithLabelValues(pushGossipSource)
+
 	// Try to issue [blkID] to consensus.
 	// If we're missing an ancestor, request it from [vdr]
-	if _, err := t.issueFromByID(ctx, nodeID, blkID); err != nil {
+	if _, err := t.issueFromByID(ctx, nodeID, blkID, issuedMetric); err != nil {
 		return err
 	}
 
@@ -265,12 +362,14 @@ func (t *Transitive) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID
 		t.metrics.numUselessPushQueryBytes.Add(float64(len(blkBytes)))
 	}
 
+	issuedMetric := t.metrics.issued.WithLabelValues(pushGossipSource)
+
 	// issue the block into consensus. If the block has already been issued,
 	// this will be a noop. If this block has missing dependencies, nodeID will
 	// receive requests to fill the ancestry. dependencies that have already
 	// been fetched, but with missing dependencies themselves won't be requested
 	// from the vdr.
-	if _, err := t.issueFrom(ctx, nodeID, blk); err != nil {
+	if _, err := t.issueFrom(ctx, nodeID, blk, issuedMetric); err != nil {
 		return err
 	}
 
@@ -288,7 +387,9 @@ func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uin
 		zap.Stringer("acceptedID", acceptedID),
 	)
 
-	addedPreferred, err := t.issueFromByID(ctx, nodeID, preferredID)
+	issuedMetric := t.metrics.issued.WithLabelValues(pullGossipSource)
+
+	addedPreferred, err := t.issueFromByID(ctx, nodeID, preferredID, issuedMetric)
 	if err != nil {
 		return err
 	}
@@ -302,7 +403,7 @@ func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uin
 		responseOptions = []ids.ID{preferredID}
 	)
 	if preferredID != preferredIDAtHeight {
-		addedPreferredIDAtHeight, err = t.issueFromByID(ctx, nodeID, preferredIDAtHeight)
+		addedPreferredIDAtHeight, err = t.issueFromByID(ctx, nodeID, preferredIDAtHeight, issuedMetric)
 		if err != nil {
 			return err
 		}
@@ -354,28 +455,6 @@ func (*Transitive) Timeout(context.Context) error {
 	return nil
 }
 
-func (t *Transitive) Gossip(ctx context.Context) error {
-	blkID, err := t.VM.LastAccepted(ctx)
-	if err != nil {
-		return err
-	}
-
-	blk, err := t.GetBlock(ctx, blkID)
-	if err != nil {
-		t.Ctx.Log.Warn("dropping gossip request",
-			zap.String("reason", "block couldn't be loaded"),
-			zap.Stringer("blkID", blkID),
-			zap.Error(err),
-		)
-		return nil
-	}
-	t.Ctx.Log.Verbo("gossiping accepted block to the network",
-		zap.Stringer("blkID", blkID),
-	)
-	t.Sender.SendGossip(ctx, blk.Bytes())
-	return nil
-}
-
 func (*Transitive) Halt(context.Context) {}
 
 func (t *Transitive) Shutdown(ctx context.Context) error {
@@ -409,7 +488,7 @@ func (t *Transitive) Context() *snow.ConsensusContext {
 }
 
 func (t *Transitive) Start(ctx context.Context, startReqID uint32) error {
-	t.RequestID = startReqID
+	t.requestID = startReqID
 	lastAcceptedID, err := t.VM.LastAccepted(ctx)
 	if err != nil {
 		return err
@@ -442,9 +521,10 @@ func (t *Transitive) Start(ctx context.Context, startReqID uint32) error {
 	case err != nil:
 		return err
 	default:
+		issuedMetric := t.metrics.issued.WithLabelValues(builtSource)
 		for _, blk := range options {
 			// note that deliver will set the VM's preference
-			if err := t.deliver(ctx, blk, false); err != nil {
+			if err := t.deliver(ctx, t.Ctx.NodeID, blk, false, issuedMetric); err != nil {
 				return err
 			}
 		}
@@ -604,7 +684,8 @@ func (t *Transitive) buildBlocks(ctx context.Context) error {
 			)
 		}
 
-		added, err := t.issueWithAncestors(ctx, blk)
+		issuedMetric := t.metrics.issued.WithLabelValues(builtSource)
+		added, err := t.issueWithAncestors(ctx, blk, issuedMetric)
 		if err != nil {
 			return err
 		}
@@ -634,23 +715,33 @@ func (t *Transitive) repoll(ctx context.Context) {
 // issueFromByID attempts to issue the branch ending with a block [blkID] into consensus.
 // If we do not have [blkID], request it.
 // Returns true if the block is processing in consensus or is decided.
-func (t *Transitive) issueFromByID(ctx context.Context, nodeID ids.NodeID, blkID ids.ID) (bool, error) {
+func (t *Transitive) issueFromByID(
+	ctx context.Context,
+	nodeID ids.NodeID,
+	blkID ids.ID,
+	issuedMetric prometheus.Counter,
+) (bool, error) {
 	blk, err := t.GetBlock(ctx, blkID)
 	if err != nil {
-		t.sendRequest(ctx, nodeID, blkID)
+		t.sendRequest(ctx, nodeID, blkID, issuedMetric)
 		return false, nil
 	}
-	return t.issueFrom(ctx, nodeID, blk)
+	return t.issueFrom(ctx, nodeID, blk, issuedMetric)
 }
 
 // issueFrom attempts to issue the branch ending with block [blkID] to consensus.
 // Returns true if the block is processing in consensus or is decided.
 // If a dependency is missing, request it from [vdr].
-func (t *Transitive) issueFrom(ctx context.Context, nodeID ids.NodeID, blk snowman.Block) (bool, error) {
+func (t *Transitive) issueFrom(
+	ctx context.Context,
+	nodeID ids.NodeID,
+	blk snowman.Block,
+	issuedMetric prometheus.Counter,
+) (bool, error) {
 	// issue [blk] and its ancestors to consensus.
 	blkID := blk.ID()
 	for !t.wasIssued(blk) {
-		if err := t.issue(ctx, blk, false); err != nil {
+		if err := t.issue(ctx, nodeID, blk, false, issuedMetric); err != nil {
 			return false, err
 		}
 
@@ -660,13 +751,15 @@ func (t *Transitive) issueFrom(ctx context.Context, nodeID ids.NodeID, blk snowm
 
 		// If we don't have this ancestor, request it from [vdr]
 		if err != nil || !blk.Status().Fetched() {
-			t.sendRequest(ctx, nodeID, blkID)
+			t.sendRequest(ctx, nodeID, blkID, issuedMetric)
 			return false, nil
 		}
 	}
 
 	// Remove any outstanding requests for this block
-	t.blkReqs.DeleteValue(blkID)
+	if req, ok := t.blkReqs.DeleteValue(blkID); ok {
+		delete(t.blkReqSourceMetric, req)
+	}
 
 	issued := t.Consensus.Decided(blk) || t.Consensus.Processing(blkID)
 	if issued {
@@ -685,12 +778,16 @@ func (t *Transitive) issueFrom(ctx context.Context, nodeID ids.NodeID, blk snowm
 // issueWithAncestors attempts to issue the branch ending with [blk] to consensus.
 // Returns true if the block is processing in consensus or is decided.
 // If a dependency is missing and the dependency hasn't been requested, the issuance will be abandoned.
-func (t *Transitive) issueWithAncestors(ctx context.Context, blk snowman.Block) (bool, error) {
+func (t *Transitive) issueWithAncestors(
+	ctx context.Context,
+	blk snowman.Block,
+	issuedMetric prometheus.Counter,
+) (bool, error) {
 	blkID := blk.ID()
 	// issue [blk] and its ancestors into consensus
 	status := blk.Status()
 	for status.Fetched() && !t.wasIssued(blk) {
-		err := t.issue(ctx, blk, true)
+		err := t.issue(ctx, t.Ctx.NodeID, blk, true, issuedMetric)
 		if err != nil {
 			return false, err
 		}
@@ -732,20 +829,30 @@ func (t *Transitive) wasIssued(blk snowman.Block) bool {
 // Issue [blk] to consensus once its ancestors have been issued.
 // If [push] is true, a push query will be used. Otherwise, a pull query will be
 // used.
-func (t *Transitive) issue(ctx context.Context, blk snowman.Block, push bool) error {
+func (t *Transitive) issue(
+	ctx context.Context,
+	nodeID ids.NodeID,
+	blk snowman.Block,
+	push bool,
+	issuedMetric prometheus.Counter,
+) error {
 	blkID := blk.ID()
 
 	// mark that the block is queued to be added to consensus once its ancestors have been
 	t.pending[blkID] = blk
 
 	// Remove any outstanding requests for this block
-	t.blkReqs.DeleteValue(blkID)
+	if req, ok := t.blkReqs.DeleteValue(blkID); ok {
+		delete(t.blkReqSourceMetric, req)
+	}
 
 	// Will add [blk] to consensus once its ancestors have been issued
 	i := &issuer{
-		t:    t,
-		blk:  blk,
-		push: push,
+		t:            t,
+		nodeID:       nodeID,
+		blk:          blk,
+		issuedMetric: issuedMetric,
+		push:         push,
 	}
 
 	// block on the parent if needed
@@ -768,26 +875,31 @@ func (t *Transitive) issue(ctx context.Context, blk snowman.Block, push bool) er
 }
 
 // Request that [vdr] send us block [blkID]
-func (t *Transitive) sendRequest(ctx context.Context, nodeID ids.NodeID, blkID ids.ID) {
+func (t *Transitive) sendRequest(
+	ctx context.Context,
+	nodeID ids.NodeID,
+	blkID ids.ID,
+	issuedMetric prometheus.Counter,
+) {
 	// There is already an outstanding request for this block
 	if t.blkReqs.HasValue(blkID) {
 		return
 	}
 
-	t.RequestID++
-	t.blkReqs.Put(
-		common.Request{
-			NodeID:    nodeID,
-			RequestID: t.RequestID,
-		},
-		blkID,
-	)
+	t.requestID++
+	req := common.Request{
+		NodeID:    nodeID,
+		RequestID: t.requestID,
+	}
+	t.blkReqs.Put(req, blkID)
+	t.blkReqSourceMetric[req] = issuedMetric
+
 	t.Ctx.Log.Verbo("sending Get request",
 		zap.Stringer("nodeID", nodeID),
-		zap.Uint32("requestID", t.RequestID),
+		zap.Uint32("requestID", t.requestID),
 		zap.Stringer("blkID", blkID),
 	)
-	t.Sender.SendGet(ctx, nodeID, t.RequestID, blkID)
+	t.Sender.SendGet(ctx, nodeID, t.requestID, blkID)
 
 	// Tracks performance statistics
 	t.metrics.numRequests.Set(float64(t.blkReqs.Len()))
@@ -811,6 +923,7 @@ func (t *Transitive) sendQuery(
 		t.Ctx.Log.Error("dropped query for block",
 			zap.String("reason", "insufficient number of validators"),
 			zap.Stringer("blkID", blkID),
+			zap.Int("size", t.Params.K),
 		)
 		return
 	}
@@ -828,28 +941,34 @@ func (t *Transitive) sendQuery(
 	}
 
 	vdrBag := bag.Of(vdrIDs...)
-	t.RequestID++
-	if !t.polls.Add(t.RequestID, vdrBag) {
+	t.requestID++
+	if !t.polls.Add(t.requestID, vdrBag) {
 		t.Ctx.Log.Error("dropped query for block",
 			zap.String("reason", "failed to add poll"),
 			zap.Stringer("blkID", blkID),
-			zap.Uint32("requestID", t.RequestID),
+			zap.Uint32("requestID", t.requestID),
 		)
 		return
 	}
 
 	vdrSet := set.Of(vdrIDs...)
 	if push {
-		t.Sender.SendPushQuery(ctx, vdrSet, t.RequestID, blkBytes, nextHeightToAccept)
+		t.Sender.SendPushQuery(ctx, vdrSet, t.requestID, blkBytes, nextHeightToAccept)
 	} else {
-		t.Sender.SendPullQuery(ctx, vdrSet, t.RequestID, blkID, nextHeightToAccept)
+		t.Sender.SendPullQuery(ctx, vdrSet, t.requestID, blkID, nextHeightToAccept)
 	}
 }
 
 // issue [blk] to consensus
 // If [push] is true, a push query will be used. Otherwise, a pull query will be
 // used.
-func (t *Transitive) deliver(ctx context.Context, blk snowman.Block, push bool) error {
+func (t *Transitive) deliver(
+	ctx context.Context,
+	nodeID ids.NodeID,
+	blk snowman.Block,
+	push bool,
+	issuedMetric prometheus.Counter,
+) error {
 	blkID := blk.ID()
 	if t.Consensus.Decided(blk) || t.Consensus.Processing(blkID) {
 		return nil
@@ -875,7 +994,7 @@ func (t *Transitive) deliver(ctx context.Context, blk snowman.Block, push bool)
 	// By ensuring that the parent is either processing or accepted, it is
 	// guaranteed that the parent was successfully verified. This means that
 	// calling Verify on this block is allowed.
-	blkAdded, err := t.addUnverifiedBlockToConsensus(ctx, blk)
+	blkAdded, err := t.addUnverifiedBlockToConsensus(ctx, nodeID, blk, issuedMetric)
 	if err != nil {
 		return err
 	}
@@ -899,7 +1018,7 @@ func (t *Transitive) deliver(ctx context.Context, blk snowman.Block, push bool)
 	}
 
 	for _, blk := range options {
-		blkAdded, err := t.addUnverifiedBlockToConsensus(ctx, blk)
+		blkAdded, err := t.addUnverifiedBlockToConsensus(ctx, nodeID, blk, issuedMetric)
 		if err != nil {
 			return err
 		}
@@ -931,13 +1050,17 @@ func (t *Transitive) deliver(ctx context.Context, blk snowman.Block, push bool)
 		t.removeFromPending(blk)
 		t.blocked.Fulfill(ctx, blkID)
-		t.blkReqs.DeleteValue(blkID)
+		if req, ok := t.blkReqs.DeleteValue(blkID); ok {
+			delete(t.blkReqSourceMetric, req)
+		}
 	}
 	for _, blk := range dropped {
 		blkID := blk.ID()
 		t.removeFromPending(blk)
 		t.blocked.Abandon(ctx, blkID)
-		t.blkReqs.DeleteValue(blkID)
+		if req, ok := t.blkReqs.DeleteValue(blkID); ok {
+			delete(t.blkReqSourceMetric, req)
+		}
 	}
 
 	// If we should issue multiple queries at the same time, we need to repoll
@@ -979,12 +1102,18 @@ func (t *Transitive) addToNonVerifieds(blk snowman.Block) {
 
 // addUnverifiedBlockToConsensus returns whether the block was added and an
 // error if one occurred while adding it to consensus.
-func (t *Transitive) addUnverifiedBlockToConsensus(ctx context.Context, blk snowman.Block) (bool, error) {
+func (t *Transitive) addUnverifiedBlockToConsensus(
+	ctx context.Context,
+	nodeID ids.NodeID,
+	blk snowman.Block,
+	issuedMetric prometheus.Counter,
+) (bool, error) {
 	blkID := blk.ID()
 
 	// make sure this block is valid
 	if err := blk.Verify(ctx); err != nil {
 		t.Ctx.Log.Debug("block verification failed",
+			zap.Stringer("nodeID", nodeID),
 			zap.Stringer("blkID", blkID),
 			zap.Error(err),
 		)
@@ -994,10 +1123,13 @@ func (t *Transitive) addUnverifiedBlockToConsensus(ctx context.Context, blk snow
 		return false, nil
 	}
 
+	issuedMetric.Inc()
 	t.nonVerifieds.Remove(blkID)
 	t.nonVerifiedCache.Evict(blkID)
 	t.metrics.numNonVerifieds.Set(float64(t.nonVerifieds.Len()))
+	t.metrics.issuerStake.Observe(float64(t.Validators.GetWeight(t.Ctx.SubnetID, nodeID)))
 	t.Ctx.Log.Verbo("adding block to consensus",
+		zap.Stringer("nodeID", nodeID),
 		zap.Stringer("blkID", blkID),
 	)
 	return true, t.Consensus.Add(ctx, &memoryBlock{
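Note (illustration, not part of the patch): blkReqSourceMetric pairs each outstanding Get request with the counter of the discovery path that triggered it, so the eventual Put (or a cleanup on failure) is attributed to the right source. A trimmed-down sketch of that bookkeeping, with hypothetical names:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// request mirrors common.Request: a (peer, requestID) pair used as a map key.
type request struct {
	nodeID    string
	requestID uint32
}

func main() {
	issued := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "blks_issued",
		Help: "blocks issued into consensus by discovery mechanism",
	}, []string{"source"})

	// On SendGet: remember which discovery path asked for the block.
	blkReqSourceMetric := map[request]prometheus.Counter{}
	req := request{nodeID: "NodeID-abc", requestID: 7}
	blkReqSourceMetric[req] = issued.WithLabelValues("pull_gossip")

	// On the matching Put: credit the original source, then clean up the
	// entry (GetFailed would delete it without incrementing).
	blkReqSourceMetric[req].Inc()
	delete(blkReqSourceMetric, req)

	fmt.Println("attributed one issued block to pull_gossip")
}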
diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go
index 405f1abd3666..26f6c1127bbf 100644
--- a/snow/engine/snowman/transitive_test.go
+++ b/snow/engine/snowman/transitive_test.go
@@ -412,7 +412,13 @@ func TestEngineMultipleQuery(t *testing.T) {
 		}
 	}
 
-	require.NoError(te.issue(context.Background(), blk0, false))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		blk0,
+		false,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 
 	blk1 := &snowman.TestBlock{
 		TestDecidable: choices.TestDecidable{
@@ -522,10 +528,22 @@ func TestEngineBlockedIssue(t *testing.T) {
 		}
 	}
 
-	require.NoError(te.issue(context.Background(), blk1, false))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		blk1,
+		false,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 
 	blk0.StatusV = choices.Processing
-	require.NoError(te.issue(context.Background(), blk0, false))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		blk0,
+		false,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 
 	require.Equal(blk1.ID(), te.Consensus.Preference())
 }
@@ -558,7 +576,13 @@ func TestEngineAbandonResponse(t *testing.T) {
 		return nil, errUnknownBlock
 	}
 
-	require.NoError(te.issue(context.Background(), blk, false))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		blk,
+		false,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 	require.NoError(te.QueryFailed(context.Background(), vdr, 1))
 
 	require.Empty(te.blocked)
@@ -797,7 +821,13 @@ func TestVoteCanceling(t *testing.T) {
 		require.Equal(uint64(1), requestedHeight)
 	}
 
-	require.NoError(te.issue(context.Background(), blk, true))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		blk,
+		true,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 
 	require.Equal(1, te.polls.Len())
 
@@ -858,7 +888,13 @@ func TestEngineNoQuery(t *testing.T) {
 		BytesV: []byte{1},
 	}
 
-	require.NoError(te.issue(context.Background(), blk, false))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		blk,
+		false,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 }
 
 func TestEngineNoRepollQuery(t *testing.T) {
@@ -961,7 +997,13 @@ func TestEngineAbandonChit(t *testing.T) {
 		reqID = requestID
 	}
 
-	require.NoError(te.issue(context.Background(), blk, false))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		blk,
+		false,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 
 	fakeBlkID := ids.GenerateTestID()
 	vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) {
@@ -1016,7 +1058,13 @@ func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) {
 		reqID = requestID
 	}
 
-	require.NoError(te.issue(context.Background(), blk, true))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		blk,
+		true,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 
 	fakeBlkID := ids.GenerateTestID()
 	vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) {
@@ -1099,7 +1147,13 @@ func TestEngineBlockingChitRequest(t *testing.T) {
 		return blockingBlk, nil
 	}
 
-	require.NoError(te.issue(context.Background(), parentBlk, false))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		parentBlk,
+		false,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 
 	sender.CantSendChits = false
 
@@ -1110,7 +1164,13 @@ func TestEngineBlockingChitRequest(t *testing.T) {
 	sender.CantSendPullQuery = false
 
 	missingBlk.StatusV = choices.Processing
-	require.NoError(te.issue(context.Background(), missingBlk, false))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		missingBlk,
+		false,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 
 	require.Empty(te.blocked)
 }
@@ -1163,7 +1223,13 @@ func TestEngineBlockingChitResponse(t *testing.T) {
 		}
 	}
 
-	require.NoError(te.issue(context.Background(), blockingBlk, false))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		blockingBlk,
+		false,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 
 	queryRequestID := new(uint32)
 	sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) {
@@ -1174,7 +1240,13 @@ func TestEngineBlockingChitResponse(t *testing.T) {
 		require.Equal(uint64(1), requestedHeight)
 	}
 
-	require.NoError(te.issue(context.Background(), issuedBlk, false))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		issuedBlk,
+		false,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 
 	sender.SendPushQueryF = nil
 	sender.CantSendPushQuery = false
@@ -1185,7 +1257,13 @@ func TestEngineBlockingChitResponse(t *testing.T) {
 	sender.CantSendPullQuery = false
 
 	missingBlk.StatusV = choices.Processing
-	require.NoError(te.issue(context.Background(), missingBlk, false))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		missingBlk,
+		false,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 }
 
 func TestEngineRetryFetch(t *testing.T) {
@@ -1281,9 +1359,21 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) {
 			return nil, errUnknownBlock
 		}
 	}
-	require.NoError(te.issue(context.Background(), validBlk, false))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		validBlk,
+		false,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 	sender.SendPushQueryF = nil
-	require.NoError(te.issue(context.Background(), invalidBlk, false))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		invalidBlk,
+		false,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 	require.NoError(te.Chits(context.Background(), vdr, *reqID, invalidBlkID, invalidBlkID, invalidBlkID))
 
 	require.Equal(choices.Accepted, validBlk.Status())
@@ -1292,7 +1382,7 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) {
 func TestEngineGossip(t *testing.T) {
 	require := require.New(t)
 
-	_, _, sender, vm, te, gBlk := setupDefaultConfig(t)
+	nodeID, _, sender, vm, te, gBlk := setupDefaultConfig(t)
 
 	vm.LastAcceptedF = func(context.Context) (ids.ID, error) {
 		return gBlk.ID(), nil
@@ -1302,15 +1392,15 @@ func TestEngineGossip(t *testing.T) {
 		return gBlk, nil
 	}
 
-	called := new(bool)
-	sender.SendGossipF = func(_ context.Context, blkBytes []byte) {
-		*called = true
-		require.Equal(gBlk.Bytes(), blkBytes)
+	var calledSendPullQuery bool
+	sender.SendPullQueryF = func(_ context.Context, nodeIDs set.Set[ids.NodeID], _ uint32, _ ids.ID, _ uint64) {
+		calledSendPullQuery = true
+		require.Equal(set.Of(nodeID), nodeIDs)
 	}
 
 	require.NoError(te.Gossip(context.Background()))
 
-	require.True(*called)
+	require.True(calledSendPullQuery)
 }
 
 func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) {
@@ -1666,7 +1756,13 @@ func TestEngineDoubleChit(t *testing.T) {
 		require.Equal(blk.ID(), blkID)
 		require.Equal(uint64(1), requestedHeight)
 	}
-	require.NoError(te.issue(context.Background(), blk, false))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		blk,
+		false,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 
 	vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) {
 		switch id {
@@ -2785,7 +2881,13 @@ func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) {
 		require.Equal(uint64(1), requestedHeight)
 	}
 
-	require.NoError(te.issue(context.Background(), blk, true))
+	require.NoError(te.issue(
+		context.Background(),
+		te.Ctx.NodeID,
+		blk,
+		true,
+		te.metrics.issued.WithLabelValues(unknownSource),
+	))
 
 	vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) {
 		switch id {
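Note (illustration, not part of the patch): Sample is stake-weighted, so high-stake validators would absorb most frontier polls; UniformSample gives every validator the same chance, which is why the new Gossip uses it to spread the bandwidth cost. A rough standalone simulation of the difference, with made-up weights:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// Two validators: one holds 90% of stake, one holds 10%.
	weights := map[string]uint64{"heavy": 900, "light": 100}
	total := weights["heavy"] + weights["light"]

	weightedHeavy, uniformHeavy := 0, 0
	names := []string{"heavy", "light"}
	for i := 0; i < 10000; i++ {
		// Stake-weighted draw: heavy wins ~90% of the time.
		if rand.Uint64()%total < weights["heavy"] {
			weightedHeavy++
		}
		// Uniform draw over validators: heavy wins ~50% of the time.
		if names[rand.Intn(len(names))] == "heavy" {
			uniformHeavy++
		}
	}
	fmt.Printf("weighted: %d/10000 heavy, uniform: %d/10000 heavy\n",
		weightedHeavy, uniformHeavy)
}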
diff --git a/snow/validators/manager.go b/snow/validators/manager.go
index c42ea779d96b..8cf634f29bd7 100644
--- a/snow/validators/manager.go
+++ b/snow/validators/manager.go
@@ -85,6 +85,10 @@ type Manager interface {
 	// If sampling the requested size isn't possible, an error will be returned.
 	Sample(subnetID ids.ID, size int) ([]ids.NodeID, error)
 
+	// UniformSample returns a collection of validatorIDs in the subnet.
+	// If sampling the requested size isn't possible, an error will be returned.
+	UniformSample(subnetID ids.ID, size int) ([]ids.NodeID, error)
+
 	// Map of the validators in this subnet
 	GetMap(subnetID ids.ID) map[ids.NodeID]*GetValidatorOutput
 
@@ -253,6 +257,21 @@ func (m *manager) Sample(subnetID ids.ID, size int) ([]ids.NodeID, error) {
 	return set.Sample(size)
 }
 
+func (m *manager) UniformSample(subnetID ids.ID, size int) ([]ids.NodeID, error) {
+	if size == 0 {
+		return nil, nil
+	}
+
+	m.lock.RLock()
+	set, exists := m.subnetToVdrs[subnetID]
+	m.lock.RUnlock()
+	if !exists {
+		return nil, ErrMissingValidators
+	}
+
+	return set.UniformSample(size)
+}
+
 func (m *manager) GetMap(subnetID ids.ID) map[ids.NodeID]*GetValidatorOutput {
 	m.lock.RLock()
 	set, exists := m.subnetToVdrs[subnetID]
diff --git a/snow/validators/set.go b/snow/validators/set.go
index dfa294a70bbe..564cd107153a 100644
--- a/snow/validators/set.go
+++ b/snow/validators/set.go
@@ -243,6 +243,13 @@ func (s *vdrSet) Sample(size int) ([]ids.NodeID, error) {
 	return s.sample(size)
 }
 
+func (s *vdrSet) UniformSample(size int) ([]ids.NodeID, error) {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+
+	return s.uniformSample(size)
+}
+
 func (s *vdrSet) sample(size int) ([]ids.NodeID, error) {
 	if !s.samplerInitialized {
 		if err := s.sampler.Initialize(s.weights); err != nil {
@@ -263,6 +270,22 @@ func (s *vdrSet) sample(size int) ([]ids.NodeID, error) {
 	return list, nil
 }
 
+func (s *vdrSet) uniformSample(size int) ([]ids.NodeID, error) {
+	uniform := sampler.NewUniform()
+	uniform.Initialize(uint64(len(s.vdrSlice)))
+
+	indices, err := uniform.Sample(size)
+	if err != nil {
+		return nil, err
+	}
+
+	list := make([]ids.NodeID, size)
+	for i, index := range indices {
+		list[i] = s.vdrSlice[index].NodeID
+	}
+	return list, nil
+}
+
 func (s *vdrSet) TotalWeight() (uint64, error) {
 	s.lock.RLock()
 	defer s.lock.RUnlock()
diff --git a/utils/constants/networking.go b/utils/constants/networking.go
index d26f3db1070d..a9417eac37c9 100644
--- a/utils/constants/networking.go
+++ b/utils/constants/networking.go
@@ -71,12 +71,12 @@ const (
 	DefaultBenchlistMinFailingDuration = 2*time.Minute + 30*time.Second
 
 	// Router
-	DefaultAcceptedFrontierGossipFrequency                 = 10 * time.Second
 	DefaultConsensusAppConcurrency                         = 2
 	DefaultConsensusShutdownTimeout                        = time.Minute
+	DefaultFrontierPollFrequency                           = 100 * time.Millisecond
 	DefaultConsensusGossipAcceptedFrontierValidatorSize    = 0
 	DefaultConsensusGossipAcceptedFrontierNonValidatorSize = 0
-	DefaultConsensusGossipAcceptedFrontierPeerSize         = 15
+	DefaultConsensusGossipAcceptedFrontierPeerSize         = 1
 	DefaultConsensusGossipOnAcceptValidatorSize            = 0
 	DefaultConsensusGossipOnAcceptNonValidatorSize         = 0
 	DefaultConsensusGossipOnAcceptPeerSize                 = 10
diff --git a/version/constants.go b/version/constants.go
index 5d57933e424b..a9bc3d2a6f91 100644
--- a/version/constants.go
+++ b/version/constants.go
@@ -99,7 +99,7 @@ var (
 	CortinaXChainStopVertexID map[uint32]ids.ID
 
 	// TODO: update this before release
-	DTimes = map[uint32]time.Time{
+	DurangoTimes = map[uint32]time.Time{
 		constants.MainnetID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC),
 		constants.FujiID:    time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC),
 	}
@@ -191,8 +191,8 @@ func GetCortinaTime(networkID uint32) time.Time {
 	return DefaultUpgradeTime
 }
 
-func GetDTime(networkID uint32) time.Time {
-	if upgradeTime, exists := DTimes[networkID]; exists {
+func GetDurangoTime(networkID uint32) time.Time {
+	if upgradeTime, exists := DurangoTimes[networkID]; exists {
 		return upgradeTime
 	}
 	return DefaultUpgradeTime
diff --git a/vms/platformvm/config/config.go b/vms/platformvm/config/config.go
index 05068e46a69d..f9504708f78a 100644
--- a/vms/platformvm/config/config.go
+++ b/vms/platformvm/config/config.go
@@ -104,8 +104,8 @@ type Config struct {
 	// Time of the Cortina network upgrade
 	CortinaTime time.Time
 
-	// Time of the D network upgrade
-	DTime time.Time
+	// Time of the Durango network upgrade
+	DurangoTime time.Time
 
 	// UseCurrentHeight forces [GetMinimumHeight] to return the current height
 	// of the P-Chain instead of the oldest block in the [recentlyAccepted]
@@ -133,9 +133,8 @@ func (c *Config) IsCortinaActivated(timestamp time.Time) bool {
 	return !timestamp.Before(c.CortinaTime)
 }
 
-// TODO: Rename
-func (c *Config) IsDActivated(timestamp time.Time) bool {
-	return !timestamp.Before(c.DTime)
+func (c *Config) IsDurangoActivated(timestamp time.Time) bool {
+	return !timestamp.Before(c.DurangoTime)
 }
 
 func (c *Config) GetCreateBlockchainTxFee(timestamp time.Time) uint64 {
diff --git a/vms/platformvm/txs/executor/staker_tx_verification.go b/vms/platformvm/txs/executor/staker_tx_verification.go
index 9ec4880e4a44..7fae0e78a85b 100644
--- a/vms/platformvm/txs/executor/staker_tx_verification.go
+++ b/vms/platformvm/txs/executor/staker_tx_verification.go
@@ -37,7 +37,7 @@ var (
 	ErrDuplicateValidator              = errors.New("duplicate validator")
 	ErrDelegateToPermissionedValidator = errors.New("delegation to permissioned validator")
 	ErrWrongStakedAssetID              = errors.New("incorrect staked assetID")
-	ErrDUpgradeNotActive               = errors.New("attempting to use a D-upgrade feature prior to activation")
+	ErrDurangoUpgradeNotActive         = errors.New("attempting to use a Durango-upgrade feature prior to activation")
 )
 
 // verifySubnetValidatorPrimaryNetworkRequirements verifies the primary
@@ -727,8 +727,8 @@ func verifyTransferSubnetOwnershipTx(
 	sTx *txs.Tx,
 	tx *txs.TransferSubnetOwnershipTx,
 ) error {
-	if !backend.Config.IsDActivated(chainState.GetTimestamp()) {
-		return ErrDUpgradeNotActive
+	if !backend.Config.IsDurangoActivated(chainState.GetTimestamp()) {
+		return ErrDurangoUpgradeNotActive
 	}
 
 	// Verify the tx is well-formed
diff --git a/vms/platformvm/txs/executor/standard_tx_executor.go b/vms/platformvm/txs/executor/standard_tx_executor.go
index 22bab59afd3b..63069cb5d5d5 100644
--- a/vms/platformvm/txs/executor/standard_tx_executor.go
+++ b/vms/platformvm/txs/executor/standard_tx_executor.go
@@ -516,8 +516,8 @@ func (e *StandardTxExecutor) TransferSubnetOwnershipTx(tx *txs.TransferSubnetOwn
 }
 
 func (e *StandardTxExecutor) BaseTx(tx *txs.BaseTx) error {
-	if !e.Backend.Config.IsDActivated(e.State.GetTimestamp()) {
-		return ErrDUpgradeNotActive
+	if !e.Backend.Config.IsDurangoActivated(e.State.GetTimestamp()) {
+		return ErrDurangoUpgradeNotActive
 	}
 
 	// Verify the tx is well-formed
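Note (illustration, not part of the patch): the Is*Activated helpers all use the same inclusive time comparison, so a timestamp exactly equal to the upgrade time already counts as post-upgrade. A minimal sketch with hypothetical types and an illustrative (not official) activation date:

package main

import (
	"fmt"
	"time"
)

// upgradeConfig is a hypothetical stand-in for platformvm's config.Config.
type upgradeConfig struct {
	durangoTime time.Time
}

// isDurangoActivated matches the !timestamp.Before(...) idiom above:
// activation is inclusive of the boundary instant.
func (c upgradeConfig) isDurangoActivated(timestamp time.Time) bool {
	return !timestamp.Before(c.durangoTime)
}

func main() {
	cfg := upgradeConfig{durangoTime: time.Date(2024, time.March, 1, 0, 0, 0, 0, time.UTC)}
	fmt.Println(cfg.isDurangoActivated(cfg.durangoTime))                    // true (boundary counts)
	fmt.Println(cfg.isDurangoActivated(cfg.durangoTime.Add(-time.Second))) // false
}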