diff --git a/analyzer/consensus/consensus.go b/analyzer/consensus/consensus.go
index 5b3abb4a5..15fcc5ebb 100644
--- a/analyzer/consensus/consensus.go
+++ b/analyzer/consensus/consensus.go
@@ -38,6 +38,8 @@ import (
 
 const (
 	consensusAnalyzerName = "consensus"
+
+	validatorUptimesViewRefreshQuery = `REFRESH MATERIALIZED VIEW CONCURRENTLY views.validator_uptimes`
 )
 
 type EventType = apiTypes.ConsensusEventType // alias for brevity
@@ -316,6 +318,10 @@ func (m *processor) queueDbUpdates(batch *storage.QueryBatch, data allData) erro
 		}
 	}
 
+	if m.mode == analyzer.SlowSyncMode {
+		batch.Queue(validatorUptimesViewRefreshQuery)
+	}
+
 	return nil
 }
diff --git a/api/spec/v1.yaml b/api/spec/v1.yaml
index 8b39a8e54..2e8fb96fa 100644
--- a/api/spec/v1.yaml
+++ b/api/spec/v1.yaml
@@ -2085,8 +2085,33 @@ components:
           description: An array containing details of the last 100 consensus blocks, indicating whether each block was signed by the validator. Only available when querying a single validator.
           items:
             allOf: [$ref: '#/components/schemas/ValidatorSignedBlock']
+        uptime:
+          allOf: [$ref: '#/components/schemas/ValidatorUptime']
+          description: The validator's uptime statistics over a recent window of blocks (see window_length).
       description: |
-        An validator registered at the consensus layer.
+        A validator registered at the consensus layer.
+
+    ValidatorUptime:
+      type: object
+      properties:
+        window_length:
+          type: integer
+          format: uint64
+          description: The length of the window this object describes, in blocks.
+        partial_length:
+          type: integer
+          format: uint64
+          description: The length of each partial window (slot) within window_length, in blocks.
+        overall_uptime:
+          type: integer
+          format: uint64
+          description: The number of blocks signed by the validator out of the last window_length blocks.
+        partial_uptimes:
+          type: array
+          description: The number of blocks signed by the validator in each partial window (slot) within window_length, most recent slot first.
+          items:
+            type: integer
+            format: uint64
 
     ValidatorSignedBlock:
       type: object
diff --git a/storage/client/client.go b/storage/client/client.go
index 6fc49b5c3..976b98d20 100644
--- a/storage/client/client.go
+++ b/storage/client/client.go
@@ -44,6 +44,12 @@ const (
 	maxTotalCount = 1000
 )
 
+var (
+	// Keep these two in sync with the views.validator_uptimes materialized view.
+	uptimeWindowBlocks = uint64(14400)
+	uptimeSlotBlocks   = uint64(1200)
+)
+
 // StorageClient is a wrapper around a storage.TargetStorage
 // with knowledge of network semantics.
 type StorageClient struct {
@@ -1191,6 +1197,11 @@ func (c *StorageClient) ProposalVotes(ctx context.Context, proposalID uint64, p
 	return &vs, nil
 }
 
+type validatorBlockCounts struct {
+	OverallCount uint64
+	SlotCounts   []uint64
+}
+
 // Validators returns a list of validators, or optionally the single validator matching `address`.
 func (c *StorageClient) Validators(ctx context.Context, p apiTypes.GetConsensusValidatorsParams, address *staking.Address) (*ValidatorList, error) {
 	var epoch Epoch
@@ -1213,6 +1224,26 @@ func (c *StorageClient) Validators(ctx context.Context, p apiTypes.GetConsensusV
 		return nil, wrapError(err)
 	}
 
+	// Prepare validator uptime metadata.
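+	// The query returns one row per signer entity: the per-slot signed-block counts
+	// (slot_signed) and the overall count for the whole window (overall_signed).
+	// Collect them into a map keyed by entity ID so they can be attached to the
+	// validator list below without issuing a query per validator.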
+	uptimes, err := c.withTotalCount(ctx, queries.ValidatorUptimes, address)
+	if err != nil {
+		return nil, wrapError(err)
+	}
+	defer uptimes.rows.Close()
+	uptimesBySigner := map[string]validatorBlockCounts{}
+	for uptimes.rows.Next() {
+		var signerEntityID string
+		var signedSlots []uint64
+		var signedOverall uint64
+		if err = uptimes.rows.Scan(&signerEntityID, &signedSlots, &signedOverall); err != nil {
+			return nil, wrapError(err)
+		}
+		uptimesBySigner[signerEntityID] = validatorBlockCounts{
+			OverallCount: signedOverall,
+			SlotCounts:   signedSlots,
+		}
+	}
+
 	res, err := c.withTotalCount(
 		ctx,
 		queries.ValidatorsData,
@@ -1282,6 +1313,15 @@ func (c *StorageClient) Validators(ctx context.Context, p apiTypes.GetConsensusV
 			}
 		}
 
+		if uptime, ok := uptimesBySigner[v.EntityID]; ok {
+			v.Uptime = &apiTypes.ValidatorUptime{
+				OverallUptime:  &uptime.OverallCount,
+				PartialUptimes: &uptime.SlotCounts,
+				WindowLength:   &uptimeWindowBlocks,
+				PartialLength:  &uptimeSlotBlocks,
+			}
+		}
+
 		if next > 0 {
 			v.CurrentCommissionBound.EpochEnd = next
 		}
diff --git a/storage/client/queries/queries.go b/storage/client/queries/queries.go
index 403d52b7c..6b0e4e477 100644
--- a/storage/client/queries/queries.go
+++ b/storage/client/queries/queries.go
@@ -433,6 +433,58 @@ const (
 		LIMIT $4::bigint
 	OFFSET $5::bigint`
 
+	ValidatorUptimes = `
+		SELECT * FROM views.validator_uptimes
+		WHERE ($1::text IS NULL OR signer_entity_id = $1::text)
+		ORDER BY signer_entity_id`
+	/*ValidatorUptimes = `
+		-- With a limit of 14400 blocks, this is the last ~24 hrs of signatures.
+		WITH last_window_blocks AS (
+			SELECT height, signer_entity_ids
+			FROM chain.blocks
+			ORDER BY height DESC
+			LIMIT $2
+		),
+		-- Generate the 12 slot IDs (~2 hours of blocks each) that partition the window.
+		all_slots AS (
+			SELECT generate_series(0, 11) AS slot_id
+		),
+		-- Assign each block in the window to a slot and expand its signer list, one row per signer.
+		slot_blocks AS (
+			SELECT
+				height,
+				UNNEST(signer_entity_ids) AS signer_entity_id,
+				(ROW_NUMBER() OVER (ORDER BY height DESC) - 1) / $3 AS slot_id
+			FROM last_window_blocks
+		),
+		-- Count signed blocks in each slot.
+		slot_counts AS (
+			SELECT
+				signer_entity_id,
+				slot_id,
+				COUNT(height) AS signed_blocks_count
+			FROM
+				slot_blocks
+			WHERE
+				($1::text IS NULL OR signer_entity_id = $1::text)
+			GROUP BY
+				signer_entity_id, slot_id
+		)
+		-- Aggregate the per-slot counts for each signer and compute the overall signed-block count.
+		SELECT
+			signers.signer_entity_id,
+			ARRAY_AGG(COALESCE(slot_counts.signed_blocks_count, 0) ORDER BY all_slots.slot_id) AS slot_signed,
+			COALESCE(SUM(signed_blocks_count), 0) AS overall_signed
+		FROM
+			-- Ensure we have all slots for each signer, even if they didn't sign in a particular slot.
+			(SELECT DISTINCT signer_entity_id FROM slot_counts) AS signers
+			CROSS JOIN all_slots
+			LEFT JOIN slot_counts ON signers.signer_entity_id = slot_counts.signer_entity_id AND all_slots.slot_id = slot_counts.slot_id
+		GROUP BY
+			signers.signer_entity_id
+		ORDER BY
+			signers.signer_entity_id`*/
+
 	RuntimeBlocks = `
 		SELECT round, block_hash, timestamp, num_transactions, size, gas_used
 		FROM chain.runtime_blocks
diff --git a/storage/migrations/07_validator_uptimes.up.sql b/storage/migrations/07_validator_uptimes.up.sql
new file mode 100644
index 000000000..856dbc556
--- /dev/null
+++ b/storage/migrations/07_validator_uptimes.up.sql
@@ -0,0 +1,50 @@
+BEGIN;
+
+CREATE MATERIALIZED VIEW views.validator_uptimes AS
+    -- With a limit of 14400 blocks, this is the last ~24 hrs of signatures.
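+    -- 14400 blocks / 1200 blocks per slot = 12 slots of roughly 2 hours each (assuming the
+    -- ~6-second consensus block time). Keep these constants in sync with uptimeWindowBlocks
+    -- and uptimeSlotBlocks in storage/client/client.go.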
+    WITH last_window_blocks AS (
+        SELECT height, signer_entity_ids
+        FROM chain.blocks
+        ORDER BY height DESC
+        LIMIT 14400
+    ),
+    -- Generate the 12 slot IDs (~2 hours of blocks each) that partition the window.
+    all_slots AS (
+        SELECT generate_series(0, 11) AS slot_id
+    ),
+    -- Assign each block in the window to a slot and expand its signer list, one row per signer.
+    slot_blocks AS (
+        SELECT
+            height,
+            UNNEST(signer_entity_ids) AS signer_entity_id,
+            (ROW_NUMBER() OVER (ORDER BY height DESC) - 1) / 1200 AS slot_id
+        FROM last_window_blocks
+    ),
+    -- Count signed blocks in each slot.
+    slot_counts AS (
+        SELECT
+            signer_entity_id,
+            slot_id,
+            COUNT(height) AS signed_blocks_count
+        FROM
+            slot_blocks
+        -- Compute this for all validators; the client can select from the view if needed.
+        GROUP BY
+            signer_entity_id, slot_id
+    )
+    -- Aggregate the per-slot counts for each signer and compute the overall signed-block count.
+    SELECT
+        signers.signer_entity_id,
+        ARRAY_AGG(COALESCE(slot_counts.signed_blocks_count, 0) ORDER BY all_slots.slot_id) AS slot_signed,
+        COALESCE(SUM(signed_blocks_count), 0) AS overall_signed
+    FROM
+        -- Ensure we have all slots for each signer, even if they didn't sign in a particular slot.
+        (SELECT DISTINCT signer_entity_id FROM slot_counts) AS signers
+        CROSS JOIN all_slots
+        LEFT JOIN slot_counts ON signers.signer_entity_id = slot_counts.signer_entity_id AND all_slots.slot_id = slot_counts.slot_id
+    GROUP BY
+        signers.signer_entity_id;
+
+CREATE UNIQUE INDEX ix_views_validator_uptimes_signer_entity_id ON views.validator_uptimes (signer_entity_id); -- A unique index is required for CONCURRENTLY refreshing the view.
+
+END;
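For manual verification, the view can be refreshed and inspected directly in the indexer database. A minimal sketch (the ordering column and row limit are arbitrary; the concurrent refresh relies on the unique index above):

    REFRESH MATERIALIZED VIEW CONCURRENTLY views.validator_uptimes;

    -- Per-slot and overall signed-block counts for the busiest signers in the current window.
    SELECT signer_entity_id, slot_signed, overall_signed
    FROM views.validator_uptimes
    ORDER BY overall_signed DESC
    LIMIT 10;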