Commit bb0e721

Merge pull request #100474 from cockroachdb/blathers/backport-release-22.2-100287

release-22.2: kvserver: add `leases.requests.latency` metric
erikgrinaker authored Apr 30, 2023
2 parents f013742 + 8e7edf0 commit bb0e721
Showing 4 changed files with 25 additions and 2 deletions.
pkg/kv/kvserver/metrics.go (17 changes: 15 additions & 2 deletions)
@@ -124,6 +124,12 @@ var (
 		Measurement: "Lease Requests",
 		Unit:        metric.Unit_COUNT,
 	}
+	metaLeaseRequestLatency = metric.Metadata{
+		Name:        "leases.requests.latency",
+		Help:        "Lease request latency (all types and outcomes, coalesced)",
+		Measurement: "Latency",
+		Unit:        metric.Unit_NANOSECONDS,
+	}
 	metaLeaseTransferSuccessCount = metric.Metadata{
 		Name:        "leases.transfers.success",
 		Help:        "Number of successful lease transfers",
@@ -1663,6 +1669,7 @@ type StoreMetrics struct {
 	// lease).
 	LeaseRequestSuccessCount  *metric.Counter
 	LeaseRequestErrorCount    *metric.Counter
+	LeaseRequestLatency       metric.IHistogram
 	LeaseTransferSuccessCount *metric.Counter
 	LeaseTransferErrorCount   *metric.Counter
 	LeaseExpirationCount      *metric.Gauge
@@ -2189,8 +2196,14 @@ func newStoreMetrics(histogramWindow time.Duration) *StoreMetrics {
 		OverReplicatedRangeCount:  metric.NewGauge(metaOverReplicatedRangeCount),

 		// Lease request metrics.
-		LeaseRequestSuccessCount:  metric.NewCounter(metaLeaseRequestSuccessCount),
-		LeaseRequestErrorCount:    metric.NewCounter(metaLeaseRequestErrorCount),
+		LeaseRequestSuccessCount: metric.NewCounter(metaLeaseRequestSuccessCount),
+		LeaseRequestErrorCount:   metric.NewCounter(metaLeaseRequestErrorCount),
+		LeaseRequestLatency: metric.NewHistogram(metric.HistogramOptions{
+			Mode:     metric.HistogramModePreferHdrLatency,
+			Metadata: metaLeaseRequestLatency,
+			Duration: histogramWindow,
+			Buckets:  metric.NetworkLatencyBuckets,
+		}),
 		LeaseTransferSuccessCount: metric.NewCounter(metaLeaseTransferSuccessCount),
 		LeaseTransferErrorCount:   metric.NewCounter(metaLeaseTransferErrorCount),
 		LeaseExpirationCount:      metric.NewGauge(metaLeaseExpirationCount),
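For context on the registration above: `metric.NewHistogram` wires up a windowed histogram that records nanosecond latencies into preset buckets (`metric.NetworkLatencyBuckets`). Below is a rough standalone sketch of the bucketed-histogram idea, not CockroachDB's `metric` package; `simpleHistogram` and its bucket bounds are hypothetical.

package main

import (
	"fmt"
	"sort"
	"time"
)

// simpleHistogram counts observations into preset latency buckets,
// loosely mirroring what the new LeaseRequestLatency metric does.
type simpleHistogram struct {
	bounds []time.Duration // bucket upper bounds, sorted ascending
	counts []int64         // counts[i] pairs with bounds[i]; last slot catches overflow
}

func newSimpleHistogram(bounds []time.Duration) *simpleHistogram {
	return &simpleHistogram{bounds: bounds, counts: make([]int64, len(bounds)+1)}
}

// RecordValue files d into the first bucket whose upper bound covers it.
func (h *simpleHistogram) RecordValue(d time.Duration) {
	i := sort.Search(len(h.bounds), func(i int) bool { return h.bounds[i] >= d })
	h.counts[i]++
}

func main() {
	h := newSimpleHistogram([]time.Duration{
		time.Millisecond, 10 * time.Millisecond, 100 * time.Millisecond, time.Second,
	})
	for _, d := range []time.Duration{500 * time.Microsecond, 3 * time.Millisecond, 2 * time.Second} {
		h.RecordValue(d)
	}
	fmt.Println(h.counts) // [1 1 0 0 1]
}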
pkg/kv/kvserver/replica_range_lease.go (5 changes: 5 additions & 0 deletions)
@@ -449,6 +449,11 @@ func (p *pendingLeaseRequest) requestLease(
 	status kvserverpb.LeaseStatus,
 	leaseReq roachpb.Request,
 ) error {
+	started := timeutil.Now()
+	defer func() {
+		p.repl.store.metrics.LeaseRequestLatency.RecordValue(timeutil.Since(started).Nanoseconds())
+	}()
+
 	// If requesting an epoch-based lease & current state is expired,
 	// potentially heartbeat our own liveness or increment epoch of
 	// prior owner. Note we only do this if the previous lease was
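The notable part of this hunk is the timing pattern: starting a timer at the top of `requestLease` and recording in a `defer` means every exit path, including error returns, is measured. A minimal stdlib-only sketch of the same pattern follows; `recordLatency` is a hypothetical stand-in for the histogram's `RecordValue`.

package main

import (
	"fmt"
	"time"
)

// recordLatency stands in for LeaseRequestLatency.RecordValue.
func recordLatency(d time.Duration) {
	fmt.Printf("lease request took %s\n", d)
}

// requestLease mimics the instrumented function: the deferred closure
// runs on every return path, so failures are timed too.
func requestLease(fail bool) error {
	started := time.Now()
	defer func() {
		recordLatency(time.Since(started))
	}()

	time.Sleep(5 * time.Millisecond) // stand-in for the actual lease work
	if fail {
		return fmt.Errorf("lease acquisition failed")
	}
	return nil
}

func main() {
	_ = requestLease(false)
	_ = requestLease(true) // still recorded, thanks to defer
}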
pkg/ts/catalog/chart_catalog.go (4 changes: 4 additions & 0 deletions)
@@ -1670,6 +1670,10 @@ var charts = []sectionDescription{
 			Title:   "Stuck Acquisition Count",
 			Metrics: []string{"requests.slow.lease"},
 		},
+		{
+			Title:   "Lease Request Latency",
+			Metrics: []string{"leases.requests.latency"},
+		},
 		{
 			Title: "Success Rate",
 			Metrics: []string{
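The catalog entry simply associates a DB Console chart title with the metric names it plots. A toy illustration of that registration shape follows; the `chartDescription` type is hypothetical, not the repo's actual `sectionDescription`.

package main

import "fmt"

// chartDescription is an illustrative stand-in for a catalog entry.
type chartDescription struct {
	Title   string
	Metrics []string
}

func main() {
	charts := []chartDescription{
		{Title: "Stuck Acquisition Count", Metrics: []string{"requests.slow.lease"}},
		{Title: "Lease Request Latency", Metrics: []string{"leases.requests.latency"}},
	}
	for _, c := range charts {
		fmt.Printf("%-24s -> %v\n", c.Title, c.Metrics)
	}
}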
pkg/ts/catalog/metrics.go (1 change: 1 addition & 0 deletions)
@@ -102,6 +102,7 @@ var histogramMetricsNames = map[string]struct{}{
 	"streaming.flush_hist_nanos":              {},
 	"kv.replica_read_batch_evaluate.latency":  {},
 	"kv.replica_write_batch_evaluate.latency": {},
+	"leases.requests.latency":                 {},
 }

 func allInternalTSMetricsNames() []string {
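`histogramMetricsNames` is a plain map-as-set: the empty struct values cost nothing, and membership marks a metric name as a histogram for the internal timeseries name catalog. A minimal sketch of the idiom:

package main

import "fmt"

// Map-as-set: the struct{} values take no space; only the keys matter.
var histogramMetricsNames = map[string]struct{}{
	"kv.replica_read_batch_evaluate.latency": {},
	"leases.requests.latency":                {},
}

func isHistogram(name string) bool {
	_, ok := histogramMetricsNames[name]
	return ok
}

func main() {
	fmt.Println(isHistogram("leases.requests.latency"))  // true
	fmt.Println(isHistogram("leases.transfers.success")) // false
}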
