kvstreamer: avoid an allocation on the hot path
This commit avoids an allocation in the `logStatistics` call when we
don't have verbose logging enabled (previously, we would always get an
allocation in the `humanizeutil.IBytes` call). This also avoids a few
atomic loads. (Noticed while looking at the CPU profile of one of the
production clusters.)

Release note: None
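
For illustration, here is a minimal, standalone sketch of the pattern the diff below applies. Because Go evaluates a call's arguments before the callee can check whether verbose logging is enabled, the old `log.VEventf` call always paid for the `humanizeutil.IBytes` formatting and the atomic loads; gating the work behind a cheap enabled-check skips it entirely when logging is off. The names in this sketch (`verbose`, `bytesRead`, `humanizeBytes`, `logStats`) are hypothetical stand-ins using only the standard library, not CockroachDB's `util/log` API (the real change uses `log.ExpensiveLogEnabled` and `log.Eventf`, as shown in the diff).

package main

import (
	"fmt"
	"sync/atomic"
)

// verbose stands in for a cheap "is verbose logging enabled?" check.
var verbose bool

// bytesRead stands in for one of the Streamer's atomically updated counters.
var bytesRead atomic.Int64

// humanizeBytes stands in for humanizeutil.IBytes: it builds a new string,
// which is the allocation the commit avoids when logging is disabled.
func humanizeBytes(n int64) string {
	return fmt.Sprintf("%d B", n)
}

// logStats shows the pattern: return early when verbose logging is off, so
// the atomic load and the string formatting only happen on the enabled
// branch. Previously the equivalent call was made unconditionally, and its
// arguments (including the humanized string) were evaluated every time.
func logStats() {
	if !verbose {
		return
	}
	fmt.Printf("bytesRead=%s\n", humanizeBytes(bytesRead.Load()))
}

func main() {
	bytesRead.Store(1 << 20)
	logStats() // verbose logging off: no atomic load, no allocation
	verbose = true
	logStats() // enabled: the formatting work actually runs
}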
yuzefovich committed Dec 19, 2023
1 parent 9016da6 commit 8a6e4e1
Showing 1 changed file with 20 additions and 18 deletions.
38 changes: 20 additions & 18 deletions pkg/kv/kvclient/kvstreamer/streamer.go
@@ -932,24 +932,26 @@ func (w *workerCoordinator) mainLoop(ctx context.Context) {
 // tracing span of the Streamer's user. Some time has been spent to figure it
 // out but led to no success. This should be cleaned up.
 func (w *workerCoordinator) logStatistics(ctx context.Context) {
-	avgResponseSize, _ := w.getAvgResponseSize()
-	log.VEventf(
-		ctx, 1,
-		"enqueueCalls=%d enqueuedRequests=%d enqueuedSingleRangeRequests=%d kvPairsRead=%d "+
-			"batchRequestsIssued=%d resumeBatchRequests=%d resumeSingleRangeRequests=%d "+
-			"numSpilledResults=%d emptyBatchResponses=%d droppedBatchResponses=%d avgResponseSize=%s",
-		w.s.enqueueCalls,
-		w.s.enqueuedRequests,
-		w.s.enqueuedSingleRangeRequests,
-		atomic.LoadInt64(w.s.atomics.kvPairsRead),
-		atomic.LoadInt64(w.s.atomics.batchRequestsIssued),
-		atomic.LoadInt64(&w.s.atomics.resumeBatchRequests),
-		atomic.LoadInt64(&w.s.atomics.resumeSingleRangeRequests),
-		w.s.results.numSpilledResults(),
-		atomic.LoadInt64(&w.s.atomics.emptyBatchResponses),
-		atomic.LoadInt64(&w.s.atomics.droppedBatchResponses),
-		humanizeutil.IBytes(avgResponseSize),
-	)
+	if log.ExpensiveLogEnabled(ctx, 1) {
+		avgResponseSize, _ := w.getAvgResponseSize()
+		log.Eventf(
+			ctx,
+			"enqueueCalls=%d enqueuedRequests=%d enqueuedSingleRangeRequests=%d kvPairsRead=%d "+
+				"batchRequestsIssued=%d resumeBatchRequests=%d resumeSingleRangeRequests=%d "+
+				"numSpilledResults=%d emptyBatchResponses=%d droppedBatchResponses=%d avgResponseSize=%s",
+			w.s.enqueueCalls,
+			w.s.enqueuedRequests,
+			w.s.enqueuedSingleRangeRequests,
+			atomic.LoadInt64(w.s.atomics.kvPairsRead),
+			atomic.LoadInt64(w.s.atomics.batchRequestsIssued),
+			atomic.LoadInt64(&w.s.atomics.resumeBatchRequests),
+			atomic.LoadInt64(&w.s.atomics.resumeSingleRangeRequests),
+			w.s.results.numSpilledResults(),
+			atomic.LoadInt64(&w.s.atomics.emptyBatchResponses),
+			atomic.LoadInt64(&w.s.atomics.droppedBatchResponses),
+			humanizeutil.IBytes(avgResponseSize),
+		)
+	}
 }
 
 // waitForRequests blocks until there is at least one request to be served.
