From 922238db1ce8d1f62a58bb1f4336e4b0fc6add06 Mon Sep 17 00:00:00 2001
From: Bilal Akhtar
Date: Thu, 26 Jan 2023 14:47:57 -0500
Subject: [PATCH] storage: Make logging event listener async for DiskSlow

The pebble logger could block if we're experiencing a slow / stalling
disk. If the call to the pebble logger is synchronous from the
EventListener passed into Pebble, it could end up slowing down Pebble's
internal disk health checks as those rely on EventListener methods being
quick to run.

This change updates the logging event listener to asynchronously call
the logger on a DiskSlow event.

Related to #94373.

Epic: none

Release note: None.
---
 pkg/storage/pebble.go | 33 ++++++++++++++++++++++++++++-----
 1 file changed, 28 insertions(+), 5 deletions(-)

diff --git a/pkg/storage/pebble.go b/pkg/storage/pebble.go
index 31c3282a4db0..7f3c47eeb1b2 100644
--- a/pkg/storage/pebble.go
+++ b/pkg/storage/pebble.go
@@ -987,12 +987,29 @@ func NewPebble(ctx context.Context, cfg PebbleConfig) (p *Pebble, err error) {
 	// disk is stalled. While the logging subsystem should also be robust to
 	// stalls and crash the process if unable to write logs, there's less risk
 	// to sequencing the crashing listener first.
+	//
+	// For the same reason, make the logging call asynchronous for DiskSlow events.
+	// This prevents slow logging calls during a disk slow/stall event from holding
+	// up Pebble's internal disk health checking, and better obeys the
+	// EventListener contract for not having any functions block or take a while to
+	// run. Creating goroutines is acceptable given their low cost, and the low
+	// write concurrency to Pebble's FS (Pebble compactions + flushes + SQL
+	// spilling to disk). If the maximum concurrency of DiskSlow events increases
+	// significantly in the future, we can improve the logic here by queueing up
+	// most of the logging work (except for the Fatalf call), and have it be done
+	// by a single goroutine.
+	lel := pebble.MakeLoggingEventListener(pebbleLogger{
+		ctx:   logCtx,
+		depth: 2, // skip over the EventListener stack frame
+	})
+	oldDiskSlow := lel.DiskSlow
+	lel.DiskSlow = func(info pebble.DiskSlowInfo) {
+		// Run oldDiskSlow asynchronously.
+		go oldDiskSlow(info)
+	}
 	el := pebble.TeeEventListener(
 		p.makeMetricEtcEventListener(ctx),
-		pebble.MakeLoggingEventListener(pebbleLogger{
-			ctx:   logCtx,
-			depth: 2, // skip over the EventListener stack frame
-		}),
+		lel,
 	)
 	p.eventListener = &el
 
@@ -1071,10 +1088,16 @@ func (p *Pebble) makeMetricEtcEventListener(ctx context.Context) pebble.EventLis
 				atomic.AddInt64(&p.diskStallCount, 1)
 				// Note that the below log messages go to the main cockroach log, not
 				// the pebble-specific log.
+				//
+				// Run non-fatal log.* calls in separate goroutines as they could block
+				// if the logging device is also slow/stalling, preventing pebble's disk
+				// health checking from functioning correctly. See the comment in
+				// pebble.EventListener on why it's important for this method to return
+				// quickly.
 				if fatalOnExceeded {
 					log.Fatalf(ctx, "file write stall detected: %s", info)
 				} else {
-					log.Errorf(ctx, "file write stall detected: %s", info)
+					go log.Errorf(ctx, "file write stall detected: %s", info)
 				}
 				return
 			}