From 2dac29ae7f8e6ad8cbd65fbfcc4c66d922f0f2d1 Mon Sep 17 00:00:00 2001
From: Bilal Akhtar
Date: Thu, 26 Jan 2023 14:47:57 -0500
Subject: [PATCH] storage: Make logging event listener async for DiskSlow

The pebble logger could block if we're experiencing a slow / stalling
disk. If the call to the pebble logger is synchronous from the
EventListener passed into Pebble, it could end up slowing down Pebble's
internal disk health checks as those rely on EventListener methods
being quick to run.

This change updates the logging event listener to asynchronously call
the logger on a DiskSlow event.

Related to #94373.

Epic: none

Release note: None.
---
 pkg/storage/pebble.go | 34 ++++++++++++++++++++++++++++------
 1 file changed, 28 insertions(+), 6 deletions(-)

diff --git a/pkg/storage/pebble.go b/pkg/storage/pebble.go
index 87f3b84a79cb..62448365b510 100644
--- a/pkg/storage/pebble.go
+++ b/pkg/storage/pebble.go
@@ -855,12 +855,29 @@ func NewPebble(ctx context.Context, cfg PebbleConfig) (p *Pebble, err error) {
 	// disk is stalled. While the logging subsystem should also be robust to
 	// stalls and crash the process if unable to write logs, there's less risk
 	// to sequencing the crashing listener first.
+	//
+	// For the same reason, make the logging call asynchronous for DiskSlow events.
+	// This prevents slow logging calls during a disk slow/stall event from holding
+	// up Pebble's internal disk health checking, and better obeys the
+	// EventListener contract for not having any functions block or take a while to
+	// run. Creating goroutines is acceptable given their low cost, and the low
+	// write concurrency to Pebble's FS (Pebble compactions + flushes + SQL
+	// spilling to disk). If the maximum concurrency of DiskSlow events increases
+	// significantly in the future, we can improve the logic here by queueing up
+	// most of the logging work (except for the Fatalf call), and have it be done
+	// by a single goroutine.
+	lel := pebble.MakeLoggingEventListener(pebbleLogger{
+		ctx:   logCtx,
+		depth: 2, // skip over the EventListener stack frame
+	})
+	oldDiskSlow := lel.DiskSlow
+	lel.DiskSlow = func(info pebble.DiskSlowInfo) {
+		// Run oldDiskSlow asynchronously.
+		go oldDiskSlow(info)
+	}
 	cfg.Opts.EventListener = pebble.TeeEventListener(
 		p.makeMetricEtcEventListener(ctx),
-		pebble.MakeLoggingEventListener(pebbleLogger{
-			ctx:   logCtx,
-			depth: 2, // skip over the EventListener stack frame
-		}),
+		lel,
 	)
 	p.eventListener = &cfg.Opts.EventListener
 	p.wrappedIntentWriter = wrapIntentWriter(ctx, p)
@@ -918,6 +935,12 @@ func (p *Pebble) makeMetricEtcEventListener(ctx context.Context) pebble.EventLis
 			atomic.AddInt64(&p.diskStallCount, 1)
 			// Note that the below log messages go to the main cockroach log, not
 			// the pebble-specific log.
+			//
+			// Run non-fatal log.* calls in separate goroutines as they could block
+			// if the logging device is also slow/stalling, preventing pebble's disk
+			// health checking from functioning correctly. See the comment in
+			// pebble.EventListener on why it's important for this method to return
+			// quickly.
 			if fatalOnExceeded {
 				// The write stall may prevent the process from exiting. If
 				// the process won't exit, we can at least terminate all our
@@ -930,8 +953,7 @@ func (p *Pebble) makeMetricEtcEventListener(ctx context.Context) pebble.EventLis
 				log.Fatalf(ctx, "disk stall detected: pebble unable to write to %s in %.2f seconds",
 					info.Path, redact.Safe(info.Duration.Seconds()))
 			} else {
-				log.Errorf(ctx, "disk stall detected: pebble unable to write to %s in %.2f seconds",
-					info.Path, redact.Safe(info.Duration.Seconds()))
+				go log.Errorf(ctx, "file write stall detected: %s", info)
 			}
 			return
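
For illustration only, not part of the patch: a minimal, self-contained Go sketch of the wrapping pattern the patch applies. The EventListener and DiskSlowInfo types below are simplified stand-ins, not Pebble's real definitions, and the sleep-based "slow logger" is a hypothetical placeholder for a logger blocked on a stalled disk.

// async_disk_slow_sketch.go
package main

import (
	"fmt"
	"time"
)

// DiskSlowInfo is a simplified stand-in for pebble.DiskSlowInfo.
type DiskSlowInfo struct {
	Path     string
	Duration time.Duration
}

// EventListener is a simplified stand-in for pebble.EventListener; only the
// DiskSlow callback is modeled here.
type EventListener struct {
	DiskSlow func(DiskSlowInfo)
}

func main() {
	// A "logging" listener whose DiskSlow handler blocks, emulating a logger
	// that is itself writing to a slow or stalled disk.
	lel := EventListener{
		DiskSlow: func(info DiskSlowInfo) {
			time.Sleep(50 * time.Millisecond) // stand-in for a stalled log write
			fmt.Printf("disk slowness detected: %s for %s\n", info.Path, info.Duration)
		},
	}

	// The pattern from the patch: keep the old handler and replace it with a
	// wrapper that invokes it in a goroutine, so delivering the event never
	// blocks the caller (Pebble's disk health checker in the real code).
	oldDiskSlow := lel.DiskSlow
	lel.DiskSlow = func(info DiskSlowInfo) {
		go oldDiskSlow(info)
	}

	start := time.Now()
	lel.DiskSlow(DiskSlowInfo{Path: "000123.sst", Duration: 2 * time.Second})
	fmt.Printf("event delivery returned in %s\n", time.Since(start))

	// Give the detached goroutine time to finish before the program exits.
	time.Sleep(100 * time.Millisecond)
}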