diff --git a/pkg/kv/kvserver/consistency_queue_test.go b/pkg/kv/kvserver/consistency_queue_test.go
index 0c02fc23ca42..7d4b9adcb627 100644
--- a/pkg/kv/kvserver/consistency_queue_test.go
+++ b/pkg/kv/kvserver/consistency_queue_test.go
@@ -542,22 +542,25 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
 	// RecomputeStats does not see any skew in its MVCC stats when they are
 	// modified concurrently. Note that these writes don't interfere with the
 	// field we modified (SysCount).
+	//
+	// We want to run this task under the cluster's stopper, as opposed to the
+	// first node's stopper, so that the quiesce signal is delivered below before
+	// individual nodes start shutting down.
 	_ = tc.Stopper().RunAsyncTaskEx(ctx, stop.TaskOpts{
 		TaskName: "recompute-loop",
-		// We want to run this task under the cluster's stopper, so that the
-		// quiesce signal is delivered below before individual nodes start
-		// shutting down. Since we're going to operate on a specific node, we
-		// can't mix the cluster stopper's tracer with the node's tracer, hence
-		// the Sterile option.
-		SpanOpt: stop.SterileRootSpan,
-	}, func(ctx context.Context) {
+	}, func(_ context.Context) {
 		// This channel terminates the loop early if the test takes more than five
 		// seconds. This is useful for stress race runs in CI where the tight loop
 		// can starve the actual work to be done.
 		done := time.After(5 * time.Second)
 		for {
-			require.NoError(t, db0.Put(ctx, fmt.Sprintf("%s%d", key, rand.Int63()), "ballast"))
+			// We're using context.Background for the KV call. As explained above,
+			// this task runs on the cluster's stopper, and so the ctx that was
+			// passed to this function has a span created with a Tracer that's
+			// different from the first node's Tracer. If we used the ctx, we'd be
+			// combining Tracers in the trace, which is illegal.
+			require.NoError(t, db0.Put(context.Background(), fmt.Sprintf("%s%d", key, rand.Int63()), "ballast"))
 			select {
 			case <-tc.Stopper().ShouldQuiesce():
 				return