diff --git a/pkg/kv/bulk/sst_batcher_test.go b/pkg/kv/bulk/sst_batcher_test.go index 91a7fd28a66d..302f025c8d6e 100644 --- a/pkg/kv/bulk/sst_batcher_test.go +++ b/pkg/kv/bulk/sst_batcher_test.go @@ -196,8 +196,8 @@ func runTestImport(t *testing.T, batchSizeValue int64) { // However we log an event when forced to retry (in case we need to debug) // slow requests or something, so we can inspect the trace in the test to // determine if requests required the expected number of retries. - - addCtx, getRec, cancel := tracing.ContextWithRecordingSpan(ctx, "add") + tr := s.Tracer().(*tracing.Tracer) + addCtx, getRec, cancel := tracing.ContextWithRecordingSpan(ctx, tr, "add") defer cancel() expectedSplitRetries := 0 for _, batch := range testCase { diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go b/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go index 177ff8e548b1..c5ef262816e8 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go @@ -40,6 +40,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) @@ -3197,8 +3198,9 @@ func TestTxnCoordSenderRetriesAcrossEndTxn(t *testing.T) { }) require.Regexp(t, "injected", txn.CommitInBatch(ctx, b)) + tr := s.Tracer().(*tracing.Tracer) err = kvclientutils.CheckPushResult( - ctx, db, *origTxn, kvclientutils.ExpectAborted, tc.txnRecExpectation) + ctx, db, tr, *origTxn, kvclientutils.ExpectAborted, tc.txnRecExpectation) require.NoError(t, err) }) } diff --git a/pkg/kv/kvclient/kvcoord/transport_test.go b/pkg/kv/kvclient/kvcoord/transport_test.go index 090e576d8b10..799a38bc97d9 100644 --- a/pkg/kv/kvclient/kvcoord/transport_test.go +++ b/pkg/kv/kvclient/kvcoord/transport_test.go @@ -113,7 +113,8 @@ func TestSpanImport(t *testing.T) { expectedErr := "my expected error" server.pErr = roachpb.NewErrorf(expectedErr /* nolint:fmtsafe */) - recCtx, getRec, cancel := tracing.ContextWithRecordingSpan(ctx, "test") + recCtx, getRec, cancel := tracing.ContextWithRecordingSpan( + ctx, tracing.NewTracer(), "test") defer cancel() server.tr = tracing.SpanFromContext(recCtx).Tracer() diff --git a/pkg/kv/kvclient/rangecache/range_cache_test.go b/pkg/kv/kvclient/rangecache/range_cache_test.go index 33088de67baf..a3c5b1d010ca 100644 --- a/pkg/kv/kvclient/rangecache/range_cache_test.go +++ b/pkg/kv/kvclient/rangecache/range_cache_test.go @@ -890,7 +890,8 @@ func TestRangeCacheHandleDoubleSplit(t *testing.T) { var desc *roachpb.RangeDescriptor // Each request goes to a different key. 
var err error - ctx, getRecording, cancel := tracing.ContextWithRecordingSpan(ctx, "test") + ctx, getRecording, cancel := tracing.ContextWithRecordingSpan( + ctx, tracing.NewTracer(), "test") defer cancel() tok, err := db.cache.lookupInternal( ctx, key, oldToken, diff --git a/pkg/kv/kvnemesis/kvnemesis.go b/pkg/kv/kvnemesis/kvnemesis.go index d5997c45f97b..b2c374d5fa0f 100644 --- a/pkg/kv/kvnemesis/kvnemesis.go +++ b/pkg/kv/kvnemesis/kvnemesis.go @@ -60,7 +60,8 @@ func RunNemesis( for atomic.AddInt64(&stepsStartedAtomic, 1) <= numSteps { step := g.RandStep(rng) - recCtx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, "txn step") + recCtx, collect, cancel := tracing.ContextWithRecordingSpan( + ctx, tracing.NewTracer(), "txn step") err := a.Apply(recCtx, &step) log.VEventf(recCtx, 2, "step: %v", step) step.Trace = collect().String() diff --git a/pkg/kv/kvserver/batcheval/cmd_add_sstable_test.go b/pkg/kv/kvserver/batcheval/cmd_add_sstable_test.go index 9ca442ca4e15..be8d5897e2a2 100644 --- a/pkg/kv/kvserver/batcheval/cmd_add_sstable_test.go +++ b/pkg/kv/kvserver/batcheval/cmd_add_sstable_test.go @@ -69,7 +69,9 @@ func TestDBAddSSTable(t *testing.T) { s, _, db := serverutils.StartServer(t, base.TestServerArgs{Insecure: true}) ctx := context.Background() defer s.Stopper().Stop(ctx) - runTestDBAddSSTable(ctx, t, db, nil) + + tr := s.ClusterSettings().Tracer + runTestDBAddSSTable(ctx, t, db, tr, nil) }) t.Run("store=on-disk", func(t *testing.T) { dir, dirCleanupFn := testutils.TempDir(t) @@ -88,12 +90,17 @@ func TestDBAddSSTable(t *testing.T) { if err != nil { t.Fatal(err) } - runTestDBAddSSTable(ctx, t, db, store) + + tr := s.ClusterSettings().Tracer + runTestDBAddSSTable(ctx, t, db, tr, store) }) } // if store != nil, assume it is on-disk and check ingestion semantics. -func runTestDBAddSSTable(ctx context.Context, t *testing.T, db *kv.DB, store *kvserver.Store) { +func runTestDBAddSSTable( + ctx context.Context, t *testing.T, db *kv.DB, tr *tracing.Tracer, store *kvserver.Store, +) { + tr.TestingIncludeAsyncSpansInRecordings() // we assert on async span traces in this test { key := storage.MVCCKey{Key: []byte("bb"), Timestamp: hlc.Timestamp{WallTime: 2}} data, err := singleKVSSTable(key, roachpb.MakeValueFromString("1").RawBytes) @@ -115,7 +122,7 @@ func runTestDBAddSSTable(ctx context.Context, t *testing.T, db *kv.DB, store *kv } // Do an initial ingest. 
- ingestCtx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, "test-recording") + ingestCtx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, tr, "test-recording") defer cancel() if err := db.AddSSTable( ingestCtx, "b", "c", data, false /* disallowShadowing */, nil /* stats */, false, /* ingestAsWrites */ @@ -123,7 +130,7 @@ func runTestDBAddSSTable(ctx context.Context, t *testing.T, db *kv.DB, store *kv t.Fatalf("%+v", err) } formatted := collect().String() - if err := testutils.MatchInOrder(formatted, + if err := testutils.MatchEach(formatted, "evaluating AddSSTable", "sideloadable proposal detected", "ingested SSTable at index", @@ -194,7 +201,7 @@ func runTestDBAddSSTable(ctx context.Context, t *testing.T, db *kv.DB, store *kv before = metrics.AddSSTableApplicationCopies.Count() } for i := 0; i < 2; i++ { - ingestCtx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, "test-recording") + ingestCtx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, tr, "test-recording") defer cancel() if err := db.AddSSTable( @@ -202,7 +209,7 @@ func runTestDBAddSSTable(ctx context.Context, t *testing.T, db *kv.DB, store *kv ); err != nil { t.Fatalf("%+v", err) } - if err := testutils.MatchInOrder(collect().String(), + if err := testutils.MatchEach(collect().String(), "evaluating AddSSTable", "sideloadable proposal detected", "ingested SSTable at index", @@ -249,7 +256,7 @@ func runTestDBAddSSTable(ctx context.Context, t *testing.T, db *kv.DB, store *kv before = metrics.AddSSTableApplications.Count() } for i := 0; i < 2; i++ { - ingestCtx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, "test-recording") + ingestCtx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, tr, "test-recording") defer cancel() if err := db.AddSSTable( @@ -257,7 +264,7 @@ func runTestDBAddSSTable(ctx context.Context, t *testing.T, db *kv.DB, store *kv ); err != nil { t.Fatalf("%+v", err) } - if err := testutils.MatchInOrder(collect().String(), + if err := testutils.MatchEach(collect().String(), "evaluating AddSSTable", "via regular write batch", ); err != nil { diff --git a/pkg/kv/kvserver/concurrency/concurrency_manager_test.go b/pkg/kv/kvserver/concurrency/concurrency_manager_test.go index 49f18f363cce..287ea1f49e9c 100644 --- a/pkg/kv/kvserver/concurrency/concurrency_manager_test.go +++ b/pkg/kv/kvserver/concurrency/concurrency_manager_test.go @@ -904,7 +904,8 @@ func newMonitor() *monitor { } func (m *monitor) runSync(opName string, fn func(context.Context)) { - ctx, collect, cancel := tracing.ContextWithRecordingSpan(context.Background(), opName) + ctx, collect, cancel := tracing.ContextWithRecordingSpan( + context.Background(), tracing.NewTracer(), opName) g := &monitoredGoroutine{ opSeq: 0, // synchronous opName: opName, @@ -919,7 +920,8 @@ func (m *monitor) runSync(opName string, fn func(context.Context)) { func (m *monitor) runAsync(opName string, fn func(context.Context)) (cancel func()) { m.seq++ - ctx, collect, cancel := tracing.ContextWithRecordingSpan(context.Background(), opName) + ctx, collect, cancel := tracing.ContextWithRecordingSpan( + context.Background(), tracing.NewTracer(), opName) g := &monitoredGoroutine{ opSeq: m.seq, opName: opName, diff --git a/pkg/kv/kvserver/replica_application_decoder.go b/pkg/kv/kvserver/replica_application_decoder.go index 35f55d3fddf8..ada38b11da4d 100644 --- a/pkg/kv/kvserver/replica_application_decoder.go +++ b/pkg/kv/kvserver/replica_application_decoder.go @@ -151,7 +151,7 @@ func (d *replicaDecoder) createTracingSpans(ctx 
context.Context) { } else { cmd.ctx, cmd.sp = d.r.AmbientContext.Tracer.StartSpanCtx( ctx, - "raft application", + opName, // NB: we are lying here - we are not actually going to propagate // the recording towards the root. That seems ok. tracing.WithParentAndManualCollection(spanMeta), diff --git a/pkg/kv/kvserver/replica_learner_test.go b/pkg/kv/kvserver/replica_learner_test.go index 32e5cea3a05c..958c31c6170e 100644 --- a/pkg/kv/kvserver/replica_learner_test.go +++ b/pkg/kv/kvserver/replica_learner_test.go @@ -788,6 +788,7 @@ func TestLearnerAndVoterOutgoingFollowerRead(t *testing.T) { }) defer tc.Stopper().Stop(ctx) db := sqlutils.MakeSQLRunner(tc.ServerConn(0)) + tr := tc.Server(0).Tracer().(*tracing.Tracer) db.Exec(t, fmt.Sprintf(`SET CLUSTER SETTING kv.closed_timestamp.target_duration = '%s'`, testingTargetDuration)) db.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.close_fraction = $1`, testingCloseFraction) @@ -812,7 +813,7 @@ func TestLearnerAndVoterOutgoingFollowerRead(t *testing.T) { testutils.SucceedsSoon(t, func() error { // Trace the Send call so we can verify that it hit the exact `learner // replicas cannot serve follower reads` branch that we're trying to test. - sendCtx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, "manual read request") + sendCtx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, tr, "manual read request") defer cancel() _, pErr := repl.Send(sendCtx, req) err := pErr.GoError() diff --git a/pkg/kv/kvserver/replica_sideload_test.go b/pkg/kv/kvserver/replica_sideload_test.go index fddb8c1c0d8b..8f3f6a93fd49 100644 --- a/pkg/kv/kvserver/replica_sideload_test.go +++ b/pkg/kv/kvserver/replica_sideload_test.go @@ -415,7 +415,8 @@ func TestRaftSSTableSideloadingInline(t *testing.T) { } runOne := func(k string, test testCase) { - ctx, collect, cancel := tracing.ContextWithRecordingSpan(context.Background(), "test-recording") + ctx, collect, cancel := tracing.ContextWithRecordingSpan( + context.Background(), tracing.NewTracer(), "test-recording") defer cancel() eng := storage.NewDefaultInMem() @@ -582,7 +583,9 @@ func testRaftSSTableSideloadingProposal(t *testing.T, eng storage.Engine) { defer stopper.Stop(context.Background()) tc.Start(t, stopper) - ctx, collect, cancel := tracing.ContextWithRecordingSpan(context.Background(), "test-recording") + tr := tc.store.ClusterSettings().Tracer + tr.TestingIncludeAsyncSpansInRecordings() // we assert on async span traces in this test + ctx, collect, cancel := tracing.ContextWithRecordingSpan(context.Background(), tr, "test-recording") defer cancel() const ( diff --git a/pkg/kv/kvserver/replica_test.go b/pkg/kv/kvserver/replica_test.go index af4f93da533f..3272582d4c05 100644 --- a/pkg/kv/kvserver/replica_test.go +++ b/pkg/kv/kvserver/replica_test.go @@ -8431,7 +8431,9 @@ func TestFailureToProcessCommandClearsLocalResult(t *testing.T) { } r.mu.Unlock() - opCtx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, "test-recording") + tr := tc.store.ClusterSettings().Tracer + tr.TestingIncludeAsyncSpansInRecordings() // we assert on async span traces in this test + opCtx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, tr, "test-recording") defer cancel() ba = roachpb.BatchRequest{} diff --git a/pkg/kv/kvserver/store.go b/pkg/kv/kvserver/store.go index 8bc6755177ba..4abfc5172a15 100644 --- a/pkg/kv/kvserver/store.go +++ b/pkg/kv/kvserver/store.go @@ -2637,7 +2637,7 @@ func (s *Store) ComputeStatsForKeySpan(startKey, endKey roachpb.RKey) (StoreKeyS // carrying out any changes, returning all 
trace messages collected along the way. // Intended to help power a debug endpoint. func (s *Store) AllocatorDryRun(ctx context.Context, repl *Replica) (tracing.Recording, error) { - ctx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, "allocator dry run") + ctx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, s.ClusterSettings().Tracer, "allocator dry run") defer cancel() canTransferLease := func() bool { return true } _, err := s.replicateQueue.processOneChange( @@ -2688,7 +2688,7 @@ func (s *Store) ManuallyEnqueue( } ctx, collect, cancel := tracing.ContextWithRecordingSpan( - ctx, fmt.Sprintf("manual %s queue run", queueName)) + ctx, s.ClusterSettings().Tracer, fmt.Sprintf("manual %s queue run", queueName)) defer cancel() if !skipShouldQueue { diff --git a/pkg/kv/txn_external_test.go b/pkg/kv/txn_external_test.go index e8d3268f1b67..95c13716b634 100644 --- a/pkg/kv/txn_external_test.go +++ b/pkg/kv/txn_external_test.go @@ -25,6 +25,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/stretchr/testify/require" ) @@ -127,10 +128,13 @@ func TestRollbackAfterAmbiguousCommit(t *testing.T) { leaseHolder, err := tc.FindRangeLeaseHolder(rdesc, nil /* hint */) require.NoError(t, err) var db *kv.DB + var tr *tracing.Tracer if leaseHolder.NodeID == 1 { db = tc.Servers[1].DB() + tr = tc.Servers[1].Tracer().(*tracing.Tracer) } else { db = tc.Servers[0].DB() + tr = tc.Servers[0].Tracer().(*tracing.Tracer) } txn := db.NewTxn(ctx, "test") @@ -190,7 +194,7 @@ func TestRollbackAfterAmbiguousCommit(t *testing.T) { require.Equal(t, roachpb.STAGING, queryTxnRes.QueriedTxn.Status) // Perform transaction recovery. - require.NoError(t, kvclientutils.CheckPushResult(ctx, db, *txn.TestingCloneTxn(), + require.NoError(t, kvclientutils.CheckPushResult(ctx, db, tr, *txn.TestingCloneTxn(), kvclientutils.ExpectCommitted, kvclientutils.ExpectPusheeTxnRecovery)) } diff --git a/pkg/migration/migrationmanager/manager_external_test.go b/pkg/migration/migrationmanager/manager_external_test.go index 7be8c63cb6d5..754fd61d9678 100644 --- a/pkg/migration/migrationmanager/manager_external_test.go +++ b/pkg/migration/migrationmanager/manager_external_test.go @@ -146,7 +146,8 @@ RETURNING id;`).Scan(&secondID)) // Launch a second migration which later we'll ensure does not kick off // another job. We'll make sure this happens by polling the trace to see // the log line indicating what we want. - recCtx, getRecording, cancel := tracing.ContextWithRecordingSpan(ctx, "test") + tr := tc.Server(0).Tracer().(*tracing.Tracer) + recCtx, getRecording, cancel := tracing.ContextWithRecordingSpan(ctx, tr, "test") defer cancel() upgrade2Err := make(chan error, 1) go func() { diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go index 7a4fc49e4457..16442bedb7fa 100644 --- a/pkg/server/testserver.go +++ b/pkg/server/testserver.go @@ -1398,7 +1398,7 @@ func (ts *TestServer) ExecutorConfig() interface{} { return *ts.sqlServer.execCfg } -// Tracer is part of the TestServerInterface +// Tracer is part of the TestServerInterface. 
func (ts *TestServer) Tracer() interface{} { return ts.node.storeCfg.AmbientCtx.Tracer } diff --git a/pkg/sql/distsql_running_test.go b/pkg/sql/distsql_running_test.go index 8b859f80ce89..fbba292b806b 100644 --- a/pkg/sql/distsql_running_test.go +++ b/pkg/sql/distsql_running_test.go @@ -111,7 +111,8 @@ func TestDistSQLRunningInAbortedTxn(t *testing.T) { iter := 0 // We'll trace to make sure the test isn't fooling itself. - runningCtx, getRec, cancel := tracing.ContextWithRecordingSpan(ctx, "test") + tr := s.Tracer().(*tracing.Tracer) + runningCtx, getRec, cancel := tracing.ContextWithRecordingSpan(ctx, tr, "test") defer cancel() err = shortDB.Txn(runningCtx, func(ctx context.Context, txn *kv.Txn) error { iter++ diff --git a/pkg/sql/internal_test.go b/pkg/sql/internal_test.go index 1b67503dfdb6..ccf0827b7afa 100644 --- a/pkg/sql/internal_test.go +++ b/pkg/sql/internal_test.go @@ -511,7 +511,8 @@ func TestInternalExecutorPushDetectionInTxn(t *testing.T) { txn.CommitTimestamp() require.True(t, txn.IsSerializablePushAndRefreshNotPossible()) - execCtx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, "test-recording") + tr := s.Tracer().(*tracing.Tracer) + execCtx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, tr, "test-recording") defer cancel() ie := s.InternalExecutor().(*sql.InternalExecutor) _, err = ie.Exec(execCtx, "test", txn, "select 42") diff --git a/pkg/sql/logictest/testdata/logic_test/distsql_stats b/pkg/sql/logictest/testdata/logic_test/distsql_stats index b684785bb6ce..cfb8ce4f3eb7 100644 --- a/pkg/sql/logictest/testdata/logic_test/distsql_stats +++ b/pkg/sql/logictest/testdata/logic_test/distsql_stats @@ -675,30 +675,11 @@ INSERT INTO users (user_profile) VALUES ('{"first_name": "Carl", "last_name": "Kimball", "location": "NYC", "breed": "Boston Terrier"}' ) -statement ok -SET tracing=on - # Ensure that trying to create statistics with default columns does not fail # when there is an inverted index. statement ok CREATE STATISTICS s FROM users -# Ensure that the trace includes the job by observing the job move through -# the 'succeeded' state in the trace. 
- -query I -SELECT count(*) - FROM [SHOW TRACE FOR SESSION] - WHERE message - LIKE '%job%: stepping through state succeeded with error: %' - AND message - NOT LIKE '%SELECT message%' ----- -1 - -statement ok -SET tracing=off - query TTIIIB colnames SELECT statistics_name, diff --git a/pkg/sql/logictest/testdata/logic_test/vectorize_local b/pkg/sql/logictest/testdata/logic_test/vectorize_local index 27f95c563121..57fc6c30b70c 100644 --- a/pkg/sql/logictest/testdata/logic_test/vectorize_local +++ b/pkg/sql/logictest/testdata/logic_test/vectorize_local @@ -145,12 +145,9 @@ SET tracing = on; SELECT * FROM tpar WHERE a = 0 OR a = 10; SET tracing = off query T SELECT message FROM [SHOW TRACE FOR SESSION] WHERE message IN ('querying next range at /Table/56/1/0', - 'querying next range at /Table/56/1/10', - '=== SPAN START: kv.DistSender: sending partial batch ===' - ) + 'querying next range at /Table/56/1/10') ---- querying next range at /Table/56/1/0 -=== SPAN START: kv.DistSender: sending partial batch === querying next range at /Table/56/1/10 # Regression test for #46123 (rowexec.TableReader not implementing diff --git a/pkg/sql/opt/exec/execbuilder/testdata/select b/pkg/sql/opt/exec/execbuilder/testdata/select index c1d48f855243..b607f591804f 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/select +++ b/pkg/sql/opt/exec/execbuilder/testdata/select @@ -1760,7 +1760,6 @@ WHERE message IN ) ---- querying next range at /Table/73/1/0 -=== SPAN START: kv.DistSender: sending partial batch === querying next range at /Table/73/1/10 # Test for 42202 -- ensure filters can get pushed down through project-set. diff --git a/pkg/sql/opt/exec/execbuilder/testdata/show_trace_nonmetamorphic b/pkg/sql/opt/exec/execbuilder/testdata/show_trace_nonmetamorphic index ff0a2d537d9b..9a620c6cce5b 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/show_trace_nonmetamorphic +++ b/pkg/sql/opt/exec/execbuilder/testdata/show_trace_nonmetamorphic @@ -75,11 +75,10 @@ query TT SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] WHERE operation != 'dist sender send' ---- -flow CPut /Table/54/1/1/0 -> /TUPLE/2:2:Int/2 -flow InitPut /Table/54/2/2/0 -> /BYTES/0x89 -kv.DistSender: sending partial batch r36: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 -flow fast path completed -exec stmt rows affected: 1 +flow CPut /Table/54/1/1/0 -> /TUPLE/2:2:Int/2 +flow InitPut /Table/54/2/2/0 -> /BYTES/0x89 +flow fast path completed +exec stmt rows affected: 1 statement error duplicate key value @@ -90,10 +89,9 @@ set tracing=off; SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] WHERE operation != 'dist sender send' ---- -flow CPut /Table/54/1/1/0 -> /TUPLE/2:2:Int/2 -flow InitPut /Table/54/2/2/0 -> /BYTES/0x89 -kv.DistSender: sending partial batch r36: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 -exec stmt execution failed after 0 rows: duplicate key value violates unique constraint "primary" +flow CPut /Table/54/1/1/0 -> /TUPLE/2:2:Int/2 +flow InitPut /Table/54/2/2/0 -> /BYTES/0x89 +exec stmt execution failed after 0 rows: duplicate key value violates unique constraint "primary" statement error duplicate key value SET tracing = on,kv,results; INSERT INTO t.kv(k, v) VALUES (2,2); SET tracing = off @@ -103,10 +101,9 @@ set tracing=off; SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] WHERE operation != 'dist sender send' ---- -flow CPut /Table/54/1/2/0 -> /TUPLE/2:2:Int/2 -flow InitPut /Table/54/2/2/0 -> /BYTES/0x8a -kv.DistSender: sending partial batch r36: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 -exec stmt 
execution failed after 0 rows: duplicate key value violates unique constraint "woo" +flow CPut /Table/54/1/2/0 -> /TUPLE/2:2:Int/2 +flow InitPut /Table/54/2/2/0 -> /BYTES/0x8a +exec stmt execution failed after 0 rows: duplicate key value violates unique constraint "woo" statement ok SET tracing = on,kv,results; CREATE TABLE t.kv2 AS TABLE t.kv; SET tracing = off @@ -181,13 +178,12 @@ query TT SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] WHERE operation != 'dist sender send' ---- -colbatchscan Scan /Table/54/{1-2} -colbatchscan fetched: /kv/primary/1/v -> /2 -flow Del /Table/54/2/2/0 -flow Del /Table/54/1/1/0 -kv.DistSender: sending partial batch r36: sending batch 1 Del to (n1,s1):1 -flow fast path completed -exec stmt rows affected: 1 +colbatchscan Scan /Table/54/{1-2} +colbatchscan fetched: /kv/primary/1/v -> /2 +flow Del /Table/54/2/2/0 +flow Del /Table/54/1/1/0 +flow fast path completed +exec stmt rows affected: 1 statement ok SET tracing = on,kv,results; DROP INDEX t.kv@woo CASCADE; SET tracing = off diff --git a/pkg/sql/opt/exec/execbuilder/testdata/upsert_nonmetamorphic b/pkg/sql/opt/exec/execbuilder/testdata/upsert_nonmetamorphic index 7fed366be2d0..fb470616c054 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/upsert_nonmetamorphic +++ b/pkg/sql/opt/exec/execbuilder/testdata/upsert_nonmetamorphic @@ -38,12 +38,11 @@ query TT SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] WHERE operation != 'dist sender send' ---- -colbatchscan Scan /Table/55/1/2{-/#} -flow CPut /Table/55/1/2/0 -> /TUPLE/2:2:Int/3 -flow InitPut /Table/55/2/3/0 -> /BYTES/0x8a -kv.DistSender: sending partial batch r36: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 -flow fast path completed -exec stmt rows affected: 1 +colbatchscan Scan /Table/55/1/2{-/#} +flow CPut /Table/55/1/2/0 -> /TUPLE/2:2:Int/3 +flow InitPut /Table/55/2/3/0 -> /BYTES/0x8a +flow fast path completed +exec stmt rows affected: 1 statement ok SET tracing = on,kv,results; UPSERT INTO t.kv(k, v) VALUES (1,2); SET tracing = off @@ -52,12 +51,11 @@ query TT SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] WHERE operation != 'dist sender send' ---- -colbatchscan Scan /Table/55/1/1{-/#} -flow CPut /Table/55/1/1/0 -> /TUPLE/2:2:Int/2 -flow InitPut /Table/55/2/2/0 -> /BYTES/0x89 -kv.DistSender: sending partial batch r36: sending batch 1 CPut, 1 EndTxn to (n1,s1):1 -flow fast path completed -exec stmt rows affected: 1 +colbatchscan Scan /Table/55/1/1{-/#} +flow CPut /Table/55/1/1/0 -> /TUPLE/2:2:Int/2 +flow InitPut /Table/55/2/2/0 -> /BYTES/0x89 +flow fast path completed +exec stmt rows affected: 1 statement error duplicate key value SET tracing = on,kv,results; UPSERT INTO t.kv(k, v) VALUES (2,2); SET tracing = off @@ -67,10 +65,9 @@ set tracing=off; SELECT operation, message FROM [SHOW KV TRACE FOR SESSION] WHERE operation != 'dist sender send' ---- -colbatchscan Scan /Table/55/1/2{-/#} -colbatchscan fetched: /kv/primary/2/v -> /3 -flow Put /Table/55/1/2/0 -> /TUPLE/2:2:Int/2 -flow Del /Table/55/2/3/0 -flow CPut /Table/55/2/2/0 -> /BYTES/0x8a (expecting does not exist) -kv.DistSender: sending partial batch r36: sending batch 1 Put, 1 EndTxn to (n1,s1):1 -exec stmt execution failed after 0 rows: duplicate key value violates unique constraint "woo" +colbatchscan Scan /Table/55/1/2{-/#} +colbatchscan fetched: /kv/primary/2/v -> /3 +flow Put /Table/55/1/2/0 -> /TUPLE/2:2:Int/2 +flow Del /Table/55/2/3/0 +flow CPut /Table/55/2/2/0 -> /BYTES/0x8a (expecting does not exist) +exec stmt execution failed after 0 rows: duplicate key 
value violates unique constraint "woo"
diff --git a/pkg/testutils/kvclientutils/txn_recovery.go b/pkg/testutils/kvclientutils/txn_recovery.go
index 44723e8883e1..e6edef38dba1 100644
--- a/pkg/testutils/kvclientutils/txn_recovery.go
+++ b/pkg/testutils/kvclientutils/txn_recovery.go
@@ -57,6 +57,7 @@ const (
 func CheckPushResult(
 	ctx context.Context,
 	db *kv.DB,
+	tr *tracing.Tracer,
 	txn roachpb.Transaction,
 	expResolution ExpectedTxnResolution,
 	pushExpectation PushExpectation,
@@ -75,7 +76,7 @@ func CheckPushResult(
 	ba := roachpb.BatchRequest{}
 	ba.Add(&pushReq)
 
-	recCtx, collectRec, cancel := tracing.ContextWithRecordingSpan(ctx, "test trace")
+	recCtx, collectRec, cancel := tracing.ContextWithRecordingSpan(ctx, tr, "test trace")
 	defer cancel()
 
 	resp, pErr := db.NonTransactionalSender().Send(recCtx, ba)
diff --git a/pkg/testutils/trace.go b/pkg/testutils/trace.go
index 90c2f78b0abc..83d605baa393 100644
--- a/pkg/testutils/trace.go
+++ b/pkg/testutils/trace.go
@@ -50,3 +50,25 @@ func MatchInOrder(s string, res ...string) error {
 	}
 	return nil
 }
+
+// MatchEach interprets the given slice of strings as a slice of regular
+// expressions and checks that each one individually matches against the given
+// string. For example, if s=abcdefg and res=bc,ab,fg no error is returned,
+// whereas res=abc,cdg would return a descriptive error about failing to match cdg.
+func MatchEach(s string, res ...string) error {
+	for i := range res {
+		reStr := "(?ms)" + res[i]
+		re, err := regexp.Compile(reStr)
+		if err != nil {
+			return errors.Errorf("regexp %d (%q) does not compile: %s", i, reStr, err)
+		}
+		if re.FindStringIndex(s) == nil {
+			// Not found.
+			return errors.Errorf(
+				"unable to find regexp %d (%q) in string:\n\n%s",
+				i, reStr, s,
+			)
+		}
+	}
+	return nil
+}
diff --git a/pkg/util/tracing/crdbspan.go b/pkg/util/tracing/crdbspan.go
index 674b32308a2f..9be388dc021b 100644
--- a/pkg/util/tracing/crdbspan.go
+++ b/pkg/util/tracing/crdbspan.go
@@ -29,8 +29,8 @@ import (
 // crdbSpan is a span for internal crdb usage. This is used to power SQL session
 // tracing.
 type crdbSpan struct {
-	traceID      uint64 // probabilistically unique.
-	spanID       uint64 // probabilistically unique.
+	traceID      uint64 // probabilistically unique
+	spanID       uint64 // probabilistically unique
 	parentSpanID uint64
 	goroutineID  uint64
 
@@ -65,7 +65,8 @@ type crdbSpanMu struct {
 	// children contains the list of child spans started after this Span
 	// started recording.
 	children []*crdbSpan
-	// remoteSpan contains the list of remote child spans manually imported.
+	// remoteSpans contains the list of remote child span recordings that
+	// were manually imported.
 	remoteSpans []tracingpb.RecordedSpan
 }
 
diff --git a/pkg/util/tracing/span_options.go b/pkg/util/tracing/span_options.go
index f052b0b2b3cc..bc52e82b4a37 100644
--- a/pkg/util/tracing/span_options.go
+++ b/pkg/util/tracing/span_options.go
@@ -171,11 +171,11 @@ func (o tagsOption) apply(opts spanOptions) spanOptions {
 type followsFromOpt struct{}
 
 // WithFollowsFrom instructs StartSpan to use a FollowsFrom relationship
-// should a child span be created (i.e. should WithParentAndAutoCollection or WithParentAndManualCollection
-// be supplied as well). A WithFollowsFrom child is expected to, in the common
-// case, outlive the parent span (for example: asynchronous cleanup work),
-// whereas a "regular" child span is not (i.e. the parent span typically
-// waits for the child to Finish()).
+// should a child span be created (i.e. should WithParentAndAutoCollection or
+// WithParentAndManualCollection be supplied as well). A WithFollowsFrom child
+// is expected to, in the common case, outlive the parent span (for example:
+// asynchronous cleanup work), whereas a "regular" child span is not (i.e. the
+// parent span typically waits for the child to Finish()).
 //
 // There is no penalty for getting this wrong, but it can help external
 // trace systems visualize the traces better.
diff --git a/pkg/util/tracing/tracer.go b/pkg/util/tracing/tracer.go
index 9a8492f7a8c3..ed27ef33f416 100644
--- a/pkg/util/tracing/tracer.go
+++ b/pkg/util/tracing/tracer.go
@@ -166,6 +166,8 @@ type Tracer struct {
 		syncutil.Mutex
 		m map[*Span]struct{}
 	}
+
+	includeAsyncSpansInRecordings bool // see TestingIncludeAsyncSpansInRecordings
 }
 
 // NewTracer creates a Tracer. It initially tries to run with minimal overhead
@@ -694,27 +696,40 @@ func (t *Tracer) VisitSpans(visitor func(*Span) error) error {
 	return nil
 }
 
+// TestingIncludeAsyncSpansInRecordings is a test-only helper that configures
+// the tracer to include recordings from forked/async child spans when
+// retrieving the recording for a parent span.
+func (t *Tracer) TestingIncludeAsyncSpansInRecordings() {
+	t.includeAsyncSpansInRecordings = true
+}
+
 // ForkSpan forks the current span, if any[1]. Forked spans "follow from" the
 // original, and are typically used to trace operations that may outlive the
 // parent (think async tasks). See the package-level documentation for more
 // details.
 //
-// The recordings from these spans will be automatically propagated to the
-// parent span. Also see `ChildSpan`, for the other kind of derived span
+// The recordings from these spans will not be automatically propagated to the
+// parent span[2]. Also see `ChildSpan`, for the other kind of derived span
 // relation.
 //
 // A context wrapping the newly created span is returned, along with the span
 // itself. If non-nil, the caller is responsible for eventually Finish()ing it.
 //
 // [1]: Looking towards the provided context to see if one exists.
+// [2]: Unless configured differently by tests, see
+// TestingIncludeAsyncSpansInRecordings.
 func ForkSpan(ctx context.Context, opName string) (context.Context, *Span) {
 	sp := SpanFromContext(ctx)
 	if sp == nil {
 		return ctx, nil
 	}
-	return sp.Tracer().StartSpanCtx(
-		ctx, opName, WithParentAndAutoCollection(sp), WithFollowsFrom(),
-	)
+	collectionOpt := WithParentAndManualCollection(sp.Meta())
+	if sp.Tracer().includeAsyncSpansInRecordings {
+		// Using auto collection here ensures that recordings from async spans
+		// also show up at the parent.
+		collectionOpt = WithParentAndAutoCollection(sp)
+	}
+	return sp.Tracer().StartSpanCtx(ctx, opName, WithFollowsFrom(), collectionOpt)
 }
 
 // ChildSpan creates a child span of the current one, if any. Recordings from
@@ -787,17 +802,16 @@ func StartVerboseTrace(ctx context.Context, tr *Tracer, opName string) (context.
 	return ctx, sp
 }
 
-// ContextWithRecordingSpan returns a context with an embedded trace Span which
-// returns its contents when getRecording is called and must be stopped by
-// calling the cancel method when done with the context (getRecording() needs to
-// be called before cancel()).
+// ContextWithRecordingSpan returns a context with an embedded trace Span.
+// The Span is created using the provided Tracer. The Span returns its contents
+// when `getRecording` is called, and must be stopped by calling `cancel` when
+// done with the context (`getRecording` needs to be called before `cancel`).
 //
// // Note that to convert the recorded spans into text, you can use // Recording.String(). Tests can also use FindMsgInRecording(). func ContextWithRecordingSpan( - ctx context.Context, opName string, + ctx context.Context, tr *Tracer, opName string, ) (_ context.Context, getRecording func() Recording, cancel func()) { - tr := NewTracer() ctx, sp := tr.StartSpanCtx(ctx, opName, WithForceRealSpan()) sp.SetVerbose(true) ctx, cancelCtx := context.WithCancel(ctx)
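
For illustration, a minimal sketch of how a test adopts the new ContextWithRecordingSpan signature: the caller now supplies the Tracer (typically the server's, so that tracer-level knobs take effect) instead of the function creating a throwaway one internally. The test name and traced work below are hypothetical; the plumbing mirrors the call sites updated above.

// Hypothetical test sketching the new call pattern.
func TestRecordingSpanSketch(t *testing.T) {
	ctx := context.Background()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop(ctx)

	// Pull the Tracer off the test server, as the updated call sites do.
	tr := s.Tracer().(*tracing.Tracer)
	recCtx, getRec, cancel := tracing.ContextWithRecordingSpan(ctx, tr, "sketch")
	defer cancel()

	_ = recCtx // traced work would run under recCtx here

	// The recording must be collected before cancel runs.
	t.Log(getRec().String())
}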
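A sketch of the ForkSpan behavior change: forked (async) spans now default to WithParentAndManualCollection, so their recordings no longer show up in the parent's recording unless the tracer opts in via the new test-only knob. The op names and event text are made up.

func forkSpanSketch() {
	tr := tracing.NewTracer()
	// Without this opt-in, the fork's recording stays out of the parent's.
	tr.TestingIncludeAsyncSpansInRecordings()

	ctx, getRec, cancel := tracing.ContextWithRecordingSpan(
		context.Background(), tr, "parent")
	defer cancel()

	forkCtx, forkSp := tracing.ForkSpan(ctx, "async-child")
	log.Event(forkCtx, "hello from the fork") // hypothetical event
	forkSp.Finish()

	fmt.Println(getRec()) // with the opt-in, includes the "async-child" span
}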
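Finally, a usage sketch contrasting the new MatchEach helper with MatchInOrder, which the AddSSTable test switches away from (presumably because, with manual collection, the relative order of messages in a recording is no longer guaranteed). The trace string here is invented.

func matchEachSketch() error {
	trace := "ingested SSTable at index 7\nevaluating AddSSTable"

	// MatchInOrder requires the regexps to match in sequence; it errors here
	// because the two messages appear in the opposite order in the input.
	if err := testutils.MatchInOrder(trace,
		"evaluating AddSSTable", "ingested SSTable"); err == nil {
		return errors.New("expected MatchInOrder to fail")
	}

	// MatchEach only requires each regexp to match somewhere in the input.
	return testutils.MatchEach(trace,
		"evaluating AddSSTable", "ingested SSTable")
}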