diff --git a/pkg/base/test_server_args.go b/pkg/base/test_server_args.go
index 0bf51ff81f37..1d0fb7211c0f 100644
--- a/pkg/base/test_server_args.go
+++ b/pkg/base/test_server_args.go
@@ -78,7 +78,7 @@ type TestServerArgs struct {
 	// Fields copied to the server.Config.
 	Insecure        bool
-	RetryOptions    retry.Options
+	RetryOptions    retry.Options // TODO(tbg): make testing knob.
 	SocketFile      string
 	ScanInterval    time.Duration
 	ScanMinIdleTime time.Duration
diff --git a/pkg/server/config.go b/pkg/server/config.go
index 9ebeedbe9cf3..4b02a6608aa5 100644
--- a/pkg/server/config.go
+++ b/pkg/server/config.go
@@ -169,6 +169,8 @@ type Config struct {
 	JoinPreferSRVRecords bool
 
 	// RetryOptions controls the retry behavior of the server.
+	//
+	// TODO(tbg): this is only ever used in one test. Make it a testing knob.
 	RetryOptions retry.Options
 
 	// CacheSize is the amount of memory in bytes to use for caching data.
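The two TODOs above describe the same cleanup: RetryOptions is consumed by exactly one test, so it belongs on the server's testing knobs rather than on Config. A rough sketch of that shape, assuming the existing server.TestingKnobs struct is the natural home (the field and helper below are illustrative, not part of this change):

	// Hypothetical knob placement; names here are illustrative only.
	type TestingKnobs struct {
		// ... existing knobs ...

		// RetryOptions, if non-nil, overrides the retry policy the server
		// uses internally. Only tests are expected to set this.
		RetryOptions *retry.Options
	}

	// The server would fall back to the defaults when the knob is unset.
	func serverRetryOptions(knobs *TestingKnobs) retry.Options {
		if knobs != nil && knobs.RetryOptions != nil {
			return *knobs.RetryOptions
		}
		return base.DefaultRetryOptions()
	}
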
diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go
index 26d81ed5ef7c..c4e0b2251ee3 100644
--- a/pkg/server/testserver.go
+++ b/pkg/server/testserver.go
@@ -439,17 +439,85 @@ func (d dummyProtectedTSProvider) Protect(context.Context, *kv.Txn, *ptpb.Record
 }
 
 func testSQLServerArgs(ts *TestServer) sqlServerArgs {
-	st := cluster.MakeTestingClusterSettings()
 	stopper := ts.Stopper()
+	// If we used a dummy gossip, DistSQL and random other things won't work.
+	// Just use the test server's for now.
+	//
+	// TODO(tbg): drop the Gossip dependency.
+	g := ts.Gossip()
+	ts = nil // prevent usage below
 
-	cfg := makeTestConfig(st)
+	st := cluster.MakeTestingClusterSettings()
 
-	clock := hlc.NewClock(hlc.UnixNano, 1)
-	rpcContext := rpc.NewInsecureTestingContext(clock, stopper)
+	var sqlConfig SQLConfig
+	var bothConfig BothConfig
+	{
+		cfg := makeTestConfig(st)
+		sqlConfig = cfg.SQLConfig
+		bothConfig = cfg.BothConfig
+	}
 
-	nl := allErrorsFakeLiveness{}
+	clock := hlc.NewClock(hlc.UnixNano, time.Duration(bothConfig.MaxOffset))
 
-	ds := ts.DistSender()
+	var rpcTestingKnobs rpc.ContextTestingKnobs
+	if p, ok := bothConfig.TestingKnobs.Server.(*TestingKnobs); ok {
+		rpcTestingKnobs = p.ContextTestingKnobs
+	}
+	rpcContext := rpc.NewContextWithTestingKnobs(
+		bothConfig.AmbientCtx,
+		bothConfig.Config,
+		clock,
+		stopper,
+		st,
+		rpcTestingKnobs,
+	)
+
+	var dsKnobs kvcoord.ClientTestingKnobs
+	if dsKnobsP, ok := bothConfig.TestingKnobs.KVClient.(*kvcoord.ClientTestingKnobs); ok {
+		dsKnobs = *dsKnobsP
+	}
+	rpcRetryOptions := base.DefaultRetryOptions()
+	resolver := gossip.AddressResolver(g) // TODO(tbg): break gossip dep
+	nodeDialer := nodedialer.New(rpcContext, resolver)
+	dsCfg := kvcoord.DistSenderConfig{
+		AmbientCtx:        bothConfig.AmbientCtx,
+		Settings:          st,
+		Clock:             clock,
+		RPCRetryOptions:   &rpcRetryOptions,
+		RPCContext:        rpcContext,
+		RangeDescriptorDB: nil, // use DistSender itself
+		NodeDialer:        nodeDialer,
+		TestingKnobs:      dsKnobs,
+	}
+	ds := kvcoord.NewDistSender(dsCfg, g)
+
+	var clientKnobs kvcoord.ClientTestingKnobs
+	if p, ok := bothConfig.TestingKnobs.KVClient.(*kvcoord.ClientTestingKnobs); ok {
+		clientKnobs = *p
+	}
+	// TODO(tbg): expose this registry via prometheus. See:
+	// https://github.com/cockroachdb/cockroach/issues/47905
+	registry := metric.NewRegistry()
+	txnMetrics := kvcoord.MakeTxnMetrics(bothConfig.HistogramWindowInterval())
+	registry.AddMetricStruct(txnMetrics)
+	tcsFactory := kvcoord.NewTxnCoordSenderFactory(
+		kvcoord.TxnCoordSenderFactoryConfig{
+			AmbientCtx:        bothConfig.AmbientCtx,
+			Settings:          st,
+			Clock:             clock,
+			Stopper:           stopper,
+			HeartbeatInterval: base.DefaultTxnHeartbeatInterval,
+			Linearizable:      false,
+			Metrics:           txnMetrics,
+			TestingKnobs:      clientKnobs,
+		},
+		ds,
+	)
+	db := kv.NewDB(
+		bothConfig.AmbientCtx,
+		tcsFactory,
+		clock,
+	)
 	circularInternalExecutor := &sql.InternalExecutor{}
 
 	// Protected timestamps won't be available (at first) in multi-tenant
@@ -457,7 +525,7 @@ func testSQLServerArgs(ts *TestServer) sqlServerArgs {
 	var protectedTSProvider protectedts.Provider
 	{
 		pp, err := ptprovider.New(ptprovider.Config{
-			DB:               ts.DB(),
+			DB:               db,
 			InternalExecutor: circularInternalExecutor,
 			Settings:         st,
 		})
@@ -467,15 +535,6 @@ func testSQLServerArgs(ts *TestServer) sqlServerArgs {
 		protectedTSProvider = dummyProtectedTSProvider{pp}
 	}
 
-	registry := metric.NewRegistry()
-
-	// If we used a dummy gossip, DistSQL and random other things won't work.
-	// Just use the test server's for now.
-	// g := gossip.NewTest(nodeID, nil, nil, stopper, registry, nil)
-	g := ts.Gossip()
-
-	nd := nodedialer.New(rpcContext, gossip.AddressResolver(ts.Gossip()))
-
 	dummyRecorder := &status.MetricsRecorder{}
 
 	// TODO(asubiotto): Jobs don't play well with a weird node ID in a multitenant
@@ -497,9 +556,9 @@ func testSQLServerArgs(ts *TestServer) sqlServerArgs {
 			rpcContext:   rpcContext,
 			distSender:   ds,
 			statusServer: noStatusServer,
-			nodeLiveness: nl,
+			nodeLiveness: allErrorsFakeLiveness{},
 			gossip:       gossip.MakeUnexposedGossip(g),
-			nodeDialer:   nd,
+			nodeDialer:   nodeDialer,
 			grpcServer:   dummyRPCServer,
 			recorder:     dummyRecorder,
 			isMeta1Leaseholder: func(timestamp hlc.Timestamp) (bool, error) {
@@ -513,13 +572,13 @@ func testSQLServerArgs(ts *TestServer) sqlServerArgs {
 				return nil, errors.New("fake external uri storage")
 			},
 		},
-		SQLConfig:                &cfg.SQLConfig,
-		BothConfig:               &cfg.BothConfig,
+		SQLConfig:                &sqlConfig,
+		BothConfig:               &bothConfig,
 		stopper:                  stopper,
 		clock:                    clock,
 		runtime:                  status.NewRuntimeStatSampler(context.Background(), clock),
 		tenantID:                 roachpb.SystemTenantID,
-		db:                       ts.DB(),
+		db:                       db,
 		registry:                 registry,
 		sessionRegistry:          sql.NewSessionRegistry(),
 		circularInternalExecutor: circularInternalExecutor,
@@ -545,6 +604,8 @@ func (ts *TestServer) StartTenant(params base.TestTenantArgs) (pgAddr string, _
 			ClusterSettingsUpdater: args.Settings.MakeUpdater(),
 		}
 	}
+	ts = nil // proves we're not using it below
+
 	s, err := newSQLServer(ctx, args)
 	if err != nil {
 		return "", err
 	}
@@ -566,8 +627,8 @@ func (ts *TestServer) StartTenant(params base.TestTenantArgs) (pgAddr string, _
 	if err != nil {
 		return "", err
 	}
-	ts.Stopper().RunWorker(ctx, func(ctx context.Context) {
-		<-ts.Stopper().ShouldQuiesce()
+	args.stopper.RunWorker(ctx, func(ctx context.Context) {
+		<-args.stopper.ShouldQuiesce()
 		// NB: we can't do this as a Closer because (*Server).ServeWith is
 		// running in a worker and usually sits on accept(pgL) which unblocks
 		// only when pgL closes. In other words, pgL needs to close when
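
For orientation, the KV client stack that testSQLServerArgs now assembles by hand, instead of borrowing from the TestServer, reduces to the following chain (a condensed restatement of the additions above; only the Gossip instance still comes from ts, and the config literals dsCfg and tcsCfg are elided):

	// Condensed from the diff above, not new API: each layer feeds the
	// next, ending in the *kv.DB handed to the tenant SQL server.
	clock := hlc.NewClock(hlc.UnixNano, time.Duration(bothConfig.MaxOffset))
	rpcContext := rpc.NewContextWithTestingKnobs(
		bothConfig.AmbientCtx, bothConfig.Config, clock, stopper, st, rpcTestingKnobs)
	nodeDialer := nodedialer.New(rpcContext, gossip.AddressResolver(g))
	ds := kvcoord.NewDistSender(dsCfg, g)                      // dsCfg carries rpcContext, nodeDialer
	tcsFactory := kvcoord.NewTxnCoordSenderFactory(tcsCfg, ds) // tcsCfg carries clock, txnMetrics
	db := kv.NewDB(bothConfig.AmbientCtx, tcsFactory, clock)

Presumably this mirrors the wiring done when constructing a production Server, which is what lets the tenant SQL server run against its own *kv.DB rather than the TestServer's.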