diff --git a/pkg/server/server.go b/pkg/server/server.go index 7715f2312972..cd7e587aa74a 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -914,7 +914,7 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { // Initialize the pgwire pre-server, which initializes connections, // sets up TLS and reads client status parameters. - pgPreServer := pgwire.MakePreServeConnHandler( + pgPreServer := pgwire.NewPreServeConnHandler( cfg.AmbientCtx, cfg.Config, cfg.Settings, @@ -1040,7 +1040,11 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { sc := newServerController(ctx, node, cfg.BaseConfig.IDContainer, stopper, st, - lateBoundServer, &systemServerWrapper{server: lateBoundServer}, systemTenantNameContainer) + lateBoundServer, + &systemServerWrapper{server: lateBoundServer}, + systemTenantNameContainer, + pgPreServer.SendRoutingError, + ) // Create the debug API server. debugServer := debug.NewServer( @@ -1116,7 +1120,7 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { protectedtsProvider: protectedtsProvider, spanConfigSubscriber: spanConfig.subscriber, spanConfigReporter: spanConfig.reporter, - pgPreServer: &pgPreServer, + pgPreServer: pgPreServer, sqlServer: sqlServer, serverController: sc, externalStorageBuilder: externalStorageBuilder, diff --git a/pkg/server/server_controller.go b/pkg/server/server_controller.go index a6dbfcc4f9f7..e8d5d985546d 100644 --- a/pkg/server/server_controller.go +++ b/pkg/server/server_controller.go @@ -98,6 +98,10 @@ type serverController struct { // testArgs is used when creating new tenant servers. testArgs map[roachpb.TenantName]base.TestSharedProcessTenantArgs + // sendSQLRoutingError is a callback to use to report + // a tenant routing error to the incoming client. + sendSQLRoutingError func(ctx context.Context, conn net.Conn, tenantName roachpb.TenantName) + mu struct { syncutil.Mutex @@ -122,6 +126,7 @@ func newServerController( tenantServerCreator tenantServerCreator, systemServer onDemandServer, systemTenantNameContainer *roachpb.TenantNameContainer, + sendSQLRoutingError func(ctx context.Context, conn net.Conn, tenantName roachpb.TenantName), ) *serverController { c := &serverController{ nodeID: parentNodeID, @@ -130,6 +135,7 @@ func newServerController( testArgs: make(map[roachpb.TenantName]base.TestSharedProcessTenantArgs), stopper: parentStopper, tenantServerCreator: tenantServerCreator, + sendSQLRoutingError: sendSQLRoutingError, } // We make the serverState for the system mock the regular diff --git a/pkg/server/server_controller_sql.go b/pkg/server/server_controller_sql.go index 91778d0a7abb..f2b498064f0c 100644 --- a/pkg/server/server_controller_sql.go +++ b/pkg/server/server_controller_sql.go @@ -76,17 +76,15 @@ func (c *serverController) sqlMux( }) case pgwire.PreServeReady: - tenantName := status.GetTenantName() + tenantName := roachpb.TenantName(status.GetTenantName()) if tenantName == "" { - tenantName = defaultTenantSelect.Get(&c.st.SV) + tenantName = roachpb.TenantName(defaultTenantSelect.Get(&c.st.SV)) } - s, err := c.getServer(ctx, roachpb.TenantName(tenantName)) + s, err := c.getServer(ctx, tenantName) if err != nil { log.Warningf(ctx, "unable to find server for tenant %q: %v", tenantName, err) - // TODO(knz): we might want to send a pg error to the client here. 
- // See: https://github.com/cockroachdb/cockroach/issues/92525 - _ = conn.Close() + c.sendSQLRoutingError(ctx, conn, tenantName) return err } diff --git a/pkg/server/server_controller_test.go b/pkg/server/server_controller_test.go index 1f7cc6cd4c69..0949ce473701 100644 --- a/pkg/server/server_controller_test.go +++ b/pkg/server/server_controller_test.go @@ -62,3 +62,24 @@ func TestServerController(t *testing.T) { require.Error(t, err, "tenant connector requires a CCL binary") // TODO(knz): test something about d } + +func TestSQLErrorUponInvalidTenant(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{ + DisableDefaultTestTenant: true, + }) + defer s.Stopper().Stop(ctx) + + sqlAddr := s.ServingSQLAddr() + db, err := serverutils.OpenDBConnE(sqlAddr, "cluster:nonexistent", false, s.Stopper()) + // Expect no error yet: the connection is opened lazily; an + // error here means the parameters were incorrect. + require.NoError(t, err) + + err = db.Ping() + require.Regexp(t, `service unavailable for target tenant \(nonexistent\)`, err.Error()) +} diff --git a/pkg/server/tenant.go b/pkg/server/tenant.go index 3e3d536e19d9..def74fe85779 100644 --- a/pkg/server/tenant.go +++ b/pkg/server/tenant.go @@ -253,7 +253,7 @@ func NewTenantServer( if !baseCfg.DisableSQLListener { // Initialize the pgwire pre-server, which initializes connections, // sets up TLS and reads client status parameters. - ps := pgwire.MakePreServeConnHandler( + pgPreServer = pgwire.NewPreServeConnHandler( baseCfg.AmbientCtx, baseCfg.Config, args.Settings, @@ -262,10 +262,9 @@ func NewTenantServer( args.monitorAndMetrics.rootSQLMemoryMonitor, false, /* acceptTenantName */ ) - for _, m := range ps.Metrics() { + for _, m := range pgPreServer.Metrics() { args.registry.AddMetricStruct(m) } - pgPreServer = &ps } // Instantiate the SQL server proper. diff --git a/pkg/sql/logictest/logic.go b/pkg/sql/logictest/logic.go index 3288f5b0b519..20ccce092f77 100644 --- a/pkg/sql/logictest/logic.go +++ b/pkg/sql/logictest/logic.go @@ -378,16 +378,16 @@ import ( // Changes the user for subsequent statements or queries. // If nodeidx is specified, this user will connect to the node // in the cluster with index N (note this is 0-indexed, while -// node IDs themselves are 1-indexed). +// node IDs themselves are 1-indexed). Otherwise, it will connect +// to the node with index 0 (node ID 1). +// A "host-cluster-" prefix can be prepended to the user, which will force +// the user session to be against the host cluster (useful for multi-tenant +// configurations). // // - upgrade N // When using a cockroach-go/testserver logictest, upgrades the node at // index N to the version specified by the logictest config. // -// A "host-cluster-" prefix can be prepended to the user, which will force -// the user session to be against the host cluster (useful for multi-tenant -// configurations). -// // - skipif // Skips the following `statement` or `query` if the argument is // postgresql, cockroachdb, or a config matching the currently @@ -985,9 +985,10 @@ type logicTest struct { // If this test uses a SQL tenant server, this is its address. In this case, // all clients are created against this tenant. tenantAddrs []string - // map of built clients. Needs to be persisted so that we can - // re-use them and close them all on exit. - clients map[string]*gosql.DB + // map of built clients, keyed first on username and then node idx. 
+ // They are persisted so that they can be reused. They are not closed + // until the end of a test. + clients map[string]map[int]*gosql.DB // client currently in use. This can change during processing // of a test input file when encountering the "user" directive. // see setUser() for details. @@ -1158,14 +1159,12 @@ func (t *logicTest) outf(format string, args ...interface{}) { fmt.Printf("[%s] %s\n", now, msg) } -// setUser sets the DB client to the specified user. +// setUser sets the DB client to the specified user and connects +// to the node in the cluster at index nodeIdx. // It returns a cleanup function to be run when the credentials // are no longer needed. -func (t *logicTest) setUser(user string, nodeIdxOverride int) func() { - if t.clients == nil { - t.clients = map[string]*gosql.DB{} - } - if db, ok := t.clients[user]; ok { +func (t *logicTest) setUser(user string, nodeIdx int) func() { + if db, ok := t.clients[user][nodeIdx]; ok { t.db = db t.user = user @@ -1173,19 +1172,15 @@ func (t *logicTest) setUser(user string, nodeIdxOverride int) func() { return func() {} } - nodeIdx := t.nodeIdx - if nodeIdxOverride > 0 { - nodeIdx = nodeIdxOverride - } - var addr string var pgURL url.URL var pgUser string var cleanupFunc func() pgUser = strings.TrimPrefix(user, "host-cluster-") if t.cfg.UseCockroachGoTestserver { - pgURL = *t.testserverCluster.PGURL() + pgURL = *t.testserverCluster.PGURLForNode(nodeIdx) pgURL.User = url.User(pgUser) + pgURL.Path = "test" cleanupFunc = func() {} } else { addr = t.cluster.Server(nodeIdx).ServingSQLAddr() @@ -1211,9 +1206,16 @@ func (t *logicTest) setUser(user string, nodeIdxOverride int) func() { if _, err := db.Exec("SET index_recommendations_enabled = false"); err != nil { t.Fatal(err) } - t.clients[user] = db + if t.clients == nil { + t.clients = make(map[string]map[int]*gosql.DB) + } + if t.clients[user] == nil { + t.clients[user] = make(map[int]*gosql.DB) + } + t.clients[user][nodeIdx] = db t.db = db t.user = pgUser + t.nodeIdx = nodeIdx return cleanupFunc } @@ -1304,7 +1306,7 @@ func (t *logicTest) newTestServerCluster(bootstrapBinaryPath string, upgradeBina } t.testserverCluster = ts - t.clusterCleanupFuncs = append(t.clusterCleanupFuncs, t.setUser(username.RootUser, 0 /* nodeIdxOverride */)) + t.clusterCleanupFuncs = append(t.clusterCleanupFuncs, t.setUser(username.RootUser, 0 /* nodeIdx */)) t.clusterCleanupFuncs = append(t.clusterCleanupFuncs, ts.Stop) } @@ -1721,7 +1723,7 @@ func (t *logicTest) newCluster( // db may change over the lifetime of this function, with intermediate // values cached in t.clients and finally closed in t.close(). 
- t.clusterCleanupFuncs = append(t.clusterCleanupFuncs, t.setUser(username.RootUser, 0 /* nodeIdxOverride */)) + t.clusterCleanupFuncs = append(t.clusterCleanupFuncs, t.setUser(username.RootUser, 0 /* nodeIdx */)) } // waitForTenantReadOnlyClusterSettingToTakeEffectOrFatal waits until all tenant @@ -1774,8 +1776,10 @@ func (t *logicTest) shutdownCluster() { t.cluster = nil } if t.clients != nil { - for _, c := range t.clients { - c.Close() + for _, userClients := range t.clients { + for _, c := range userClients { + c.Close() + } } t.clients = nil } @@ -2173,22 +2177,24 @@ func (t *logicTest) processTestFile(path string, config logictestbase.TestCluste } func (t *logicTest) hasOpenTxns(ctx context.Context) bool { - for _, user := range t.clients { - existingTxnPriority := "NORMAL" - err := user.QueryRow("SHOW TRANSACTION PRIORITY").Scan(&existingTxnPriority) - if err != nil { - // If we are unable to see transaction priority assume we're in the middle - // of an explicit txn. - log.Warningf(ctx, "failed to check txn priority with %v", err) - return true - } - if _, err := user.Exec("SET TRANSACTION PRIORITY NORMAL;"); !testutils.IsError(err, "there is no transaction in progress") { - // Reset the txn priority to what it was before we checked for open txns. - _, err := user.Exec(fmt.Sprintf(`SET TRANSACTION PRIORITY %s`, existingTxnPriority)) + for _, userClients := range t.clients { + for _, user := range userClients { + existingTxnPriority := "NORMAL" + err := user.QueryRow("SHOW TRANSACTION PRIORITY").Scan(&existingTxnPriority) if err != nil { - log.Warningf(ctx, "failed to reset txn priority with %v", err) + // If we are unable to see transaction priority assume we're in the middle + // of an explicit txn. + log.Warningf(ctx, "failed to check txn priority with %v", err) + return true + } + if _, err := user.Exec("SET TRANSACTION PRIORITY NORMAL;"); !testutils.IsError(err, "there is no transaction in progress") { + // Reset the txn priority to what it was before we checked for open txns. + _, err := user.Exec(fmt.Sprintf(`SET TRANSACTION PRIORITY %s`, existingTxnPriority)) + if err != nil { + log.Warningf(ctx, "failed to reset txn priority with %v", err) + } + return true } - return true } } return false @@ -2222,8 +2228,9 @@ func (t *logicTest) maybeBackupRestore( } oldUser := t.user + oldNodeIdx := t.nodeIdx defer func() { - t.setUser(oldUser, 0 /* nodeIdxOverride */) + t.setUser(oldUser, oldNodeIdx) }() log.Info(context.Background(), "Running cluster backup and restore") @@ -2235,52 +2242,56 @@ func (t *logicTest) maybeBackupRestore( // TODO(adityamaru): A better approach might be to wipe the cluster once we // have a command that enables this. That way all of the session data will not // be lost in the process of creating a new cluster. - users := make([]string, 0, len(t.clients)) - userToHexSession := make(map[string]string, len(t.clients)) - userToSessionVars := make(map[string]map[string]string, len(t.clients)) - for user := range t.clients { - t.setUser(user, 0 /* nodeIdxOverride */) - users = append(users, user) - - // Serialize session variables. - var userSession string - var err error - if err = t.db.QueryRow(`SELECT encode(crdb_internal.serialize_session(), 'hex')`).Scan(&userSession); err == nil { - userToHexSession[user] = userSession - continue - } - log.Warningf(context.Background(), "failed to serialize session: %+v", err) - - // If we failed to serialize the session variables, lets save the output of - // `SHOW ALL`. 
This usually happens if the session contains prepared - // statements or portals that cause the `serialize_session()` to fail. - // - // Saving the session variables in this manner does not guarantee the test - // will succeed since there are no ordering semantics when we go to apply - // them. There are some session variables that need to be applied before - // others for them to be valid. Thus, it is strictly better to use - // `serialize/deserialize_session()`, this "hack" just gives the test one - // more chance to succeed. - userSessionVars := make(map[string]string) - existingSessionVars, err := t.db.Query("SHOW ALL") - if err != nil { - return err - } - for existingSessionVars.Next() { - var key, value string - if err := existingSessionVars.Scan(&key, &value); err != nil { - return errors.Wrap(err, "scanning session variables") + users := make(map[string][]int, len(t.clients)) + userToHexSession := make(map[string]map[int]string, len(t.clients)) + userToSessionVars := make(map[string]map[int]map[string]string, len(t.clients)) + for user, userClients := range t.clients { + userToHexSession[user] = make(map[int]string) + userToSessionVars[user] = make(map[int]map[string]string) + for nodeIdx := range userClients { + users[user] = append(users[user], nodeIdx) + t.setUser(user, nodeIdx) + + // Serialize session variables. + var userSession string + var err error + if err = t.db.QueryRow(`SELECT encode(crdb_internal.serialize_session(), 'hex')`).Scan(&userSession); err == nil { + userToHexSession[user][nodeIdx] = userSession + continue + } + log.Warningf(context.Background(), "failed to serialize session: %+v", err) + + // If we failed to serialize the session variables, lets save the output of + // `SHOW ALL`. This usually happens if the session contains prepared + // statements or portals that cause the `serialize_session()` to fail. + // + // Saving the session variables in this manner does not guarantee the test + // will succeed since there are no ordering semantics when we go to apply + // them. There are some session variables that need to be applied before + // others for them to be valid. Thus, it is strictly better to use + // `serialize/deserialize_session()`, this "hack" just gives the test one + // more chance to succeed. + userSessionVars := make(map[string]string) + existingSessionVars, err := t.db.Query("SHOW ALL") + if err != nil { + return err + } + for existingSessionVars.Next() { + var key, value string + if err := existingSessionVars.Scan(&key, &value); err != nil { + return errors.Wrap(err, "scanning session variables") + } + userSessionVars[key] = value } - userSessionVars[key] = value + userToSessionVars[user][nodeIdx] = userSessionVars } - userToSessionVars[user] = userSessionVars } backupLocation := fmt.Sprintf("gs://cockroachdb-backup-testing/logic-test-backup-restore-nightly/%s?AUTH=implicit", strconv.FormatInt(timeutil.Now().UnixNano(), 10)) // Perform the backup and restore as root. - t.setUser(username.RootUser, 0 /* nodeIdxOverride */) + t.setUser(username.RootUser, 0 /* nodeIdx */) if _, err := t.db.Exec(fmt.Sprintf("BACKUP INTO '%s'", backupLocation)); err != nil { return errors.Wrap(err, "backing up cluster") @@ -2291,7 +2302,7 @@ func (t *logicTest) maybeBackupRestore( t.resetCluster() // Run the restore as root. 
- t.setUser(username.RootUser, 0 /* nodeIdxOverride */) + t.setUser(username.RootUser, 0 /* nodeIdx */) if _, err := t.db.Exec(fmt.Sprintf("RESTORE FROM LATEST IN '%s'", backupLocation)); err != nil { return errors.Wrap(err, "restoring cluster") } @@ -2300,29 +2311,31 @@ func (t *logicTest) maybeBackupRestore( // Create new connections for the existing users, and restore the session // variables that we collected. - for _, user := range users { - // Call setUser for every user to create the connection for that user. - t.setUser(user, 0 /* nodeIdxOverride */) - - if userSession, ok := userToHexSession[user]; ok { - if _, err := t.db.Exec(fmt.Sprintf(`SELECT crdb_internal.deserialize_session(decode('%s', 'hex'))`, userSession)); err != nil { - return errors.Wrapf(err, "deserializing session") - } - } else if vars, ok := userToSessionVars[user]; ok { - // We now attempt to restore the session variables that were set on the - // backing up cluster. These are not included in the backup restore and so - // have to be restored manually. - for key, value := range vars { - // First try setting the cluster setting as a string. - if _, err := t.db.Exec(fmt.Sprintf("SET %s='%s'", key, value)); err != nil { - // If it fails, try setting the value as an int. - log.Infof(context.Background(), "setting session variable as string failed (err: %v), trying as int", pretty.Formatter(err)) - if _, err := t.db.Exec(fmt.Sprintf("SET %s=%s", key, value)); err != nil { - // Some cluster settings can't be set at all, so ignore these errors. - // If a setting that we needed could not be restored, we expect the - // logic test to fail and let us know. - log.Infof(context.Background(), "setting session variable as int failed: %v (continuing anyway)", pretty.Formatter(err)) - continue + for user, userNodeIdxs := range users { + for _, nodeIdx := range userNodeIdxs { + // Call setUser for every user to create the connection for that user. + t.setUser(user, nodeIdx) + + if userSession, ok := userToHexSession[user][nodeIdx]; ok { + if _, err := t.db.Exec(fmt.Sprintf(`SELECT crdb_internal.deserialize_session(decode('%s', 'hex'))`, userSession)); err != nil { + return errors.Wrapf(err, "deserializing session") + } + } else if vars, ok := userToSessionVars[user][nodeIdx]; ok { + // We now attempt to restore the session variables that were set on the + // backing up cluster. These are not included in the backup restore and so + // have to be restored manually. + for key, value := range vars { + // First try setting the cluster setting as a string. + if _, err := t.db.Exec(fmt.Sprintf("SET %s='%s'", key, value)); err != nil { + // If it fails, try setting the value as an int. + log.Infof(context.Background(), "setting session variable as string failed (err: %v), trying as int", pretty.Formatter(err)) + if _, err := t.db.Exec(fmt.Sprintf("SET %s=%s", key, value)); err != nil { + // Some cluster settings can't be set at all, so ignore these errors. + // If a setting that we needed could not be restored, we expect the + // logic test to fail and let us know. 
+ log.Infof(context.Background(), "setting session variable as int failed: %v (continuing anyway)", pretty.Formatter(err)) + continue + } } } } @@ -2575,6 +2588,7 @@ func (t *logicTest) processSubtest( case "query": var query logicQuery query.pos = fmt.Sprintf("\n%s:%d", path, s.Line+subtest.lineLineIndexIntoFile) + query.nodeIdx = t.nodeIdx // Parse "query error " if m := errorRE.FindStringSubmatch(s.Text()); m != nil { query.expectErrCode = m[1] @@ -3298,7 +3312,7 @@ func (t *logicTest) execQuery(query logicQuery) error { db := t.db var closeDB func() - if query.nodeIdx != 0 { + if query.nodeIdx != t.nodeIdx { var pgURL url.URL if t.testserverCluster != nil { pgURL = *t.testserverCluster.PGURLForNode(query.nodeIdx) @@ -3833,16 +3847,18 @@ func (t *logicTest) validateAfterTestCompletion() error { } // Close all clients other than "root" - for username, c := range t.clients { - if username == "root" { + for user, userClients := range t.clients { + if user == username.RootUser { continue } - delete(t.clients, username) - if err := c.Close(); err != nil { - t.Fatalf("failed to close connection for user %s: %v", username, err) + for i, c := range userClients { + if err := c.Close(); err != nil { + t.Fatalf("failed to close connection to node %d for user %s: %v", i, user, err) + } } + delete(t.clients, user) } - t.setUser("root", 0 /* nodeIdxOverride */) + t.setUser(username.RootUser, 0 /* nodeIdx */) // Some cleanup to make sure the following validation queries can run // successfully. First we rollback in case the logic test had an uncommitted diff --git a/pkg/sql/logictest/testdata/logic_test/testserver_upgrade_node b/pkg/sql/logictest/testdata/logic_test/testserver_upgrade_node index 2cea5314e814..e6669d32954e 100644 --- a/pkg/sql/logictest/testdata/logic_test/testserver_upgrade_node +++ b/pkg/sql/logictest/testdata/logic_test/testserver_upgrade_node @@ -30,6 +30,29 @@ SELECT crdb_internal.node_executable_version() ---- 22.2 +# start of test that user cmd nodeidx opt works when used for a previously used user + +user root nodeidx=2 + +query T +SELECT crdb_internal.node_executable_version() +---- +22.2 + +query B +SELECT crdb_internal.node_executable_version() SIMILAR TO '1000022.2-%' +---- +false + +user root nodeidx=0 + +query B +SELECT crdb_internal.node_executable_version() SIMILAR TO '1000022.2-%' +---- +true + +# end of test that user cmd nodeidx opt works when used for a previously used user + upgrade 2 query B diff --git a/pkg/sql/pgwire/BUILD.bazel b/pkg/sql/pgwire/BUILD.bazel index 8fb3faf52481..a5b090b7b20e 100644 --- a/pkg/sql/pgwire/BUILD.bazel +++ b/pkg/sql/pgwire/BUILD.bazel @@ -25,6 +25,7 @@ go_library( "//pkg/base", "//pkg/clusterversion", "//pkg/col/coldata", + "//pkg/roachpb", "//pkg/security", "//pkg/security/password", "//pkg/security/sessionrevival", diff --git a/pkg/sql/pgwire/pre_serve.go b/pkg/sql/pgwire/pre_serve.go index e9d2a975c29e..b51592b66787 100644 --- a/pkg/sql/pgwire/pre_serve.go +++ b/pkg/sql/pgwire/pre_serve.go @@ -19,6 +19,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/hba" @@ -122,9 +123,9 @@ type PreServeConnHandler struct { acceptTenantName bool } -// MakePreServeConnHandler creates a PreServeConnHandler. +// NewPreServeConnHandler creates a PreServeConnHandler. // sv refers to the setting values "outside" of the current tenant - i.e. 
from the storage cluster. -func MakePreServeConnHandler( +func NewPreServeConnHandler( ambientCtx log.AmbientContext, cfg *base.Config, st *cluster.Settings, @@ -132,7 +133,7 @@ func MakePreServeConnHandler( histogramWindow time.Duration, parentMemoryMonitor *mon.BytesMonitor, acceptTenantName bool, -) PreServeConnHandler { +) *PreServeConnHandler { ctx := ambientCtx.AnnotateCtx(context.Background()) metrics := makeTenantIndependentMetrics(histogramWindow) s := PreServeConnHandler{ @@ -156,7 +157,7 @@ func MakePreServeConnHandler( // TODO(knz,ben): Use a cluster setting for this. s.trustClientProvidedRemoteAddr.Set(trustClientProvidedRemoteAddrOverride) - return s + return &s } // AnnotateCtxForIncomingConn annotates the provided context with a @@ -203,6 +204,20 @@ func (s *PreServeConnHandler) Metrics() (res []interface{}) { return []interface{}{&s.tenantIndependentMetrics} } +// SendRoutingError informs the client that they selected an invalid +// cluster and closes the connection. +func (s *PreServeConnHandler) SendRoutingError( + ctx context.Context, conn net.Conn, tenantName roachpb.TenantName, +) { + err := errors.WithHint( + pgerror.Newf(pgcode.ConnectionException, + "service unavailable for target tenant (%v)", tenantName), + `Double check your "-ccluster=" connection option or your "cluster:" database name prefix.`) + + _ = s.sendErr(ctx, conn, err) + _ = conn.Close() +} + // sendErr sends errors to the client during the connection startup // sequence. Later error sends during/after authentication are handled // in conn.go.
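
Reviewer note (not part of the patch): a minimal client-side sketch of the behavior this change introduces. Before the patch, routing to an unknown tenant simply closed the connection; now the client gets a pgwire error. The sketch assumes a locally running insecure single-node cluster on `127.0.0.1:26257` and the `lib/pq` driver; the `cluster:` database-name prefix and the expected error text are taken from `TestSQLErrorUponInvalidTenant` and `SendRoutingError` above, everything else is illustrative.

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq" // assumed Postgres driver for the sketch
)

func main() {
	// "cluster:nonexistent" asks the server controller to route the
	// connection to a tenant that does not exist.
	db, err := sql.Open("postgres",
		"postgresql://root@127.0.0.1:26257/cluster:nonexistent?sslmode=disable")
	if err != nil {
		panic(err) // the open itself is lazy; an error here means bad parameters
	}
	defer db.Close()

	// With this patch the server responds with a ConnectionException before
	// closing the connection, so the client sees something like:
	//   service unavailable for target tenant (nonexistent)
	// together with the hint about "-ccluster=" / the "cluster:" prefix.
	if err := db.Ping(); err != nil {
		fmt.Println("routing error:", err)
	}
}
```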