From 63082e0bf808644bd54ad9fa2cf1228ccc7fdb34 Mon Sep 17 00:00:00 2001
From: Kyle Patron
Date: Thu, 11 May 2023 11:27:32 -0400
Subject: [PATCH 1/5] jwtauthccl: allow cluster SSO to pick principals from
 custom claims

Previously, cluster SSO (JWT authentication) required user principals to
be in the subject field of the JWT, which the JWT specification requires
to be a single string. A customer wants the set of SQL users that a human
is allowed to log in as to be specified by their IdP via a "groups" field
in the JWT.

To accommodate this, a new cluster setting
(server.jwt_authentication.claim) has been introduced; when populated, it
specifies which claim of the JWT to read the principal from. The claim's
value can be either a string or an array of strings. The resulting
principal(s) are then fed through the identity map, as before, to produce
the set of SQL users that the token can log in as. As before, a human can
choose which SQL user to log in as via the username field, as long as it
matches one of the users the token is valid for.

Fixes: CC-24595

Release note (enterprise change): Cluster SSO (JWT authentication) can now
read SQL usernames from any JWT claim instead of requiring the subject
claim. The claim is controlled by the `server.jwt_authentication.claim`
cluster setting; an empty string or "sub" is equivalent to the previous
behavior.
---
 pkg/ccl/jwtauthccl/authentication_jwt.go      |  68 ++++++--
 pkg/ccl/jwtauthccl/authentication_jwt_test.go | 164 +++++++++++++++---
 pkg/ccl/jwtauthccl/settings.go                |  13 ++
 pkg/ccl/testccl/authccl/auth_test.go          |   5 +
 pkg/ccl/testccl/authccl/testdata/jwt          |  62 ++++++-
 5 files changed, 274 insertions(+), 38 deletions(-)

diff --git a/pkg/ccl/jwtauthccl/authentication_jwt.go b/pkg/ccl/jwtauthccl/authentication_jwt.go
index 493ae22681a2..15bf60769a77 100644
--- a/pkg/ccl/jwtauthccl/authentication_jwt.go
+++ b/pkg/ccl/jwtauthccl/authentication_jwt.go
@@ -10,6 +10,7 @@ package jwtauthccl

 import (
 	"context"
+	"fmt"

 	"github.com/cockroachdb/cockroach/pkg/ccl/utilccl"
 	"github.com/cockroachdb/cockroach/pkg/security/username"
@@ -64,6 +65,7 @@ type jwtAuthenticatorConf struct {
 	enabled bool
 	issuers []string
 	jwks    jwk.Set
+	claim   string
 }

 // reloadConfig locks mutex and then refreshes the values in conf from the cluster settings.
@@ -82,6 +84,7 @@ func (authenticator *jwtAuthenticator) reloadConfigLocked(
 		enabled: JWTAuthEnabled.Get(&st.SV),
 		issuers: mustParseValueOrArray(JWTAuthIssuers.Get(&st.SV)),
 		jwks:    mustParseJWKS(JWTAuthJWKS.Get(&st.SV)),
+		claim:   JWTAuthClaim.Get(&st.SV),
 	}

 	if !authenticator.mu.conf.enabled && conf.enabled {
@@ -147,23 +150,63 @@ func (authenticator *jwtAuthenticator) ValidateJWTLogin(
 			"token issued by %s", parsedToken.Issuer())
 	}

-	users, err := authenticator.mapUsername(parsedToken.Subject(), parsedToken.Issuer(), identMap)
-	if err != nil || len(users) == 0 {
+	// Extract all requested principals from the token. By default, they are taken from the subject
+	// unless an alternate claim to pull them from is specified.
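+	// For example (illustrative values, not from any real deployment): with
+	// server.jwt_authentication.claim set to "groups" and a token payload of
+	// {"groups": ["eng", "ops"], ...}, both "eng" and "ops" are extracted as
+	// principals and fed through the identity map below.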
+	var tokenPrincipals []string
+	if authenticator.mu.conf.claim == "" || authenticator.mu.conf.claim == "sub" {
+		tokenPrincipals = []string{parsedToken.Subject()}
+	} else {
+		claimValue, ok := parsedToken.Get(authenticator.mu.conf.claim)
+		if !ok {
+			return errors.WithDetailf(
+				errors.Newf("JWT authentication: missing claim"),
+				"token does not contain a claim for %s", authenticator.mu.conf.claim)
+		}
+		switch castClaimValue := claimValue.(type) {
+		case string:
+			// Accept a single string value.
+			tokenPrincipals = []string{castClaimValue}
+		case []interface{}:
+			// Iterate over the slice and add the string form of each element to tokenPrincipals.
+			for _, maybePrincipal := range castClaimValue {
+				tokenPrincipals = append(tokenPrincipals, fmt.Sprint(maybePrincipal))
+			}
+		case []string:
+			// The library does not appear to produce this case today, but handle it in case that
+			// implementation detail changes.
+			tokenPrincipals = castClaimValue
+		default:
+			tokenPrincipals = []string{fmt.Sprint(castClaimValue)}
+		}
+	}
+
+	// Take the principals from the token and send each of them through the identity map to generate the
+	// list of usernames that this token is valid authentication for.
+	var acceptedUsernames []username.SQLUsername
+	for _, tokenPrincipal := range tokenPrincipals {
+		mappedUsernames, err := authenticator.mapUsername(tokenPrincipal, parsedToken.Issuer(), identMap)
+		if err != nil {
+			return errors.WithDetailf(
+				errors.Newf("JWT authentication: invalid claim value"),
+				"the value %s for the issuer %s is invalid", tokenPrincipal, parsedToken.Issuer())
+		}
+		acceptedUsernames = append(acceptedUsernames, mappedUsernames...)
+	}
+	if len(acceptedUsernames) == 0 {
 		return errors.WithDetailf(
-			errors.Newf("JWT authentication: invalid subject"),
-			"the subject %s for the issuer %s is invalid", parsedToken.Subject(), parsedToken.Issuer())
+			errors.Newf("JWT authentication: invalid principal"),
+			"the value %s for the issuer %s is invalid", tokenPrincipals, parsedToken.Issuer())
 	}

-	subjectMatch := false
-	for _, subject := range users {
-		if subject.Normalized() == user.Normalized() {
-			subjectMatch = true
+	principalMatch := false
+	for _, acceptedUsername := range acceptedUsernames {
+		if acceptedUsername.Normalized() == user.Normalized() {
+			principalMatch = true
 			break
 		}
 	}
-	if !subjectMatch {
+	if !principalMatch {
 		return errors.WithDetailf(
-			errors.Newf("JWT authentication: invalid subject"),
-			"token issued for %s and login was for %s", parsedToken.Subject(), user.Normalized())
+			errors.Newf("JWT authentication: invalid principal"),
+			"token issued for %s and login was for %s", tokenPrincipals, user.Normalized())
 	}
 	if user.IsRootUser() || user.IsReserved() {
 		return errors.WithDetailf(
@@ -216,6 +259,9 @@ var ConfigureJWTAuth = func(
 	JWTAuthJWKS.SetOnChange(&st.SV, func(ctx context.Context) {
 		authenticator.reloadConfig(ambientCtx.AnnotateCtx(ctx), st)
 	})
+	JWTAuthClaim.SetOnChange(&st.SV, func(ctx context.Context) {
+		authenticator.reloadConfig(ambientCtx.AnnotateCtx(ctx), st)
+	})

 	return &authenticator
 }
diff --git a/pkg/ccl/jwtauthccl/authentication_jwt_test.go b/pkg/ccl/jwtauthccl/authentication_jwt_test.go
index f929edde7b97..0a83c37bc477 100644
--- a/pkg/ccl/jwtauthccl/authentication_jwt_test.go
+++ b/pkg/ccl/jwtauthccl/authentication_jwt_test.go
@@ -46,6 +46,8 @@ var audience2 = "audience_2"
 var issuer1 = "issuer1"
 var issuer2 = "issuer2"

+var customClaimName = "groups"
+
 func createJWKS(t *testing.T) (jwk.Set, jwk.Key, jwk.Key) {
 	key1 := createRSAKey(t, keyID1)
 	key2 := createECDSAKey(t, keyID2)
@@
-88,12 +90,17 @@ func createJWT( expiredAt time.Time, key jwk.Key, algorithm jwa.SignatureAlgorithm, + customClaimName string, + customClaimValue interface{}, ) []byte { token := jwt.New() require.NoError(t, token.Set(jwt.SubjectKey, subject)) require.NoError(t, token.Set(jwt.AudienceKey, audience)) require.NoError(t, token.Set(jwt.IssuerKey, issuer)) require.NoError(t, token.Set(jwt.ExpirationKey, expiredAt)) + if customClaimName != "" { + require.NoError(t, token.Set(customClaimName, customClaimValue)) + } signedTokenBytes, err := jwt.Sign(token, algorithm, key) require.NoError(t, err) return signedTokenBytes @@ -124,7 +131,7 @@ func TestJWTEnabledCheck(t *testing.T) { verifier := ConfigureJWTAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) key := createRSAKey(t, keyID1) - token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256) + token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") // JWT auth is not enabled. err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username1), token, identMap) require.ErrorContains(t, err, "JWT authentication: not enabled") @@ -151,7 +158,7 @@ func TestJWTSingleKey(t *testing.T) { verifier := ConfigureJWTAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) _, key, _ := createJWKS(t) - token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256) + token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") publicKey, err := key.PublicKey() require.NoError(t, err) jwkPublicKey := serializePublicKey(t, publicKey) @@ -182,7 +189,7 @@ func TestJWTSingleKeyWithoutKeyAlgorithm(t *testing.T) { verifier := ConfigureJWTAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) _, key, _ := createJWKS(t) - token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256) + token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") // Clear the algorithm. require.NoError(t, key.Remove(jwk.AlgorithmKey)) publicKey, err := key.PublicKey() @@ -214,7 +221,7 @@ func TestJWTMultiKey(t *testing.T) { // Make sure jwt auth is enabled. JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true) keySet, key, key2 := createJWKS(t) - token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key2, jwa.ES384) + token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key2, jwa.ES384, "", "") publicKey, err := key.PublicKey() require.NoError(t, err) keySetWithOneKey := jwk.NewSet() @@ -249,7 +256,7 @@ func TestExpiredToken(t *testing.T) { // Make sure jwt auth is enabled and accepts valid signing keys. JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true) keySet, key, _ := createJWKS(t) - token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(-1*time.Second), key, jwa.RS256) + token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(-1*time.Second), key, jwa.RS256, "", "") JWTAuthJWKS.Override(ctx, &s.ClusterSettings().SV, serializePublicKeySet(t, keySet)) verifier := ConfigureJWTAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) @@ -271,7 +278,7 @@ func TestKeyIdMismatch(t *testing.T) { keySet, key, _ := createJWKS(t) // Create a JWT with different key id. 
require.NoError(t, key.Set(jwk.KeyIDKey, invalidKeyID)) - token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256) + token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") // Make sure jwt auth is enabled and accepts valid signing keys. JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true) JWTAuthJWKS.Override(ctx, &s.ClusterSettings().SV, serializePublicKeySet(t, keySet)) @@ -283,7 +290,7 @@ func TestKeyIdMismatch(t *testing.T) { // Reset the key id and regenerate the token. require.NoError(t, key.Set(jwk.KeyIDKey, keyID1)) - token = createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256) + token = createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") // Now jwk1 token passes the validity check and fails on the next check. err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(invalidUsername), token, identMap) require.ErrorContains(t, err, "JWT authentication: invalid issuer") @@ -300,8 +307,8 @@ func TestIssuerCheck(t *testing.T) { identMap, err := identmap.From(strings.NewReader(identMapString)) require.NoError(t, err) keySet, key, _ := createJWKS(t) - token1 := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256) - token2 := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key, jwa.RS256) + token1 := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") + token2 := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") // Make sure jwt auth is enabled, accepts jwk1 or jwk2 as valid signing keys, accepts the audience of "test_cluster" // and the issuer of "issuer2". @@ -317,18 +324,18 @@ func TestIssuerCheck(t *testing.T) { // Validation succeeds when the issuer in the token is equal to the cluster's accepted issuers. err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(invalidUsername), token2, identMap) - require.ErrorContains(t, err, "JWT authentication: invalid subject") + require.ErrorContains(t, err, "JWT authentication: invalid principal") // Set the cluster setting to accept issuer values of either "issuer" or "issuer2". JWTAuthIssuers.Override(ctx, &s.ClusterSettings().SV, "[\""+issuer1+"\", \""+issuer2+"\"]") // Validation succeeds when the issuer in the token is an element of the cluster's accepted issuers. err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(invalidUsername), token1, identMap) - require.ErrorContains(t, err, "JWT authentication: invalid subject") + require.ErrorContains(t, err, "JWT authentication: invalid principal") // Validation succeeds when the issuer in the token is an element of the cluster's accepted issuers. 
err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(invalidUsername), token2, identMap) - require.ErrorContains(t, err, "JWT authentication: invalid subject") + require.ErrorContains(t, err, "JWT authentication: invalid principal") } @@ -343,7 +350,7 @@ func TestSubjectCheck(t *testing.T) { identMap, err := identmap.From(strings.NewReader(identMapString)) require.NoError(t, err) keySet, _, key2 := createJWKS(t) - token := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384) + token := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384, "", "") // Make sure jwt auth is enabled and accepts jwk1 or jwk2 as valid signing keys. JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true) @@ -354,7 +361,7 @@ func TestSubjectCheck(t *testing.T) { // Validation fails with a subject error when a user tries to log in with a user named // "invalid" but the token is for the user "test2". err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(invalidUsername), token, identMap) - require.ErrorContains(t, err, "JWT authentication: invalid subject") + require.ErrorContains(t, err, "JWT authentication: invalid principal") // Validation passes the subject check when the username matches the subject and then fails on the next // check (audience field not matching). @@ -362,6 +369,121 @@ func TestSubjectCheck(t *testing.T) { require.ErrorContains(t, err, "JWT authentication: invalid audience") } +func TestClaimMissing(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) + defer s.Stopper().Stop(ctx) + identMapString := "" + identMap, err := identmap.From(strings.NewReader(identMapString)) + require.NoError(t, err) + keySet, _, key2 := createJWKS(t) + missingClaimToken := createJWT(t, invalidUsername, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384, "", "") + + // Make sure jwt auth is enabled and accepts jwk1 or jwk2 as valid signing keys. + JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true) + JWTAuthJWKS.Override(ctx, &s.ClusterSettings().SV, serializePublicKeySet(t, keySet)) + verifier := ConfigureJWTAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) + JWTAuthIssuers.Override(ctx, &s.ClusterSettings().SV, issuer2) + JWTAuthClaim.Override(ctx, &s.ClusterSettings().SV, customClaimName) + + // Validation fails with missing claim + err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(invalidUsername), missingClaimToken, identMap) + require.ErrorContains(t, err, "JWT authentication: missing claim") +} + +func TestIntegerClaimValue(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) + defer s.Stopper().Stop(ctx) + // map the value 1 to a valid user + identMapString := issuer2 + " 1 " + username1 + identMap, err := identmap.From(strings.NewReader(identMapString)) + require.NoError(t, err) + keySet, _, key2 := createJWKS(t) + intClaimToken := createJWT(t, invalidUsername, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384, customClaimName, 1) + + // Make sure jwt auth is enabled and accepts jwk1 or jwk2 as valid signing keys. 
+	JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true)
+	JWTAuthJWKS.Override(ctx, &s.ClusterSettings().SV, serializePublicKeySet(t, keySet))
+	verifier := ConfigureJWTAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID())
+	JWTAuthIssuers.Override(ctx, &s.ClusterSettings().SV, issuer2)
+	JWTAuthClaim.Override(ctx, &s.ClusterSettings().SV, customClaimName)
+
+	// The integer claim value is implicitly cast to a string, so the principal check passes and the
+	// login fails on the next check (audience field not matching).
+	err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username1), intClaimToken, identMap)
+	require.ErrorContains(t, err, "JWT authentication: invalid audience")
+}
+
+func TestSingleClaim(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+	defer log.Scope(t).Close(t)
+
+	ctx := context.Background()
+	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
+	defer s.Stopper().Stop(ctx)
+	identMapString := ""
+	identMap, err := identmap.From(strings.NewReader(identMapString))
+	require.NoError(t, err)
+	keySet, _, key2 := createJWKS(t)
+	token := createJWT(t, invalidUsername, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384, customClaimName, username1)
+
+	// Make sure jwt auth is enabled and accepts jwk1 or jwk2 as valid signing keys.
+	JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true)
+	JWTAuthJWKS.Override(ctx, &s.ClusterSettings().SV, serializePublicKeySet(t, keySet))
+	verifier := ConfigureJWTAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID())
+	JWTAuthIssuers.Override(ctx, &s.ClusterSettings().SV, issuer2)
+	JWTAuthClaim.Override(ctx, &s.ClusterSettings().SV, customClaimName)
+
+	// Validation fails with a principal error when a user tries to log in with a user named
+	// "invalid" but the token's groups claim contains only username1.
+	err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(invalidUsername), token, identMap)
+	require.ErrorContains(t, err, "JWT authentication: invalid principal")
+
+	// Validation passes the principal check when the username matches the claim value and then fails on
+	// the next check (audience field not matching).
+	err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username1), token, identMap)
+	require.ErrorContains(t, err, "JWT authentication: invalid audience")
+}
+
+func TestMultipleClaim(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+	defer log.Scope(t).Close(t)
+
+	ctx := context.Background()
+	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
+	defer s.Stopper().Stop(ctx)
+	identMapString := ""
+	identMap, err := identmap.From(strings.NewReader(identMapString))
+	require.NoError(t, err)
+	keySet, _, key2 := createJWKS(t)
+	token := createJWT(t, invalidUsername, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384, customClaimName, []string{username2, username1})
+
+	// Make sure jwt auth is enabled and accepts jwk1 or jwk2 as valid signing keys.
+	JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true)
+	JWTAuthJWKS.Override(ctx, &s.ClusterSettings().SV, serializePublicKeySet(t, keySet))
+	verifier := ConfigureJWTAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID())
+	JWTAuthIssuers.Override(ctx, &s.ClusterSettings().SV, issuer2)
+	JWTAuthClaim.Override(ctx, &s.ClusterSettings().SV, customClaimName)
+
+	// Validation fails with a principal error when a user tries to log in with a user named
+	// "invalid" but the token's groups claim contains only username1 and username2.
+	err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(invalidUsername), token, identMap)
+	require.ErrorContains(t, err, "JWT authentication: invalid principal")
+
+	// Validation passes the principal check when the username matches one of the claim values and then
+	// fails on the next check (audience field not matching).
+	err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username1), token, identMap)
+	require.ErrorContains(t, err, "JWT authentication: invalid audience")
+	err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username2), token, identMap)
+	require.ErrorContains(t, err, "JWT authentication: invalid audience")
+}
+
 func TestSubjectMappingCheck(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
@@ -374,8 +496,8 @@ func TestSubjectMappingCheck(t *testing.T) {
 	identMap, err := identmap.From(strings.NewReader(identMapString))
 	require.NoError(t, err)
 	keySet, _, key2 := createJWKS(t)
-	token := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384)
-	token2 := createJWT(t, username2, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384)
+	token := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384, "", "")
+	token2 := createJWT(t, username2, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384, "", "")

 	// Make sure jwt auth is enabled and accepts jwk1 or jwk2 as valid signing keys.
 	JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true)
@@ -386,11 +508,11 @@ func TestSubjectMappingCheck(t *testing.T) {

 	// Validation fails with a subject error when a user tries to log in when their user is mapped to username2
 	// but they try to log in with username1.
 	err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username1), token, identMap)
-	require.ErrorContains(t, err, "JWT authentication: invalid subject")
+	require.ErrorContains(t, err, "JWT authentication: invalid principal")

 	// Validation fails if there is a map for the issuer but no mapping rule matches.
 	err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username1), token2, identMap)
-	require.ErrorContains(t, err, "JWT authentication: invalid subject")
+	require.ErrorContains(t, err, "JWT authentication: invalid principal")

 	// Validation passes the subject check when the username matches the mapped subject.
 	err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username2), token, identMap)
@@ -409,8 +531,8 @@ func TestSubjectReservedUser(t *testing.T) {
 	identMap, err := identmap.From(strings.NewReader(identMapString))
 	require.NoError(t, err)
 	keySet, _, key2 := createJWKS(t)
-	token := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384)
-	token2 := createJWT(t, "root", audience1, issuer1, timeutil.Now().Add(time.Hour), key2, jwa.ES384)
+	token := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384, "", "")
+	token2 := createJWT(t, "root", audience1, issuer1, timeutil.Now().Add(time.Hour), key2, jwa.ES384, "", "")

 	// Make sure jwt auth is enabled and accepts jwk1 or jwk2 as valid signing keys.
 	JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true)
@@ -437,7 +559,7 @@ func TestAudienceCheck(t *testing.T) {
 	identMap, err := identmap.From(strings.NewReader(identMapString))
 	require.NoError(t, err)
 	keySet, key, _ := createJWKS(t)
-	token := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key, jwa.RS256)
+	token := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "")

 	// Make sure jwt auth is enabled and accepts jwk1 or jwk2 as valid signing keys.
 	JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true)
diff --git a/pkg/ccl/jwtauthccl/settings.go b/pkg/ccl/jwtauthccl/settings.go
index db7f6408849a..633dbfdafb0c 100644
--- a/pkg/ccl/jwtauthccl/settings.go
+++ b/pkg/ccl/jwtauthccl/settings.go
@@ -24,8 +24,21 @@ const (
 	JWTAuthEnabledSettingName = baseJWTAuthSettingName + "enabled"
 	JWTAuthIssuersSettingName = baseJWTAuthSettingName + "issuers"
 	JWTAuthJWKSSettingName    = baseJWTAuthSettingName + "jwks"
+	JWTAuthClaimSettingName   = baseJWTAuthSettingName + "claim"
 )

+// JWTAuthClaim sets the JWT claim that is parsed to get the username.
+var JWTAuthClaim = func() *settings.StringSetting {
+	s := settings.RegisterStringSetting(
+		settings.TenantWritable,
+		JWTAuthClaimSettingName,
+		"sets the JWT claim that is parsed to get the username",
+		"",
+	)
+	s.SetReportable(true)
+	return s
+}()
+
 // JWTAuthAudience sets accepted audience values for JWT logins over the SQL interface.
 var JWTAuthAudience = func() *settings.StringSetting {
 	s := settings.RegisterValidatedStringSetting(
diff --git a/pkg/ccl/testccl/authccl/auth_test.go b/pkg/ccl/testccl/authccl/auth_test.go
index 7b32286fc315..0108c319664b 100644
--- a/pkg/ccl/testccl/authccl/auth_test.go
+++ b/pkg/ccl/testccl/authccl/auth_test.go
@@ -227,6 +227,11 @@ func jwtRunTest(t *testing.T, insecure bool) {
 				t.Fatalf("wrong number of argumenets to jwt_cluster_setting jwks: %d", len(a.Vals))
 			}
 			jwtauthccl.JWTAuthJWKS.Override(context.Background(), sv, a.Vals[0])
+		case "claim":
+			if len(a.Vals) != 1 {
+				t.Fatalf("wrong number of arguments to jwt_cluster_setting claim: %d", len(a.Vals))
+			}
+			jwtauthccl.JWTAuthClaim.Override(context.Background(), sv, a.Vals[0])
 		case "ident_map":
 			if len(a.Vals) != 1 {
 				t.Fatalf("wrong number of argumenets to jwt_cluster_setting ident_map: %d", len(a.Vals))
diff --git a/pkg/ccl/testccl/authccl/testdata/jwt b/pkg/ccl/testccl/authccl/testdata/jwt
index 3a6d6548fe73..dddde1669744 100644
--- a/pkg/ccl/testccl/authccl/testdata/jwt
+++ b/pkg/ccl/testccl/authccl/testdata/jwt
@@ -101,8 +101,8 @@ jwt_cluster_setting issuers=issuer2
 # see authentication_jwt_test.go for examples of how to generate these tokens.
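# (for reference, the payload of the token below decodes to
# {"aud": "test_cluster", "exp": 2661264269, "iat": 1661264269, "iss": "issuer2", "sub": "test2"})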
connect user=jwt_user options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3QyIn0.eyJhdWQiOiJ0ZXN0X2NsdXN0ZXIiLCJleHAiOjI2NjEyNjQyNjksImlhdCI6MTY2MTI2NDI2OSwiaXNzIjoiaXNzdWVyMiIsInN1YiI6InRlc3QyIn0.Tot41E-wSz24wo1wj3b8CwEr-O_dqWZoHZkAh2x4nfK2hT4yhfiOcajmKQJVVZX2_897c8uDOqfLzl77JEe-AX4mlEBZXWUNqwwQIdIFZxpL6FEV_YjvTF0bQuu9oeD7kYW-6i3-QQpB6QpCVb-wLW8bBbJ4zCap88nYk14HZH-ZYSzPAP7YEVppHQNhWrxQ66nQU__RuYeQdL6J5Edes9qCHUgqnZCnMPzDZ4l_3Pc5tTSNVcOUl5MMHsvrYsb0VtSFTNCOjJIADXbc2KzVbfqLt-ArUDxs36__u_g84TfGFXoT0VTDbDjYwD7wpyLuT3oLcJuA4m_tto6Rrn7Rww ---- -ERROR: JWT authentication: invalid subject (SQLSTATE 28000) -DETAIL: token issued for test2 and login was for jwt_user +ERROR: JWT authentication: invalid principal (SQLSTATE 28000) +DETAIL: token issued for [test2] and login was for jwt_user jwt_cluster_setting issuers=["issuer1","issuer2"] ---- @@ -110,8 +110,8 @@ jwt_cluster_setting issuers=["issuer1","issuer2"] # see authentication_jwt_test.go for examples of how to generate these tokens. connect user=jwt_user options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3QyIn0.eyJhdWQiOiJ0ZXN0X2NsdXN0ZXIiLCJleHAiOjI2NjEyNjQyNjksImlhdCI6MTY2MTI2NDI2OSwiaXNzIjoiaXNzdWVyMiIsInN1YiI6InRlc3QyIn0.Tot41E-wSz24wo1wj3b8CwEr-O_dqWZoHZkAh2x4nfK2hT4yhfiOcajmKQJVVZX2_897c8uDOqfLzl77JEe-AX4mlEBZXWUNqwwQIdIFZxpL6FEV_YjvTF0bQuu9oeD7kYW-6i3-QQpB6QpCVb-wLW8bBbJ4zCap88nYk14HZH-ZYSzPAP7YEVppHQNhWrxQ66nQU__RuYeQdL6J5Edes9qCHUgqnZCnMPzDZ4l_3Pc5tTSNVcOUl5MMHsvrYsb0VtSFTNCOjJIADXbc2KzVbfqLt-ArUDxs36__u_g84TfGFXoT0VTDbDjYwD7wpyLuT3oLcJuA4m_tto6Rrn7Rww ---- -ERROR: JWT authentication: invalid subject (SQLSTATE 28000) -DETAIL: token issued for test2 and login was for jwt_user +ERROR: JWT authentication: invalid principal (SQLSTATE 28000) +DETAIL: token issued for [test2] and login was for jwt_user subtest end @@ -144,8 +144,8 @@ subtest ident_map_subject_match # try to login with the test user even though the subject of the token is for test2 connect user=test options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3QyIn0.eyJhdWQiOiJ0ZXN0X2NsdXN0ZXIiLCJleHAiOjI2NjEyNjQyNjksImlhdCI6MTY2MTI2NDI2OSwiaXNzIjoiaXNzdWVyMiIsInN1YiI6InRlc3QyIn0.Tot41E-wSz24wo1wj3b8CwEr-O_dqWZoHZkAh2x4nfK2hT4yhfiOcajmKQJVVZX2_897c8uDOqfLzl77JEe-AX4mlEBZXWUNqwwQIdIFZxpL6FEV_YjvTF0bQuu9oeD7kYW-6i3-QQpB6QpCVb-wLW8bBbJ4zCap88nYk14HZH-ZYSzPAP7YEVppHQNhWrxQ66nQU__RuYeQdL6J5Edes9qCHUgqnZCnMPzDZ4l_3Pc5tTSNVcOUl5MMHsvrYsb0VtSFTNCOjJIADXbc2KzVbfqLt-ArUDxs36__u_g84TfGFXoT0VTDbDjYwD7wpyLuT3oLcJuA4m_tto6Rrn7Rww ---- -ERROR: JWT authentication: invalid subject (SQLSTATE 28000) -DETAIL: token issued for test2 and login was for test +ERROR: JWT authentication: invalid principal (SQLSTATE 28000) +DETAIL: token issued for [test2] and login was for test # map the user test2 to test when issued by issuer2 jwt_cluster_setting ident_map=issuer2,test2,test @@ -158,3 +158,53 @@ connect user=test options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1 ok defaultdb subtest end + + +subtest single_custom_claim_login + +jwt_cluster_setting jwks={"alg":"RS256","e":"AQAB","kid":"test_kid1","kty":"RSA","n":"7SIVb_TfkxvwoopYqCBGJyWVUXuMMfP6fdrxtb0WreAICher0VGD9xAF2ZddMNQuVycqHZVkxplN_2-nq8F17POgU4RKJ5V5HLCGhABx0HjRRpLn-akSDTuTUcD3P4cE8XbLjCVCbZTjVncWWpt-UeRV2XHU-17ih5LSZDInzSGlWpp6BUTXiSZ_H7-HjO5cO5Q7j6P1iInETrdAMXWeYbnHXMXNLKyN4uKIymingOohekwYlOCvkA4V2e-u6-FPP5W-51GDroDtWNIVtpSakk1SzWdBjClvdZ3V0nfhw58pvROz8OpnJTVgb9IkZiwRUSbplnCS92gm1wWKz0O-Mw"} +---- + +# see authentication_jwt_test.go for examples of how to 
generate these tokens.
+# try to log in with a token whose usernames are in a custom claim, while the claim setting is unset
+connect user=test options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3Rfa2lkMSIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsidGVzdF9jbHVzdGVyIl0sImV4cCI6MTk5OTE3ODQwOSwiZ3JvdXBzIjoidGVzdCIsImlzcyI6Imlzc3VlcjEiLCJzdWIiOiJpbnZhbGlkX3VzZXIifQ.gfqWUshoNkEe2QDxpZBbLCcTbeogtd7vfUd9XLakhcBiqFjPyf3iP-yzCE3nAR90OWQFdtKVp-O19ymJOKOOe2yAcMBFHdwQSKJ5FHgX3M3IMZHcXNIkU0qTp698mJpGD_w_e8RBLN19OwKsAdUY3oj1oIkljBlsrTkhHIFQX1KG9NYqQQG2Py5eJiDtz9aBpqb2hRSBIcyLSWp7VxQ9sPNXOvIWAynDwRJxCIuF69FfbsR9yHdjPQfoc-6wRktllJ7q1ZZfp129OZZxcQWsbl2v1xPOQPkrT_O4ziElanDF_uReoUxBne3AzlEMIPybSkUaQZXrhhqmH3Hl9PswYw
+----
+ERROR: JWT authentication: invalid principal (SQLSTATE 28000)
+DETAIL: token issued for [invalid_user] and login was for test
+
+# use the groups claim instead of the subject claim
+jwt_cluster_setting claim=groups
+----
+
+# see authentication_jwt_test.go for examples of how to generate these tokens.
+# log in as the test user since it is the value of the groups claim in the token
+connect user=test options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3Rfa2lkMSIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsidGVzdF9jbHVzdGVyIl0sImV4cCI6MTk5OTE3ODQwOSwiZ3JvdXBzIjoidGVzdCIsImlzcyI6Imlzc3VlcjEiLCJzdWIiOiJpbnZhbGlkX3VzZXIifQ.gfqWUshoNkEe2QDxpZBbLCcTbeogtd7vfUd9XLakhcBiqFjPyf3iP-yzCE3nAR90OWQFdtKVp-O19ymJOKOOe2yAcMBFHdwQSKJ5FHgX3M3IMZHcXNIkU0qTp698mJpGD_w_e8RBLN19OwKsAdUY3oj1oIkljBlsrTkhHIFQX1KG9NYqQQG2Py5eJiDtz9aBpqb2hRSBIcyLSWp7VxQ9sPNXOvIWAynDwRJxCIuF69FfbsR9yHdjPQfoc-6wRktllJ7q1ZZfp129OZZxcQWsbl2v1xPOQPkrT_O4ziElanDF_uReoUxBne3AzlEMIPybSkUaQZXrhhqmH3Hl9PswYw
+----
+ok defaultdb
+
+subtest end
+
+subtest multiple_custom_claim_login
+
+# reset the claim to the subject ("sub" is equivalent to the default behavior)
+jwt_cluster_setting claim=sub
+----
+
+# see authentication_jwt_test.go for examples of how to generate these tokens.
+# try to log in with a token whose usernames are in a custom claim, while the claim setting points at the subject
+connect user=test2 options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3Rfa2lkMSIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsidGVzdF9jbHVzdGVyIl0sImV4cCI6MTk5OTE3ODQwOSwiZ3JvdXBzIjpbInRlc3QiLCJ0ZXN0MiJdLCJpc3MiOiJpc3N1ZXIxIiwic3ViIjoiaW52YWxpZF91c2VyIn0.5B2ihElB50zACjjqy0ATxrSxlECmMj-0KvPp0NwoBBvURG16bOnYYksSeN5Izl_-YaP9ZoKOywxgA-sRtw4fX4du6Oo0tDSk3GzkZI6_IQoOxt8eq8To43Y74VSg2P3ts98yyNYXG0n3fTv2qtPjs6ly9p2iSnZBor6Yhy-YIjheT93Ehhl5s2sUL0gTOlpzGnb4N9MDgjphKQinu81DK-w200nOweYF_8ft8aeNiJqqDq1sZuUnCI1KcryuUoqQu5mWh0pO74XYCYHPTLAXwQ2BtKpfj_RJQqPcLW7hy1YcVdWTsL0PPrs6gJ_YKuo99eb0dBl1g-5Kdd5xRIm72g
+----
+ERROR: JWT authentication: invalid principal (SQLSTATE 28000)
+DETAIL: token issued for [invalid_user] and login was for test2
+
+# use the groups claim instead of the subject claim
+jwt_cluster_setting claim=groups
+----
+
+# see authentication_jwt_test.go for examples of how to generate these tokens.
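+# (decoded payload of the token below: {"aud": ["test_cluster"], "groups": ["test", "test2"], "iss": "issuer1", "sub": "invalid_user", ...})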
+# log in as test2 since it is one of the values of the groups claim in the token
+connect user=test2 options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3Rfa2lkMSIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsidGVzdF9jbHVzdGVyIl0sImV4cCI6MTk5OTE3ODQwOSwiZ3JvdXBzIjpbInRlc3QiLCJ0ZXN0MiJdLCJpc3MiOiJpc3N1ZXIxIiwic3ViIjoiaW52YWxpZF91c2VyIn0.5B2ihElB50zACjjqy0ATxrSxlECmMj-0KvPp0NwoBBvURG16bOnYYksSeN5Izl_-YaP9ZoKOywxgA-sRtw4fX4du6Oo0tDSk3GzkZI6_IQoOxt8eq8To43Y74VSg2P3ts98yyNYXG0n3fTv2qtPjs6ly9p2iSnZBor6Yhy-YIjheT93Ehhl5s2sUL0gTOlpzGnb4N9MDgjphKQinu81DK-w200nOweYF_8ft8aeNiJqqDq1sZuUnCI1KcryuUoqQu5mWh0pO74XYCYHPTLAXwQ2BtKpfj_RJQqPcLW7hy1YcVdWTsL0PPrs6gJ_YKuo99eb0dBl1g-5Kdd5xRIm72g
+----
+ok defaultdb
+
+subtest end

From 64a839b13f3701d096373934440956ea71c158fb Mon Sep 17 00:00:00 2001
From: Celia La
Date: Mon, 8 May 2023 18:12:47 -0700
Subject: [PATCH 2/5] codeowners, roachtest: merge SQL schema + sessions to
 SQL foundations

The SQL Schema and Sessions teams are merging to form SQL Foundations.
As described in DEVINFHD-867, we want to:
- reroute old teams to the new sql-foundations team
- rename the "SQL Schema" project to "SQL Foundations"
- deprecate the "SQL Sessions" project

Release note: None
---
 .github/CODEOWNERS                            | 136 +++++++++---------
 TEAMS.yaml                                    |   8 +-
 pkg/cmd/roachtest/registry/owners.go          |   5 +-
 pkg/cmd/roachtest/tests/activerecord.go       |   2 +-
 pkg/cmd/roachtest/tests/alterpk.go            |   6 +-
 pkg/cmd/roachtest/tests/asyncpg.go            |   2 +-
 pkg/cmd/roachtest/tests/awsdms.go             |   2 +-
 pkg/cmd/roachtest/tests/connection_latency.go |   6 +-
 pkg/cmd/roachtest/tests/django.go             |   2 +-
 pkg/cmd/roachtest/tests/drain.go              |   6 +-
 pkg/cmd/roachtest/tests/flowable.go           |   2 +-
 pkg/cmd/roachtest/tests/gopg.go               |   2 +-
 pkg/cmd/roachtest/tests/gorm.go               |   2 +-
 pkg/cmd/roachtest/tests/hibernate.go          |   2 +-
 pkg/cmd/roachtest/tests/inverted_index.go     |   2 +-
 pkg/cmd/roachtest/tests/jasyncsql.go          |   2 +-
 pkg/cmd/roachtest/tests/knex.go               |   2 +-
 pkg/cmd/roachtest/tests/libpq.go              |   2 +-
 pkg/cmd/roachtest/tests/liquibase.go          |   2 +-
 .../mixed_version_decl_schemachange_compat.go |   2 +-
 ...atibility_in_declarative_schema_changer.go |   2 +-
 .../tests/mixed_version_schemachange.go       |   2 +-
 pkg/cmd/roachtest/tests/nodejs_postgres.go    |   2 +-
 pkg/cmd/roachtest/tests/npgsql.go             |   2 +-
 pkg/cmd/roachtest/tests/pgjdbc.go             |   2 +-
 pkg/cmd/roachtest/tests/pgx.go                |   2 +-
 pkg/cmd/roachtest/tests/pop.go                |   2 +-
 pkg/cmd/roachtest/tests/psycopg.go            |   2 +-
 pkg/cmd/roachtest/tests/ruby_pg.go            |   2 +-
 pkg/cmd/roachtest/tests/rust_postgres.go      |   2 +-
 pkg/cmd/roachtest/tests/schemachange.go       |   8 +-
 .../tests/schemachange_random_load.go         |   4 +-
 pkg/cmd/roachtest/tests/secondary_indexes.go  |   2 +-
 pkg/cmd/roachtest/tests/sequelize.go          |   2 +-
 pkg/cmd/roachtest/tests/sqlalchemy.go         |   2 +-
 pkg/cmd/roachtest/tests/tpcc.go               |   2 +-
 pkg/cmd/roachtest/tests/typeorm.go            |   2 +-
 ...ate_system_schema_after_version_upgrade.go |   2 +-
 38 files changed, 118 insertions(+), 121 deletions(-)

diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index af166ca6f594..f8575a3717dd 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -75,36 +75,36 @@
 /pkg/sql/show_create*.go     @cockroachdb/sql-syntax-prs
 /pkg/sql/types/              @cockroachdb/sql-syntax-prs

-/pkg/sql/crdb_internal.go    @cockroachdb/sql-sessions
-/pkg/sql/pg_catalog.go       @cockroachdb/sql-sessions
-/pkg/sql/pgwire/             @cockroachdb/sql-sessions @cockroachdb/server-prs
-/pkg/sql/pgwire/auth.go      @cockroachdb/sql-sessions @cockroachdb/server-prs @cockroachdb/prodsec
-/pkg/sql/sem/builtins/       @cockroachdb/sql-sessions
-/pkg/sql/vtable/             @cockroachdb/sql-sessions
-
-/pkg/sql/sessiondata/ @cockroachdb/sql-sessions -/pkg/sql/tests/rsg_test.go @cockroachdb/sql-sessions -/pkg/sql/ttl @cockroachdb/sql-sessions - -/pkg/ccl/schemachangerccl/ @cockroachdb/sql-schema -/pkg/sql/catalog/ @cockroachdb/sql-schema -/pkg/sql/catalog/multiregion @cockroachdb/sql-schema -/pkg/sql/doctor/ @cockroachdb/sql-schema -/pkg/sql/gcjob/ @cockroachdb/sql-schema -/pkg/sql/gcjob_test/ @cockroachdb/sql-schema -/pkg/sql/privilege/ @cockroachdb/sql-schema -/pkg/sql/schemachange/ @cockroachdb/sql-schema -/pkg/sql/schemachanger/ @cockroachdb/sql-schema -/pkg/sql/alter*.go @cockroachdb/sql-schema -/pkg/sql/backfill*.go @cockroachdb/sql-schema -/pkg/sql/create*.go @cockroachdb/sql-schema -/pkg/sql/database*.go @cockroachdb/sql-schema -/pkg/sql/drop*.go @cockroachdb/sql-schema -/pkg/sql/grant*.go @cockroachdb/sql-schema -/pkg/sql/rename*.go @cockroachdb/sql-schema -/pkg/sql/revoke*.go @cockroachdb/sql-schema -/pkg/sql/schema*.go @cockroachdb/sql-schema -/pkg/sql/zone*.go @cockroachdb/sql-schema +/pkg/sql/crdb_internal.go @cockroachdb/sql-foundations +/pkg/sql/pg_catalog.go @cockroachdb/sql-foundations +/pkg/sql/pgwire/ @cockroachdb/sql-foundations @cockroachdb/server-prs +/pkg/sql/pgwire/auth.go @cockroachdb/sql-foundations @cockroachdb/server-prs @cockroachdb/prodsec +/pkg/sql/sem/builtins/ @cockroachdb/sql-foundations +/pkg/sql/vtable/ @cockroachdb/sql-foundations + +/pkg/sql/sessiondata/ @cockroachdb/sql-foundations +/pkg/sql/tests/rsg_test.go @cockroachdb/sql-foundations +/pkg/sql/ttl @cockroachdb/sql-foundations + +/pkg/ccl/schemachangerccl/ @cockroachdb/sql-foundations +/pkg/sql/catalog/ @cockroachdb/sql-foundations +/pkg/sql/catalog/multiregion @cockroachdb/sql-foundations +/pkg/sql/doctor/ @cockroachdb/sql-foundations +/pkg/sql/gcjob/ @cockroachdb/sql-foundations +/pkg/sql/gcjob_test/ @cockroachdb/sql-foundations +/pkg/sql/privilege/ @cockroachdb/sql-foundations +/pkg/sql/schemachange/ @cockroachdb/sql-foundations +/pkg/sql/schemachanger/ @cockroachdb/sql-foundations +/pkg/sql/alter*.go @cockroachdb/sql-foundations +/pkg/sql/backfill*.go @cockroachdb/sql-foundations +/pkg/sql/create*.go @cockroachdb/sql-foundations +/pkg/sql/database*.go @cockroachdb/sql-foundations +/pkg/sql/drop*.go @cockroachdb/sql-foundations +/pkg/sql/grant*.go @cockroachdb/sql-foundations +/pkg/sql/rename*.go @cockroachdb/sql-foundations +/pkg/sql/revoke*.go @cockroachdb/sql-foundations +/pkg/sql/schema*.go @cockroachdb/sql-foundations +/pkg/sql/zone*.go @cockroachdb/sql-foundations # Beware to not assign the CLI package directory to a single team, at # least until we heavily refactor the package to extract team-specific @@ -116,26 +116,26 @@ /pkg/cli/cli.go @cockroachdb/cli-prs /pkg/cli/cli_debug*.go @cockroachdb/kv-prs @cockroachdb/cli-prs /pkg/cli/cli_test.go @cockroachdb/cli-prs -/pkg/cli/clientflags/ @cockroachdb/sql-sessions @cockroachdb/cli-prs -/pkg/cli/clienturl/ @cockroachdb/sql-sessions @cockroachdb/cli-prs -/pkg/cli/clisqlcfg/ @cockroachdb/sql-sessions @cockroachdb/cli-prs -/pkg/cli/clisqlclient/ @cockroachdb/sql-sessions @cockroachdb/cli-prs -/pkg/cli/clisqlexec/ @cockroachdb/sql-sessions @cockroachdb/cli-prs -/pkg/cli/clisqlshell/ @cockroachdb/sql-sessions @cockroachdb/cli-prs +/pkg/cli/clientflags/ @cockroachdb/sql-foundations @cockroachdb/cli-prs +/pkg/cli/clienturl/ @cockroachdb/sql-foundations @cockroachdb/cli-prs +/pkg/cli/clisqlcfg/ @cockroachdb/sql-foundations @cockroachdb/cli-prs +/pkg/cli/clisqlclient/ @cockroachdb/sql-foundations @cockroachdb/cli-prs +/pkg/cli/clisqlexec/ 
@cockroachdb/sql-foundations @cockroachdb/cli-prs +/pkg/cli/clisqlshell/ @cockroachdb/sql-foundations @cockroachdb/cli-prs /pkg/cli/connect*.go @cockroachdb/prodsec @cockroachdb/cli-prs /pkg/cli/context.go @cockroachdb/cli-prs -/pkg/cli/convert_url* @cockroachdb/sql-sessions @cockroachdb/cli-prs +/pkg/cli/convert_url* @cockroachdb/sql-foundations @cockroachdb/cli-prs /pkg/cli/debug*.go @cockroachdb/kv-prs @cockroachdb/cli-prs /pkg/cli/debug_job_trace*.go @cockroachdb/jobs-prs @cockroachdb/disaster-recovery /pkg/cli/debug_logconfig.go @cockroachdb/obs-inf-prs @cockroachdb/cli-prs /pkg/cli/debug_merg_logs*.go @cockroachdb/obs-inf-prs @cockroachdb/cli-prs -/pkg/cli/declarative_* @cockroachdb/sql-schema +/pkg/cli/declarative_* @cockroachdb/sql-foundations /pkg/cli/decode*.go @cockroachdb/kv-prs @cockroachdb/cli-prs -/pkg/cli/demo*.go @cockroachdb/sql-sessions @cockroachdb/server-prs @cockroachdb/cli-prs -/pkg/cli/democluster/ @cockroachdb/sql-sessions @cockroachdb/server-prs @cockroachdb/cli-prs -/pkg/cli/doctor*.go @cockroachdb/sql-schema @cockroachdb/cli-prs +/pkg/cli/demo*.go @cockroachdb/sql-foundations @cockroachdb/server-prs @cockroachdb/cli-prs +/pkg/cli/democluster/ @cockroachdb/sql-foundations @cockroachdb/server-prs @cockroachdb/cli-prs +/pkg/cli/doctor*.go @cockroachdb/sql-foundations @cockroachdb/cli-prs /pkg/cli/flags*.go @cockroachdb/cli-prs -/pkg/cli/import*.go @cockroachdb/sql-sessions @cockroachdb/cli-prs +/pkg/cli/import*.go @cockroachdb/sql-foundations @cockroachdb/cli-prs /pkg/cli/inflight_trace_dump/ @cockroachdb/cluster-observability @cockroachdb/cli-prs /pkg/cli/init.go @cockroachdb/kv-prs @cockroachdb/cli-prs /pkg/cli/log*.go @cockroachdb/obs-inf-prs @cockroachdb/cli-prs @@ -145,7 +145,7 @@ /pkg/cli/mt_test_directory.go @cockroachdb/sqlproxy-prs @cockroachdb/server-prs /pkg/cli/nodelocal*.go @cockroachdb/disaster-recovery /pkg/cli/rpc*.go @cockroachdb/kv-prs @cockroachdb/cli-prs -/pkg/cli/sql*.go @cockroachdb/sql-sessions @cockroachdb/cli-prs +/pkg/cli/sql*.go @cockroachdb/sql-foundations @cockroachdb/cli-prs /pkg/cli/start*.go @cockroachdb/server-prs @cockroachdb/cli-prs /pkg/cli/statement*.go @cockroachdb/cluster-observability @cockroachdb/cli-prs /pkg/cli/syncbench/ @cockroachdb/storage @cockroachdb/kv-prs @@ -153,7 +153,7 @@ /pkg/cli/testutils.go @cockroachdb/test-eng /pkg/cli/tsdump.go @cockroachdb/obs-inf-prs /pkg/cli/userfile.go @cockroachdb/disaster-recovery -/pkg/cli/workload* @cockroachdb/sql-sessions +/pkg/cli/workload* @cockroachdb/sql-foundations /pkg/cli/zip*.go @cockroachdb/obs-inf-prs @cockroachdb/cli-prs # Beware to not assign the entire server package directory to a single @@ -190,19 +190,19 @@ /pkg/server/key_vis* @cockroachdb/cluster-observability @cockroachdb/obs-inf-prs /pkg/server/load_endpoint* @cockroachdb/obs-inf-prs @cockroachdb/server-prs /pkg/server/loss_of_quorum*.go @cockroachdb/kv-prs -/pkg/server/migration* @cockroachdb/sql-schema +/pkg/server/migration* @cockroachdb/sql-foundations /pkg/server/multi_store* @cockroachdb/kv-prs @cockroachdb/storage /pkg/server/node* @cockroachdb/kv-prs @cockroachdb/server-prs /pkg/server/node_http*.go @cockroachdb/obs-inf-prs @cockroachdb/server-prs /pkg/server/node_tenant*go @cockroachdb/obs-inf-prs @cockroachdb/multi-tenant @cockroachdb/server-prs -/pkg/server/pgurl/ @cockroachdb/sql-sessions @cockroachdb/cli-prs +/pkg/server/pgurl/ @cockroachdb/sql-foundations @cockroachdb/cli-prs /pkg/server/pagination* @cockroachdb/obs-inf-prs @cockroachdb/server-prs /pkg/server/problem_ranges*.go 
@cockroachdb/cluster-observability @cockroachdb/obs-inf-prs /pkg/server/profiler/ @cockroachdb/obs-inf-prs @cockroachdb/kv-prs /pkg/server/purge_auth_* @cockroachdb/obs-inf-prs @cockroachdb/server-prs /pkg/server/server_controller_*.go @cockroachdb/multi-tenant @cockroachdb/server-prs /pkg/server/server_controller_http.go @cockroachdb/obs-inf-prs @cockroachdb/server-prs -/pkg/server/server_controller_sql.go @cockroachdb/sql-sessions @cockroachdb/server-prs +/pkg/server/server_controller_sql.go @cockroachdb/sql-foundations @cockroachdb/server-prs /pkg/server/server_http*.go @cockroachdb/obs-inf-prs @cockroachdb/server-prs /pkg/server/server_import_ts*.go @cockroachdb/obs-inf-prs @cockroachdb/kv-prs /pkg/server/server_obs* @cockroachdb/obs-inf-prs @@ -346,10 +346,10 @@ #!/pkg/gen/*.bzl @cockroachdb/dev-inf-noreview /pkg/gen/gen.bzl @cockroachdb/dev-inf -/pkg/acceptance/ @cockroachdb/sql-sessions +/pkg/acceptance/ @cockroachdb/sql-foundations /pkg/base/ @cockroachdb/kv-prs @cockroachdb/server-prs #!/pkg/bench/ @cockroachdb/sql-queries-noreview -/pkg/bench/rttanalysis @cockroachdb/sql-schema +/pkg/bench/rttanalysis @cockroachdb/sql-foundations /pkg/blobs/ @cockroachdb/disaster-recovery /pkg/build/ @cockroachdb/dev-inf #!/pkg/ccl/baseccl/ @cockroachdb/unowned @@ -364,12 +364,12 @@ #!/pkg/ccl/upgradeccl/ @cockroachdb/unowned #!/pkg/ccl/logictestccl/ @cockroachdb/sql-queries-noreview #!/pkg/ccl/sqlitelogictestccl/ @cockroachdb/sql-queries-noreview -/pkg/ccl/multiregionccl/ @cockroachdb/sql-schema +/pkg/ccl/multiregionccl/ @cockroachdb/sql-foundations /pkg/ccl/multitenantccl/ @cockroachdb/multi-tenant /pkg/ccl/multitenant/tenantcostclient/ @cockroachdb/sqlproxy-prs /pkg/ccl/multitenant/tenantcostserver/ @cockroachdb/sqlproxy-prs #!/pkg/ccl/oidcccl/ @cockroachdb/unowned -/pkg/ccl/partitionccl/ @cockroachdb/sql-schema +/pkg/ccl/partitionccl/ @cockroachdb/sql-foundations #!/pkg/ccl/serverccl/ @cockroachdb/unowned /pkg/ccl/serverccl/diagnosticsccl/ @cockroachdb/obs-inf-prs @@ -385,35 +385,35 @@ /pkg/ccl/testccl/authccl/ @cockroachdb/cloud-identity @cockroachdb/prodsec /pkg/ccl/testccl/sqlccl/ @cockroachdb/sql-queries -/pkg/ccl/testccl/workload/schemachange/ @cockroachdb/sql-schema +/pkg/ccl/testccl/workload/schemachange/ @cockroachdb/sql-foundations #!/pkg/ccl/testutilsccl/ @cockroachdb/test-eng-noreview -/pkg/ccl/testutilsccl/alter_* @cockroachdb/sql-schema +/pkg/ccl/testutilsccl/alter_* @cockroachdb/sql-foundations #!/pkg/ccl/utilccl/ @cockroachdb/unowned -/pkg/ccl/workloadccl/ @cockroachdb/test-eng #! @cockroachdb/sql-sessions-noreview -/pkg/ccl/benchccl/rttanalysisccl/ @cockroachdb/sql-schema +/pkg/ccl/workloadccl/ @cockroachdb/test-eng #! 
@cockroachdb/sql-foundations-noreview +/pkg/ccl/benchccl/rttanalysisccl/ @cockroachdb/sql-foundations #!/pkg/clusterversion/ @cockroachdb/dev-inf-noreview @cockroachdb/kv-prs-noreview /pkg/cmd/allocsim/ @cockroachdb/kv-prs /pkg/cmd/bazci/ @cockroachdb/dev-inf /pkg/cmd/cloudupload/ @cockroachdb/dev-inf /pkg/cmd/cmdutil/ @cockroachdb/dev-inf -/pkg/cmd/cmp-protocol/ @cockroachdb/sql-sessions -/pkg/cmd/cmp-sql/ @cockroachdb/sql-sessions -/pkg/cmd/cmpconn/ @cockroachdb/sql-sessions +/pkg/cmd/cmp-protocol/ @cockroachdb/sql-foundations +/pkg/cmd/cmp-sql/ @cockroachdb/sql-foundations +/pkg/cmd/cmpconn/ @cockroachdb/sql-foundations /pkg/cmd/cockroach/ @cockroachdb/dev-inf @cockroachdb/cli-prs /pkg/cmd/cockroach-oss/ @cockroachdb/dev-inf @cockroachdb/cli-prs /pkg/cmd/cockroach-short/ @cockroachdb/dev-inf @cockroachdb/cli-prs -/pkg/cmd/cockroach-sql/ @cockroachdb/sql-sessions @cockroachdb/cli-prs +/pkg/cmd/cockroach-sql/ @cockroachdb/sql-foundations @cockroachdb/cli-prs /pkg/cmd/compile-build/ @cockroachdb/dev-inf -/pkg/cmd/cr2pg/ @cockroachdb/sql-sessions +/pkg/cmd/cr2pg/ @cockroachdb/sql-foundations /pkg/cmd/dev/ @cockroachdb/dev-inf #!/pkg/cmd/docgen/ @cockroachdb/docs-infra-prs /pkg/cmd/docs-issue-generation/ @cockroachdb/dev-inf /pkg/cmd/fuzz/ @cockroachdb/test-eng -/pkg/cmd/generate-binary/ @cockroachdb/sql-sessions +/pkg/cmd/generate-binary/ @cockroachdb/sql-foundations /pkg/cmd/generate-distdir/ @cockroachdb/dev-inf /pkg/cmd/generate-logictest/ @cockroachdb/dev-inf /pkg/cmd/generate-acceptance-tests/ @cockroachdb/dev-inf -/pkg/cmd/generate-metadata-tables/ @cockroachdb/sql-sessions +/pkg/cmd/generate-metadata-tables/ @cockroachdb/sql-foundations /pkg/cmd/generate-spatial-ref-sys/ @cockroachdb/spatial /pkg/cmd/generate-bazel-extra/ @cockroachdb/dev-inf /pkg/cmd/generate-staticcheck/ @cockroachdb/dev-inf @@ -445,7 +445,7 @@ #!/pkg/cmd/roachtest/tests @cockroachdb/test-eng-noreview /pkg/cmd/roachvet/ @cockroachdb/dev-inf /pkg/cmd/skip-test/ @cockroachdb/test-eng -/pkg/cmd/skiperrs/ @cockroachdb/sql-sessions +/pkg/cmd/skiperrs/ @cockroachdb/sql-foundations /pkg/cmd/skipped-tests/ @cockroachdb/test-eng /pkg/cmd/smith/ @cockroachdb/sql-queries /pkg/cmd/smithcmp/ @cockroachdb/sql-queries @@ -455,11 +455,11 @@ /pkg/cmd/uptodate/ @cockroachdb/dev-inf #!/pkg/cmd/urlcheck/ @cockroachdb/docs-infra-prs /pkg/cmd/whoownsit/ @cockroachdb/test-eng -/pkg/cmd/workload/ @cockroachdb/test-eng #! @cockroachdb/sql-sessions-noreview +/pkg/cmd/workload/ @cockroachdb/test-eng #! @cockroachdb/sql-foundations-noreview #!/pkg/cmd/wraprules/ @cockroachdb/obs-inf-prs-noreview #!/pkg/cmd/zerosum/ @cockroachdb/kv-noreview /pkg/col/ @cockroachdb/sql-queries -/pkg/compose/ @cockroachdb/sql-sessions +/pkg/compose/ @cockroachdb/sql-foundations /pkg/config/ @cockroachdb/kv-prs @cockroachdb/server-prs # TODO(nickvigilante): add the cockroach repo to the docs-infra-prs team so that # Github stops complaining. Then remove the #! prefix here and on the other lines @@ -478,7 +478,7 @@ /pkg/keysbase/ @cockroachdb/kv-prs # Don't ping KV on updates to reserved descriptor IDs and such. 
#!/pkg/keys/constants.go @cockroachdb/kv-prs-noreview -/pkg/upgrade/ @cockroachdb/sql-schema +/pkg/upgrade/ @cockroachdb/sql-foundations /pkg/keyvisualizer/ @cockroachdb/kv-obs-prs /pkg/multitenant/ @cockroachdb/multi-tenant /pkg/release/ @cockroachdb/dev-inf @@ -501,7 +501,7 @@ /pkg/rpc/auth_tenant.go @cockroachdb/multi-tenant @cockroachdb/prodsec /pkg/scheduledjobs/ @cockroachdb/jobs-prs @cockroachdb/disaster-recovery /pkg/security/ @cockroachdb/prodsec @cockroachdb/server-prs -/pkg/security/clientsecopts/ @cockroachdb/sql-sessions @cockroachdb/prodsec +/pkg/security/clientsecopts/ @cockroachdb/sql-foundations @cockroachdb/prodsec #!/pkg/settings/ @cockroachdb/unowned /pkg/spanconfig/ @cockroachdb/kv-prs /pkg/repstream/ @cockroachdb/disaster-recovery @@ -520,7 +520,7 @@ /pkg/util/admission/ @cockroachdb/admission-control /pkg/util/schedulerlatency/ @cockroachdb/admission-control /pkg/util/tracing @cockroachdb/obs-inf-prs -/pkg/workload/ @cockroachdb/test-eng #! @cockroachdb/sql-sessions-noreview +/pkg/workload/ @cockroachdb/test-eng #! @cockroachdb/sql-foundations-noreview /pkg/obs/ @cockroachdb/obs-inf-prs /pkg/obsservice/ @cockroachdb/obs-inf-prs diff --git a/TEAMS.yaml b/TEAMS.yaml index ce64f43e16fa..966a43cf1061 100644 --- a/TEAMS.yaml +++ b/TEAMS.yaml @@ -21,15 +21,13 @@ cockroachdb/docs: triage_column_id: 3971225 aliases: cockroachdb/docs-infra-prs: other -cockroachdb/sql-sessions: +cockroachdb/sql-foundations: aliases: cockroachdb/sql-syntax-prs: other cockroachdb/sqlproxy-prs: other cockroachdb/sql-api-prs: other - triage_column_id: 7259065 - label: T-sql-sessions -cockroachdb/sql-schema: - triage_column_id: 8946818 + triage_column_id: 19467489 + label: T-sql-foundations cockroachdb/sql-queries: aliases: cockroachdb/sql-optimizer: other diff --git a/pkg/cmd/roachtest/registry/owners.go b/pkg/cmd/roachtest/registry/owners.go index 5fdda3dd2a6c..d1b6c5f166e7 100644 --- a/pkg/cmd/roachtest/registry/owners.go +++ b/pkg/cmd/roachtest/registry/owners.go @@ -16,16 +16,15 @@ type Owner string // The allowable values of Owner. 
const ( - OwnerSQLSessions Owner = `sql-sessions` - OwnerDisasterRecovery Owner = `disaster-recovery` OwnerCDC Owner = `cdc` + OwnerDisasterRecovery Owner = `disaster-recovery` OwnerKV Owner = `kv` OwnerReplication Owner = `replication` OwnerAdmissionControl Owner = `admission-control` OwnerObsInf Owner = `obs-inf-prs` OwnerServer Owner = `server` // not currently staffed + OwnerSQLFoundations Owner = `sql-foundations` OwnerSQLQueries Owner = `sql-queries` - OwnerSQLSchema Owner = `sql-schema` OwnerStorage Owner = `storage` OwnerTestEng Owner = `test-eng` OwnerDevInf Owner = `dev-inf` diff --git a/pkg/cmd/roachtest/tests/activerecord.go b/pkg/cmd/roachtest/tests/activerecord.go index c89fd1a11301..efce73e086a8 100644 --- a/pkg/cmd/roachtest/tests/activerecord.go +++ b/pkg/cmd/roachtest/tests/activerecord.go @@ -243,7 +243,7 @@ func registerActiveRecord(r registry.Registry) { r.Add(registry.TestSpec{ Name: "activerecord", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Timeout: 5 * time.Hour, Cluster: r.MakeClusterSpec(1), NativeLibs: registry.LibGEOS, diff --git a/pkg/cmd/roachtest/tests/alterpk.go b/pkg/cmd/roachtest/tests/alterpk.go index 309fcc1e2eed..1552361d5e1f 100644 --- a/pkg/cmd/roachtest/tests/alterpk.go +++ b/pkg/cmd/roachtest/tests/alterpk.go @@ -179,7 +179,7 @@ func registerAlterPK(r registry.Registry) { } r.Add(registry.TestSpec{ Name: "alterpk-bank", - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, // Use a 4 node cluster -- 3 nodes will run cockroach, and the last will be the // workload driver node. Cluster: r.MakeClusterSpec(4), @@ -187,7 +187,7 @@ func registerAlterPK(r registry.Registry) { }) r.Add(registry.TestSpec{ Name: "alterpk-tpcc-250", - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, // Use a 4 node cluster -- 3 nodes will run cockroach, and the last will be the // workload driver node. Cluster: r.MakeClusterSpec(4, spec.CPU(32)), @@ -197,7 +197,7 @@ func registerAlterPK(r registry.Registry) { }) r.Add(registry.TestSpec{ Name: "alterpk-tpcc-500", - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, // Use a 4 node cluster -- 3 nodes will run cockroach, and the last will be the // workload driver node. 
Cluster: r.MakeClusterSpec(4, spec.CPU(16)), diff --git a/pkg/cmd/roachtest/tests/asyncpg.go b/pkg/cmd/roachtest/tests/asyncpg.go index e6543639b24b..93bd22fd7881 100644 --- a/pkg/cmd/roachtest/tests/asyncpg.go +++ b/pkg/cmd/roachtest/tests/asyncpg.go @@ -139,7 +139,7 @@ func registerAsyncpg(r registry.Registry) { r.Add(registry.TestSpec{ Name: "asyncpg", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1, spec.CPU(16)), Tags: registry.Tags(`default`, `orm`), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { diff --git a/pkg/cmd/roachtest/tests/awsdms.go b/pkg/cmd/roachtest/tests/awsdms.go index ccc00362498e..2d669193c406 100644 --- a/pkg/cmd/roachtest/tests/awsdms.go +++ b/pkg/cmd/roachtest/tests/awsdms.go @@ -189,7 +189,7 @@ func dmsDescribeTasksInput( func registerAWSDMS(r registry.Registry) { r.Add(registry.TestSpec{ Name: "awsdms", - Owner: registry.OwnerSQLSessions, // TODO(otan): add a migrations OWNERS team + Owner: registry.OwnerSQLFoundations, // TODO(otan): add a migrations OWNERS team Cluster: r.MakeClusterSpec(1), Tags: registry.Tags(`default`, `awsdms`, `aws`), Run: runAWSDMS, diff --git a/pkg/cmd/roachtest/tests/connection_latency.go b/pkg/cmd/roachtest/tests/connection_latency.go index 259edfe87203..fa937c7ff9c9 100644 --- a/pkg/cmd/roachtest/tests/connection_latency.go +++ b/pkg/cmd/roachtest/tests/connection_latency.go @@ -119,7 +119,7 @@ func registerConnectionLatencyTest(r registry.Registry) { numNodes := 3 r.Add(registry.TestSpec{ Name: fmt.Sprintf("connection_latency/nodes=%d/certs", numNodes), - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, // Add one more node for load node. Cluster: r.MakeClusterSpec(numNodes+1, spec.Zones(regionUsCentral)), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { @@ -135,7 +135,7 @@ func registerConnectionLatencyTest(r registry.Registry) { r.Add(registry.TestSpec{ Name: fmt.Sprintf("connection_latency/nodes=%d/multiregion/certs", numMultiRegionNodes), - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(numMultiRegionNodes+loadNodes, spec.Geo(), spec.Zones(geoZonesStr)), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { runConnectionLatencyTest(ctx, t, c, numMultiRegionNodes, numZones, false /*password*/) @@ -144,7 +144,7 @@ func registerConnectionLatencyTest(r registry.Registry) { r.Add(registry.TestSpec{ Name: fmt.Sprintf("connection_latency/nodes=%d/multiregion/password", numMultiRegionNodes), - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(numMultiRegionNodes+loadNodes, spec.Geo(), spec.Zones(geoZonesStr)), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { runConnectionLatencyTest(ctx, t, c, numMultiRegionNodes, numZones, true /*password*/) diff --git a/pkg/cmd/roachtest/tests/django.go b/pkg/cmd/roachtest/tests/django.go index 646c9719044f..7a0fc61284e4 100644 --- a/pkg/cmd/roachtest/tests/django.go +++ b/pkg/cmd/roachtest/tests/django.go @@ -215,7 +215,7 @@ func registerDjango(r registry.Registry) { r.Add(registry.TestSpec{ Name: "django", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1, spec.CPU(16)), Tags: registry.Tags(`default`, `orm`), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { diff --git a/pkg/cmd/roachtest/tests/drain.go b/pkg/cmd/roachtest/tests/drain.go index 71df5b6db70a..0bea2e77bc38 100644 --- 
a/pkg/cmd/roachtest/tests/drain.go +++ b/pkg/cmd/roachtest/tests/drain.go @@ -38,7 +38,7 @@ func registerDrain(r registry.Registry) { { r.Add(registry.TestSpec{ Name: "drain/early-exit-conn-wait", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { runEarlyExitInConnectionWait(ctx, t, c) @@ -47,7 +47,7 @@ func registerDrain(r registry.Registry) { r.Add(registry.TestSpec{ Name: "drain/warn-conn-wait-timeout", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { runWarningForConnWait(ctx, t, c) @@ -56,7 +56,7 @@ func registerDrain(r registry.Registry) { r.Add(registry.TestSpec{ Name: "drain/not-at-quorum", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(3), SkipPostValidations: registry.PostValidationNoDeadNodes, Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { diff --git a/pkg/cmd/roachtest/tests/flowable.go b/pkg/cmd/roachtest/tests/flowable.go index 44ac76413ae8..1f53fb797c8c 100644 --- a/pkg/cmd/roachtest/tests/flowable.go +++ b/pkg/cmd/roachtest/tests/flowable.go @@ -104,7 +104,7 @@ func registerFlowable(r registry.Registry) { r.Add(registry.TestSpec{ Name: "flowable", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { runFlowable(ctx, t, c) diff --git a/pkg/cmd/roachtest/tests/gopg.go b/pkg/cmd/roachtest/tests/gopg.go index 3a4dc6d92547..2349467ad7f2 100644 --- a/pkg/cmd/roachtest/tests/gopg.go +++ b/pkg/cmd/roachtest/tests/gopg.go @@ -156,7 +156,7 @@ func registerGopg(r registry.Registry) { r.Add(registry.TestSpec{ Name: "gopg", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Tags: registry.Tags(`default`, `orm`), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { diff --git a/pkg/cmd/roachtest/tests/gorm.go b/pkg/cmd/roachtest/tests/gorm.go index a5a402b017d3..3b3a46f46b9f 100644 --- a/pkg/cmd/roachtest/tests/gorm.go +++ b/pkg/cmd/roachtest/tests/gorm.go @@ -134,7 +134,7 @@ func registerGORM(r registry.Registry) { r.Add(registry.TestSpec{ Name: "gorm", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Tags: registry.Tags(`default`, `orm`), Run: runGORM, diff --git a/pkg/cmd/roachtest/tests/hibernate.go b/pkg/cmd/roachtest/tests/hibernate.go index fa583b71cd9f..046224166a5b 100644 --- a/pkg/cmd/roachtest/tests/hibernate.go +++ b/pkg/cmd/roachtest/tests/hibernate.go @@ -244,7 +244,7 @@ func registerHibernate(r registry.Registry, opt hibernateOptions) { r.Add(registry.TestSpec{ Name: opt.testName, - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), NativeLibs: registry.LibGEOS, Tags: registry.Tags(`default`, `orm`), diff --git a/pkg/cmd/roachtest/tests/inverted_index.go b/pkg/cmd/roachtest/tests/inverted_index.go index 49dfdebdb843..f1ebed589511 100644 --- a/pkg/cmd/roachtest/tests/inverted_index.go +++ b/pkg/cmd/roachtest/tests/inverted_index.go @@ -26,7 +26,7 @@ import ( func registerSchemaChangeInvertedIndex(r registry.Registry) { r.Add(registry.TestSpec{ Name: "schemachange/invertedindex", - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, Cluster: 
r.MakeClusterSpec(5), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { runSchemaChangeInvertedIndex(ctx, t, c) diff --git a/pkg/cmd/roachtest/tests/jasyncsql.go b/pkg/cmd/roachtest/tests/jasyncsql.go index 36954f7c3d35..4e087b59086c 100644 --- a/pkg/cmd/roachtest/tests/jasyncsql.go +++ b/pkg/cmd/roachtest/tests/jasyncsql.go @@ -141,7 +141,7 @@ func registerJasyncSQL(r registry.Registry) { r.Add(registry.TestSpec{ Name: "jasync", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Tags: registry.Tags(`default`, `orm`), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { diff --git a/pkg/cmd/roachtest/tests/knex.go b/pkg/cmd/roachtest/tests/knex.go index 4b1f80f4f6c6..1d8630c1a453 100644 --- a/pkg/cmd/roachtest/tests/knex.go +++ b/pkg/cmd/roachtest/tests/knex.go @@ -128,7 +128,7 @@ func registerKnex(r registry.Registry) { r.Add(registry.TestSpec{ Name: "knex", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), NativeLibs: registry.LibGEOS, Tags: registry.Tags(`default`, `orm`), diff --git a/pkg/cmd/roachtest/tests/libpq.go b/pkg/cmd/roachtest/tests/libpq.go index 2693593419d4..2ba548e0aaa7 100644 --- a/pkg/cmd/roachtest/tests/libpq.go +++ b/pkg/cmd/roachtest/tests/libpq.go @@ -136,7 +136,7 @@ func registerLibPQ(r registry.Registry) { r.Add(registry.TestSpec{ Name: "lib/pq", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Tags: registry.Tags(`default`, `driver`), Run: runLibPQ, diff --git a/pkg/cmd/roachtest/tests/liquibase.go b/pkg/cmd/roachtest/tests/liquibase.go index 904e8abf5c98..00b6ba29b601 100644 --- a/pkg/cmd/roachtest/tests/liquibase.go +++ b/pkg/cmd/roachtest/tests/liquibase.go @@ -132,7 +132,7 @@ func registerLiquibase(r registry.Registry) { r.Add(registry.TestSpec{ Name: "liquibase", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Tags: registry.Tags(`default`, `tool`), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { diff --git a/pkg/cmd/roachtest/tests/mixed_version_decl_schemachange_compat.go b/pkg/cmd/roachtest/tests/mixed_version_decl_schemachange_compat.go index d06035a7e987..6161bb99d67a 100644 --- a/pkg/cmd/roachtest/tests/mixed_version_decl_schemachange_compat.go +++ b/pkg/cmd/roachtest/tests/mixed_version_decl_schemachange_compat.go @@ -28,7 +28,7 @@ import ( func registerDeclSchemaChangeCompatMixedVersions(r registry.Registry) { r.Add(registry.TestSpec{ Name: "schemachange/mixed-versions-compat", - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { if c.IsLocal() && runtime.GOARCH == "arm64" { diff --git a/pkg/cmd/roachtest/tests/mixed_version_job_compatibility_in_declarative_schema_changer.go b/pkg/cmd/roachtest/tests/mixed_version_job_compatibility_in_declarative_schema_changer.go index fbcaa986c847..8eeeac3c93df 100644 --- a/pkg/cmd/roachtest/tests/mixed_version_job_compatibility_in_declarative_schema_changer.go +++ b/pkg/cmd/roachtest/tests/mixed_version_job_compatibility_in_declarative_schema_changer.go @@ -130,7 +130,7 @@ func registerDeclarativeSchemaChangerJobCompatibilityInMixedVersion(r registry.R // supported in the "previous" major release. 
r.Add(registry.TestSpec{ Name: "declarative_schema_changer/job-compatibility-mixed-version-V222-V231", - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(4), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { if c.IsLocal() && runtime.GOARCH == "arm64" { diff --git a/pkg/cmd/roachtest/tests/mixed_version_schemachange.go b/pkg/cmd/roachtest/tests/mixed_version_schemachange.go index fbc0014a4234..fb66c4003e60 100644 --- a/pkg/cmd/roachtest/tests/mixed_version_schemachange.go +++ b/pkg/cmd/roachtest/tests/mixed_version_schemachange.go @@ -25,7 +25,7 @@ import ( func registerSchemaChangeMixedVersions(r registry.Registry) { r.Add(registry.TestSpec{ Name: "schemachange/mixed-versions", - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, // This tests the work done for 20.1 that made schema changes jobs and in // addition prevented making any new schema changes on a mixed cluster in // order to prevent bugs during upgrades. diff --git a/pkg/cmd/roachtest/tests/nodejs_postgres.go b/pkg/cmd/roachtest/tests/nodejs_postgres.go index 5dcc8d1307b5..179e2aed1871 100644 --- a/pkg/cmd/roachtest/tests/nodejs_postgres.go +++ b/pkg/cmd/roachtest/tests/nodejs_postgres.go @@ -168,7 +168,7 @@ PGSSLCERT=$HOME/certs/client.%s.crt PGSSLKEY=$HOME/certs/client.%s.key PGSSLROOT r.Add(registry.TestSpec{ Name: "node-postgres", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Tags: registry.Tags(`default`, `driver`), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { diff --git a/pkg/cmd/roachtest/tests/npgsql.go b/pkg/cmd/roachtest/tests/npgsql.go index 5efe39304b64..6bfc1497e363 100644 --- a/pkg/cmd/roachtest/tests/npgsql.go +++ b/pkg/cmd/roachtest/tests/npgsql.go @@ -167,7 +167,7 @@ echo '%s' | git apply --ignore-whitespace -`, npgsqlPatch), r.Add(registry.TestSpec{ Name: "npgsql", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Tags: registry.Tags(`default`, `driver`), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { diff --git a/pkg/cmd/roachtest/tests/pgjdbc.go b/pkg/cmd/roachtest/tests/pgjdbc.go index 93acc59373da..b6b33daabc69 100644 --- a/pkg/cmd/roachtest/tests/pgjdbc.go +++ b/pkg/cmd/roachtest/tests/pgjdbc.go @@ -211,7 +211,7 @@ func registerPgjdbc(r registry.Registry) { r.Add(registry.TestSpec{ Name: "pgjdbc", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Tags: registry.Tags(`default`, `driver`), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { diff --git a/pkg/cmd/roachtest/tests/pgx.go b/pkg/cmd/roachtest/tests/pgx.go index cd7c96e8fa0d..a1e1c6f6cca8 100644 --- a/pkg/cmd/roachtest/tests/pgx.go +++ b/pkg/cmd/roachtest/tests/pgx.go @@ -132,7 +132,7 @@ func registerPgx(r registry.Registry) { r.Add(registry.TestSpec{ Name: "pgx", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Tags: registry.Tags(`default`, `driver`), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { diff --git a/pkg/cmd/roachtest/tests/pop.go b/pkg/cmd/roachtest/tests/pop.go index 875cfc802900..762f32a71ea4 100644 --- a/pkg/cmd/roachtest/tests/pop.go +++ b/pkg/cmd/roachtest/tests/pop.go @@ -100,7 +100,7 @@ func registerPop(r registry.Registry) { r.Add(registry.TestSpec{ Name: "pop", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: 
r.MakeClusterSpec(1), Tags: registry.Tags(`default`, `orm`), Run: runPop, diff --git a/pkg/cmd/roachtest/tests/psycopg.go b/pkg/cmd/roachtest/tests/psycopg.go index ac8c1ac5e2ce..6a24599246a4 100644 --- a/pkg/cmd/roachtest/tests/psycopg.go +++ b/pkg/cmd/roachtest/tests/psycopg.go @@ -141,7 +141,7 @@ func registerPsycopg(r registry.Registry) { r.Add(registry.TestSpec{ Name: "psycopg", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Tags: registry.Tags(`default`, `driver`), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { diff --git a/pkg/cmd/roachtest/tests/ruby_pg.go b/pkg/cmd/roachtest/tests/ruby_pg.go index 22f14085e08a..c37298479117 100644 --- a/pkg/cmd/roachtest/tests/ruby_pg.go +++ b/pkg/cmd/roachtest/tests/ruby_pg.go @@ -230,7 +230,7 @@ func registerRubyPG(r registry.Registry) { r.Add(registry.TestSpec{ Name: "ruby-pg", Timeout: 1 * time.Hour, - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), NativeLibs: registry.LibGEOS, Tags: registry.Tags(`default`, `orm`), diff --git a/pkg/cmd/roachtest/tests/rust_postgres.go b/pkg/cmd/roachtest/tests/rust_postgres.go index 83da5fb94730..67ff8f3de2e6 100644 --- a/pkg/cmd/roachtest/tests/rust_postgres.go +++ b/pkg/cmd/roachtest/tests/rust_postgres.go @@ -166,7 +166,7 @@ func registerRustPostgres(r registry.Registry) { r.Add(registry.TestSpec{ Name: "rust-postgres", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1, spec.CPU(16)), Tags: registry.Tags(`default`, `orm`), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { diff --git a/pkg/cmd/roachtest/tests/schemachange.go b/pkg/cmd/roachtest/tests/schemachange.go index 6e775c6a836f..9282a48edac9 100644 --- a/pkg/cmd/roachtest/tests/schemachange.go +++ b/pkg/cmd/roachtest/tests/schemachange.go @@ -30,7 +30,7 @@ import ( func registerSchemaChangeDuringKV(r registry.Registry) { r.Add(registry.TestSpec{ Name: `schemachange/during/kv`, - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(5), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { const fixturePath = `gs://cockroach-fixtures/workload/tpch/scalefactor=10/backup?AUTH=implicit` @@ -307,7 +307,7 @@ func makeIndexAddTpccTest( ) registry.TestSpec { return registry.TestSpec{ Name: fmt.Sprintf("schemachange/index/tpcc/w=%d", warehouses), - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, Cluster: spec, Timeout: length * 3, Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { @@ -339,7 +339,7 @@ func makeSchemaChangeBulkIngestTest( ) registry.TestSpec { return registry.TestSpec{ Name: "schemachange/bulkingest", - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(numNodes), Timeout: length * 2, // `fixtures import` (with the workload paths) is not supported in 2.1 @@ -425,7 +425,7 @@ func makeSchemaChangeDuringTPCC( ) registry.TestSpec { return registry.TestSpec{ Name: "schemachange/during/tpcc", - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, Cluster: spec, Timeout: length * 3, Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { diff --git a/pkg/cmd/roachtest/tests/schemachange_random_load.go b/pkg/cmd/roachtest/tests/schemachange_random_load.go index 600446dcdc11..5b7c839d6068 100644 --- a/pkg/cmd/roachtest/tests/schemachange_random_load.go +++ 
b/pkg/cmd/roachtest/tests/schemachange_random_load.go @@ -44,7 +44,7 @@ func registerSchemaChangeRandomLoad(r registry.Registry) { geoZonesStr := strings.Join(geoZones, ",") r.Add(registry.TestSpec{ Name: "schemachange/random-load", - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec( 3, spec.Geo(), @@ -89,7 +89,7 @@ func registerRandomLoadBenchSpec(r registry.Registry, b randomLoadBenchSpec) { r.Add(registry.TestSpec{ Name: name, - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(b.Nodes), NativeLibs: registry.LibGEOS, Skip: "https://github.com/cockroachdb/cockroach/issues/56230", diff --git a/pkg/cmd/roachtest/tests/secondary_indexes.go b/pkg/cmd/roachtest/tests/secondary_indexes.go index c43548535bb3..888eee17dffa 100644 --- a/pkg/cmd/roachtest/tests/secondary_indexes.go +++ b/pkg/cmd/roachtest/tests/secondary_indexes.go @@ -137,7 +137,7 @@ func verifyTableData(node int, expected [][]int) versionStep { func registerSecondaryIndexesMultiVersionCluster(r registry.Registry) { r.Add(registry.TestSpec{ Name: "schemachange/secondary-index-multi-version", - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(3), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { if c.IsLocal() && runtime.GOARCH == "arm64" { diff --git a/pkg/cmd/roachtest/tests/sequelize.go b/pkg/cmd/roachtest/tests/sequelize.go index e0af5557fe3c..6d56a6e5e674 100644 --- a/pkg/cmd/roachtest/tests/sequelize.go +++ b/pkg/cmd/roachtest/tests/sequelize.go @@ -151,7 +151,7 @@ func registerSequelize(r registry.Registry) { r.Add(registry.TestSpec{ Name: "sequelize", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), NativeLibs: registry.LibGEOS, Tags: registry.Tags(`default`, `orm`), diff --git a/pkg/cmd/roachtest/tests/sqlalchemy.go b/pkg/cmd/roachtest/tests/sqlalchemy.go index ebac69566444..8de1095bf539 100644 --- a/pkg/cmd/roachtest/tests/sqlalchemy.go +++ b/pkg/cmd/roachtest/tests/sqlalchemy.go @@ -37,7 +37,7 @@ var supportedSQLAlchemyTag = "2.0.2" func registerSQLAlchemy(r registry.Registry) { r.Add(registry.TestSpec{ Name: "sqlalchemy", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Tags: registry.Tags(`default`, `orm`), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { diff --git a/pkg/cmd/roachtest/tests/tpcc.go b/pkg/cmd/roachtest/tests/tpcc.go index 39b1ed6fafb7..9ea1f43ed1a9 100644 --- a/pkg/cmd/roachtest/tests/tpcc.go +++ b/pkg/cmd/roachtest/tests/tpcc.go @@ -673,7 +673,7 @@ func registerTPCC(r registry.Registry) { tc := multiRegionTests[i] r.Add(registry.TestSpec{ Name: tc.name, - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, // Add an extra node which serves as the workload nodes. 
Cluster: r.MakeClusterSpec(len(regions)*nodesPerRegion+1, spec.Geo(), spec.Zones(strings.Join(zs, ","))), EncryptionSupport: registry.EncryptionMetamorphic, diff --git a/pkg/cmd/roachtest/tests/typeorm.go b/pkg/cmd/roachtest/tests/typeorm.go index 0e06164626e7..1103dc4c43bc 100644 --- a/pkg/cmd/roachtest/tests/typeorm.go +++ b/pkg/cmd/roachtest/tests/typeorm.go @@ -180,7 +180,7 @@ func registerTypeORM(r registry.Registry) { r.Add(registry.TestSpec{ Name: "typeorm", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Tags: registry.Tags(`default`, `orm`), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { diff --git a/pkg/cmd/roachtest/tests/validate_system_schema_after_version_upgrade.go b/pkg/cmd/roachtest/tests/validate_system_schema_after_version_upgrade.go index 4ca669bb1172..63b023d1aa0c 100644 --- a/pkg/cmd/roachtest/tests/validate_system_schema_after_version_upgrade.go +++ b/pkg/cmd/roachtest/tests/validate_system_schema_after_version_upgrade.go @@ -33,7 +33,7 @@ func registerValidateSystemSchemaAfterVersionUpgrade(r registry.Registry) { // and assert that the output matches the expected output content. r.Add(registry.TestSpec{ Name: "systemschema/validate-after-version-upgrade", - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, Cluster: r.MakeClusterSpec(1), Run: func(ctx context.Context, t test.Test, c cluster.Cluster) { if c.IsLocal() && runtime.GOARCH == "arm64" { From d7d77e310e0651681dacaf251c430d128b9445b8 Mon Sep 17 00:00:00 2001 From: maryliag Date: Fri, 12 May 2023 18:05:33 -0400 Subject: [PATCH 3/5] ui: no need to refresh page after error Previously, if one page crashed on DB Console, all other pages would show the same error message, and the user had to force a refresh in the browser to be able to see the other pages. Now only the broken page shows the error and all the other pages load as expected. The user still needs to force a refresh on the broken page if they want to retry. Fixes #97533 Release note (ui change): If a page crashed, a force refresh is no longer required to be able to see the other pages on DB Console. --- .../db-console/src/views/app/containers/layout/index.tsx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/ui/workspaces/db-console/src/views/app/containers/layout/index.tsx b/pkg/ui/workspaces/db-console/src/views/app/containers/layout/index.tsx index dbbd2fded48c..172e6e19545e 100644 --- a/pkg/ui/workspaces/db-console/src/views/app/containers/layout/index.tsx +++ b/pkg/ui/workspaces/db-console/src/views/app/containers/layout/index.tsx @@ -106,7 +106,9 @@ class Layout extends React.Component {
- {this.props.children} + <ErrorBoundary> + {this.props.children} + </ErrorBoundary>
From db60a0718536b5b251fe1527360f81b7758054e0 Mon Sep 17 00:00:00 2001 From: Rui Hu Date: Mon, 15 May 2023 14:44:21 +0000 Subject: [PATCH 4/5] backupccl: disable restore data processor memory monitoring by default Temporarily disable memory monitoring for the restore data processor because the current logic does not handle deletes correctly. Epic: none --- pkg/ccl/backupccl/restore_processor_planning.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/ccl/backupccl/restore_processor_planning.go b/pkg/ccl/backupccl/restore_processor_planning.go index eab36e682ce6..8c0a4a650597 100644 --- a/pkg/ccl/backupccl/restore_processor_planning.go +++ b/pkg/ccl/backupccl/restore_processor_planning.go @@ -28,7 +28,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/physicalplan" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -54,7 +53,7 @@ var memoryMonitorSSTs = settings.RegisterBoolSetting( settings.TenantWritable, "bulkio.restore.memory_monitor_ssts", "if true, restore will limit number of simultaneously open SSTs based on available memory", - util.ConstantWithMetamorphicTestBool("restore-memory-monitor-ssts", true), + false, ) // distRestore plans a 2 stage distSQL flow for a distributed restore. It From 1629e11a6101cabf5feb3364cc8c2b51c8f8791b Mon Sep 17 00:00:00 2001 From: Alex Barganier Date: Thu, 4 May 2023 13:21:47 -0400 Subject: [PATCH 5/5] pkg/util/log: unify explicit flush of network and file sinks Ref: https://github.com/cockroachdb/cockroach/pull/101562 This patch unifies the explicit flush operation so that a single call flushes buffered log sinks both for files *and* network sinks, such as fluent-servers. I received some feedback that we shouldn't divide these flush operations based on sink type, and instead we should unify the flush operation to flush both (as the crash reporter already does). The existing function to flush just the file sinks is quite widely used. Introducing flushing of network sinks raises the question, "What if this adds to the runtime of code using this explicit flush mechanism?" Well, the existing FlushFileSinks calls fsync() [1] with F_FULLFSYNC [2]. IIUC, this means that it is already a blocking operation, as the fsync() call will wait for the buffered data to be written to permanent storage (not just the disk cache). Given that, I think any caller of this function already assumes it's a blocking operation and therefore should be tolerant of that. [1]: https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/fsync.2.html [2]: https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/fcntl.2.html#//apple_ref/doc/man/2/fcntl Nonetheless, we implement a timeout mechanism for the flushing of the buffered network sinks as an added protection.
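To make the new control flow concrete, here is a minimal, self-contained sketch of the two safeguards described above: a guard that turns concurrent flush calls into no-ops while one is in flight, and a per-sink deadline so a wedged network sink cannot block the caller. The names flushAll, flushWithTimeout, and flushInFlight are illustrative only (the real implementation lives in pkg/util/log/log_flush.go, shown in the diff below), and the sketch uses the standard library's atomic.Bool where the real code uses syncutil.AtomicBool:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// flushInFlight makes concurrent flushes no-ops: if a flush is already
// running, later callers return immediately rather than piling up.
var flushInFlight atomic.Bool

// flushWithTimeout runs flush in a goroutine and waits at most d for it
// to finish, so one unreachable sink cannot block process shutdown.
func flushWithTimeout(flush func() error, d time.Duration) {
	doneCh := make(chan struct{})
	timer := time.NewTimer(d)
	defer timer.Stop()
	go func() {
		defer close(doneCh)
		if err := flush(); err != nil {
			// Report on stderr rather than through the logging system,
			// which may itself be what is wedged.
			fmt.Println("error draining buffered log sink:", err)
		}
	}()
	select {
	case <-doneCh:
	case <-timer.C:
		fmt.Println("timed out draining buffered log sink")
	}
}

// flushAll flushes file sinks synchronously, then gives each buffered
// network sink its own deadline before moving on to the next.
func flushAll(flushFiles func(), flushNetSinks []func() error) {
	if flushInFlight.Swap(true) {
		return // another flush is already in progress
	}
	defer flushInFlight.Store(false)
	flushFiles()
	for _, flush := range flushNetSinks {
		flushWithTimeout(flush, 5*time.Second)
	}
}

func main() {
	fast := func() error { return nil }
	// slow simulates an unreachable sink; flushAll moves on after 5s.
	slow := func() error { time.Sleep(10 * time.Second); return nil }
	flushAll(func() { /* fsync file sinks here */ }, []func() error{fast, slow})
}

Note that a caller that loses the race on the guard returns without waiting for the winning flush to complete; for crash paths, where the first flush is already doing the work, that trade-off seems acceptable.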
--- pkg/ccl/backupccl/utils_test.go | 2 +- pkg/ccl/changefeedccl/changefeed_test.go | 10 +-- pkg/ccl/changefeedccl/helpers_test.go | 2 +- pkg/ccl/changefeedccl/nemeses_test.go | 2 +- .../telemetryccl/telemetry_logging_test.go | 4 +- pkg/ccl/testccl/authccl/auth_test.go | 4 +- pkg/ccl/testccl/sqlccl/tenant_gc_test.go | 2 +- pkg/cli/connect.go | 2 +- pkg/cli/debug_send_kv_batch_test.go | 2 +- pkg/cli/demo.go | 2 +- pkg/cli/start.go | 4 +- pkg/jobs/jobstest/logutils.go | 2 +- pkg/jobs/registry_external_test.go | 2 +- .../protectedts/ptstorage/storage_test.go | 4 +- pkg/kv/kvserver/replica_test.go | 2 +- pkg/kv/kvserver/replicate_queue_test.go | 2 +- pkg/security/certmgr/cert_manager_test.go | 2 +- pkg/security/certs_rotation_test.go | 2 +- pkg/server/server_test.go | 2 +- pkg/server/status.go | 6 +- pkg/server/status/runtime_stats_test.go | 2 +- .../structlogging/hot_ranges_log_test.go | 2 +- pkg/sql/admin_audit_log_test.go | 10 +-- pkg/sql/event_log_test.go | 4 +- pkg/sql/pgwire/auth_test.go | 4 +- .../captured_index_usage_stats_test.go | 2 +- pkg/sql/telemetry_logging_test.go | 10 +-- .../upgrades/schema_changes_external_test.go | 2 +- pkg/util/hlc/hlc_test.go | 4 +- pkg/util/log/clog_test.go | 2 +- pkg/util/log/doc.go | 6 +- pkg/util/log/file.go | 2 +- pkg/util/log/file_log_gc_test.go | 4 +- pkg/util/log/formats_test.go | 2 +- pkg/util/log/log_flush.go | 77 ++++++++++++------- pkg/util/log/logcrash/crash_reporting.go | 2 +- pkg/util/log/secondary_log_test.go | 4 +- pkg/util/log/test_log_scope.go | 4 +- 38 files changed, 114 insertions(+), 89 deletions(-) diff --git a/pkg/ccl/backupccl/utils_test.go b/pkg/ccl/backupccl/utils_test.go index f513f1dcd621..db9732105a63 100644 --- a/pkg/ccl/backupccl/utils_test.go +++ b/pkg/ccl/backupccl/utils_test.go @@ -663,7 +663,7 @@ func requireRecoveryEvent( expected eventpb.RecoveryEvent, ) { testutils.SucceedsSoon(t, func() error { - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles( startTime, math.MaxInt64, diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go index 09334572cd25..df8a78c5c095 100644 --- a/pkg/ccl/changefeedccl/changefeed_test.go +++ b/pkg/ccl/changefeedccl/changefeed_test.go @@ -1802,7 +1802,7 @@ func TestChangefeedSchemaChangeNoBackfill(t *testing.T) { cdcTest(t, testFn) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) if err != nil { @@ -2172,7 +2172,7 @@ func TestChangefeedSchemaChangeBackfillCheckpoint(t *testing.T) { cdcTestWithSystem(t, testFn, feedTestEnterpriseSinks) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) if err != nil { @@ -2358,7 +2358,7 @@ func TestChangefeedSchemaChangeAllowBackfill(t *testing.T) { cdcTestWithSystem(t, testFn) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) if err != nil { @@ -2410,7 +2410,7 @@ func TestChangefeedSchemaChangeBackfillScope(t *testing.T) { } cdcTestWithSystem(t, testFn) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) if err != nil { @@ -2517,7 +2517,7 @@ func TestChangefeedAfterSchemaChangeBackfill(t *testing.T) { } cdcTest(t, 
testFn) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) if err != nil { diff --git a/pkg/ccl/changefeedccl/helpers_test.go b/pkg/ccl/changefeedccl/helpers_test.go index 806e48d235c3..676d84f8ecb8 100644 --- a/pkg/ccl/changefeedccl/helpers_test.go +++ b/pkg/ccl/changefeedccl/helpers_test.go @@ -1041,7 +1041,7 @@ var cmLogRe = regexp.MustCompile(`event_log\.go`) func checkStructuredLogs(t *testing.T, eventType string, startTime int64) []string { var matchingEntries []string testutils.SucceedsSoon(t, func() error { - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(startTime, math.MaxInt64, 10000, cmLogRe, log.WithMarkedSensitiveData) if err != nil { diff --git a/pkg/ccl/changefeedccl/nemeses_test.go b/pkg/ccl/changefeedccl/nemeses_test.go index a8700188289d..907bdd169b85 100644 --- a/pkg/ccl/changefeedccl/nemeses_test.go +++ b/pkg/ccl/changefeedccl/nemeses_test.go @@ -47,7 +47,7 @@ func TestChangefeedNemeses(t *testing.T) { // nemeses_test.go:39: pq: unimplemented: operation is // unsupported in multi-tenancy mode cdcTest(t, testFn, feedTestNoTenants) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) if err != nil { diff --git a/pkg/ccl/telemetryccl/telemetry_logging_test.go b/pkg/ccl/telemetryccl/telemetry_logging_test.go index fdac93b9161e..a2c68cc98820 100644 --- a/pkg/ccl/telemetryccl/telemetry_logging_test.go +++ b/pkg/ccl/telemetryccl/telemetry_logging_test.go @@ -107,7 +107,7 @@ func TestTelemetryLogRegions(t *testing.T) { sqlDB.Exec(t, tc.query) } - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles( 0, @@ -322,7 +322,7 @@ func TestBulkJobTelemetryLogging(t *testing.T) { execTimestamp++ } - log.FlushFileSinks() + log.Flush() var filteredSampleQueries []logpb.Entry testutils.SucceedsSoon(t, func() error { diff --git a/pkg/ccl/testccl/authccl/auth_test.go b/pkg/ccl/testccl/authccl/auth_test.go index 7b32286fc315..a1b99a1e5827 100644 --- a/pkg/ccl/testccl/authccl/auth_test.go +++ b/pkg/ccl/testccl/authccl/auth_test.go @@ -528,7 +528,7 @@ func TestClientAddrOverride(t *testing.T) { t.Run("check-server-log-uses-override", func(t *testing.T) { // Wait for the disconnection event in logs. testutils.SucceedsSoon(t, func() error { - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(testStartTime.UnixNano(), math.MaxInt64, 10000, sessionTerminatedRe, log.WithMarkedSensitiveData) if err != nil { @@ -541,7 +541,7 @@ func TestClientAddrOverride(t *testing.T) { }) // Now we want to check that the logging tags are also updated. 
- log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(testStartTime.UnixNano(), math.MaxInt64, 10000, authLogFileRe, log.WithMarkedSensitiveData) if err != nil { diff --git a/pkg/ccl/testccl/sqlccl/tenant_gc_test.go b/pkg/ccl/testccl/sqlccl/tenant_gc_test.go index 2ea85661f5f5..08c0d4586b4e 100644 --- a/pkg/ccl/testccl/sqlccl/tenant_gc_test.go +++ b/pkg/ccl/testccl/sqlccl/tenant_gc_test.go @@ -505,7 +505,7 @@ func TestGCTenantJobWaitsForProtectedTimestamps(t *testing.T) { checkGCBlockedByPTS := func(t *testing.T, sj *jobs.StartableJob, tenID uint64) { testutils.SucceedsSoon(t, func() error { - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile(fmt.Sprintf("GC TTL for dropped tenant %d has expired, but protected timestamp record\\(s\\)", tenID)), log.WithFlattenedSensitiveData) diff --git a/pkg/cli/connect.go b/pkg/cli/connect.go index 9dee8e107f09..b60c5b91296c 100644 --- a/pkg/cli/connect.go +++ b/pkg/cli/connect.go @@ -76,7 +76,7 @@ func runConnectInit(cmd *cobra.Command, args []string) (retErr error) { } // Ensure that log files are populated when the process terminates. - defer log.FlushFileSinks() + defer log.Flush() peers := []string(serverCfg.JoinList) ctx, cancel := context.WithCancel(context.Background()) diff --git a/pkg/cli/debug_send_kv_batch_test.go b/pkg/cli/debug_send_kv_batch_test.go index 07c17b9784bd..9dca99252e9b 100644 --- a/pkg/cli/debug_send_kv_batch_test.go +++ b/pkg/cli/debug_send_kv_batch_test.go @@ -123,7 +123,7 @@ func TestSendKVBatch(t *testing.T) { require.JSONEq(t, jsonResponse, output) // Check that a structured log event was emitted. - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(start.UnixNano(), timeutil.Now().UnixNano(), 1, regexp.MustCompile("debug_send_kv_batch"), log.WithFlattenedSensitiveData) require.NoError(t, err) diff --git a/pkg/cli/demo.go b/pkg/cli/demo.go index 93b2e8c81f88..431487dd250d 100644 --- a/pkg/cli/demo.go +++ b/pkg/cli/demo.go @@ -376,7 +376,7 @@ func runDemoInternal( } // Ensure the last few entries in the log files are flushed at the end. - defer log.FlushFileSinks() + defer log.Flush() return sqlCtx.Run(ctx, conn) } diff --git a/pkg/cli/start.go b/pkg/cli/start.go index bdd97d17a684..684457e612b8 100644 --- a/pkg/cli/start.go +++ b/pkg/cli/start.go @@ -771,7 +771,7 @@ func createAndStartServerAsync( go func() { // Ensure that the log files see the startup messages immediately. - defer log.FlushFileSinks() + defer log.Flush() // If anything goes dramatically wrong, use Go's panic/recover // mechanism to intercept the panic and log the panic details to // the error reporting server. @@ -1524,7 +1524,7 @@ func reportReadinessExternally(ctx context.Context, cmd *cobra.Command, waitForI // Ensure the configuration logging is written to disk in case a // process is waiting for the sdnotify readiness to read important // information from there. - log.FlushFileSinks() + log.Flush() // Signal readiness. This unblocks the process when running with // --background or under systemd. diff --git a/pkg/jobs/jobstest/logutils.go b/pkg/jobs/jobstest/logutils.go index 37102683d20a..7299d0b7e4c4 100644 --- a/pkg/jobs/jobstest/logutils.go +++ b/pkg/jobs/jobstest/logutils.go @@ -36,7 +36,7 @@ func CheckEmittedEvents( ) { // Check that the structured event was logged. 
testutils.SucceedsSoon(t, func() error { - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(startTime, math.MaxInt64, 10000, cmLogRe, log.WithMarkedSensitiveData) if err != nil { diff --git a/pkg/jobs/registry_external_test.go b/pkg/jobs/registry_external_test.go index a600b257502d..8bf561d6a09e 100644 --- a/pkg/jobs/registry_external_test.go +++ b/pkg/jobs/registry_external_test.go @@ -543,7 +543,7 @@ SELECT unnest(execution_errors) t *testing.T, id jobspb.JobID, status jobs.Status, from, to time.Time, cause string, ) { - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles( from.UnixNano(), to.UnixNano(), 2, regexp.MustCompile(fmt.Sprintf( diff --git a/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go b/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go index f96f5d065492..482f5a1777ba 100644 --- a/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go +++ b/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go @@ -645,7 +645,7 @@ func TestCorruptData(t *testing.T) { require.NoError(t, err) } - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 100, msg, log.WithFlattenedSensitiveData) require.NoError(t, err) @@ -732,7 +732,7 @@ func TestCorruptData(t *testing.T) { require.Nil(t, got) _, err = pts.GetState(ctx) require.NoError(t, err) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 100, msg, log.WithFlattenedSensitiveData) diff --git a/pkg/kv/kvserver/replica_test.go b/pkg/kv/kvserver/replica_test.go index e35c927b1955..8ece4233b256 100644 --- a/pkg/kv/kvserver/replica_test.go +++ b/pkg/kv/kvserver/replica_test.go @@ -13530,7 +13530,7 @@ func TestProposalNotAcknowledgedOrReproposedAfterApplication(t *testing.T) { if _, pErr := tc.repl.Send(ctx, ba); pErr != nil { t.Fatal(pErr) } - log.FlushFileSinks() + log.Flush() stopper.Quiesce(ctx) entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, diff --git a/pkg/kv/kvserver/replicate_queue_test.go b/pkg/kv/kvserver/replicate_queue_test.go index 3ae162fedfc7..ee74a0ffd021 100644 --- a/pkg/kv/kvserver/replicate_queue_test.go +++ b/pkg/kv/kvserver/replicate_queue_test.go @@ -884,7 +884,7 @@ func TestReplicateQueueTracingOnError(t *testing.T) { // Flush logs and get log messages from replicate_queue.go since just // before calling store.Enqueue(..). - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(testStartTs.UnixNano(), math.MaxInt64, 100, regexp.MustCompile(`replicate_queue\.go`), log.WithMarkedSensitiveData) require.NoError(t, err) diff --git a/pkg/security/certmgr/cert_manager_test.go b/pkg/security/certmgr/cert_manager_test.go index bef9ebeb706a..623f2cda66b1 100644 --- a/pkg/security/certmgr/cert_manager_test.go +++ b/pkg/security/certmgr/cert_manager_test.go @@ -57,7 +57,7 @@ var cmLogRe = regexp.MustCompile(`event_log\.go`) // Check that the structured event was logged. 
func checkLogStructEntry(t *testing.T, expectSuccess bool, beforeReload time.Time) error { - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(beforeReload.UnixNano(), math.MaxInt64, 10000, cmLogRe, log.WithMarkedSensitiveData) if err != nil { diff --git a/pkg/security/certs_rotation_test.go b/pkg/security/certs_rotation_test.go index 0f39ee83df39..8979464c055a 100644 --- a/pkg/security/certs_rotation_test.go +++ b/pkg/security/certs_rotation_test.go @@ -201,7 +201,7 @@ func TestRotateCerts(t *testing.T) { // the moment the structured logging event is actually // written to the log file. testutils.SucceedsSoon(t, func() error { - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(beforeReload.UnixNano(), math.MaxInt64, 10000, cmLogRe, log.WithMarkedSensitiveData) if err != nil { diff --git a/pkg/server/server_test.go b/pkg/server/server_test.go index acbfee91e78c..9870d9c34616 100644 --- a/pkg/server/server_test.go +++ b/pkg/server/server_test.go @@ -592,7 +592,7 @@ func TestPersistHLCUpperBound(t *testing.T) { var fatal bool defer log.ResetExitFunc() log.SetExitFunc(true /* hideStack */, func(r exit.Code) { - defer log.FlushFileSinks() + defer log.Flush() if r == exit.FatalError() { fatal = true } diff --git a/pkg/server/status.go b/pkg/server/status.go index cde19f000512..551e49bf031d 100644 --- a/pkg/server/status.go +++ b/pkg/server/status.go @@ -1238,7 +1238,7 @@ func (s *statusServer) LogFilesList( } return status.LogFilesList(ctx, req) } - log.FlushFileSinks() + log.Flush() logFiles, err := log.ListLogFiles() if err != nil { return nil, serverError(ctx, err) @@ -1278,7 +1278,7 @@ func (s *statusServer) LogFile( inputEditMode := log.SelectEditMode(req.Redact, log.KeepRedactable) // Ensure that the latest log entries are available in files. - log.FlushFileSinks() + log.Flush() // Read the logs. reader, err := log.GetLogReader(req.File) @@ -1408,7 +1408,7 @@ func (s *statusServer) Logs( } // Ensure that the latest log entries are available in files. - log.FlushFileSinks() + log.Flush() // Read the logs. entries, err := log.FetchEntriesFromFiles( diff --git a/pkg/server/status/runtime_stats_test.go b/pkg/server/status/runtime_stats_test.go index d04e453cf34b..601a36276159 100644 --- a/pkg/server/status/runtime_stats_test.go +++ b/pkg/server/status/runtime_stats_test.go @@ -44,7 +44,7 @@ func TestStructuredEventLogging(t *testing.T) { time.Sleep(10 * time.Second) // Ensure that the entry hits the OS so it can be read back below. 
- log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(testStartTs.UnixNano(), math.MaxInt64, 10000, cmLogRe, log.WithMarkedSensitiveData) diff --git a/pkg/server/structlogging/hot_ranges_log_test.go b/pkg/server/structlogging/hot_ranges_log_test.go index 2cbbea9a4f50..3f1982b2ec67 100644 --- a/pkg/server/structlogging/hot_ranges_log_test.go +++ b/pkg/server/structlogging/hot_ranges_log_test.go @@ -84,7 +84,7 @@ func TestHotRangesStats(t *testing.T) { }) testutils.SucceedsWithin(t, func() error { - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles( 0, math.MaxInt64, diff --git a/pkg/sql/admin_audit_log_test.go b/pkg/sql/admin_audit_log_test.go index 0efeca3d452f..4b7ee9bae57e 100644 --- a/pkg/sql/admin_audit_log_test.go +++ b/pkg/sql/admin_audit_log_test.go @@ -73,7 +73,7 @@ func TestAdminAuditLogBasic(t *testing.T) { db.Exec(t, `SELECT 1;`) var selectAdminRe = regexp.MustCompile(`"EventType":"admin_query","Statement":"SELECT ‹1›","Tag":"SELECT","User":"root"`) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 10000, selectAdminRe, log.WithMarkedSensitiveData) @@ -124,7 +124,7 @@ func TestAdminAuditLogRegularUser(t *testing.T) { var selectRe = regexp.MustCompile(`SELECT 1`) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 10000, selectRe, log.WithMarkedSensitiveData) @@ -180,7 +180,7 @@ COMMIT; }, } - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles( 0, @@ -275,7 +275,7 @@ COMMIT; }, } - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles( 0, @@ -319,7 +319,7 @@ COMMIT; t.Fatal(err) } - log.FlushFileSinks() + log.Flush() entries, err = log.FetchEntriesFromFiles( 0, diff --git a/pkg/sql/event_log_test.go b/pkg/sql/event_log_test.go index 8aece89e2f72..ca237786077a 100644 --- a/pkg/sql/event_log_test.go +++ b/pkg/sql/event_log_test.go @@ -82,7 +82,7 @@ func TestStructuredEventLogging(t *testing.T) { } // Ensure that the entries hit the OS so they can be read back below. - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(testStartTs.UnixNano(), math.MaxInt64, 10000, execLogRe, log.WithMarkedSensitiveData) @@ -736,7 +736,7 @@ func TestPerfLogging(t *testing.T) { } var logRe = regexp.MustCompile(tc.logRe) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles( start, math.MaxInt64, 1000, logRe, log.WithMarkedSensitiveData, ) diff --git a/pkg/sql/pgwire/auth_test.go b/pkg/sql/pgwire/auth_test.go index f6fa56e9c700..4858105c567c 100644 --- a/pkg/sql/pgwire/auth_test.go +++ b/pkg/sql/pgwire/auth_test.go @@ -734,7 +734,7 @@ func TestClientAddrOverride(t *testing.T) { t.Run("check-server-log-uses-override", func(t *testing.T) { // Wait for the disconnection event in logs. testutils.SucceedsSoon(t, func() error { - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(testStartTime.UnixNano(), math.MaxInt64, 10000, sessionTerminatedRe, log.WithMarkedSensitiveData) if err != nil { @@ -747,7 +747,7 @@ func TestClientAddrOverride(t *testing.T) { }) // Now we want to check that the logging tags are also updated. 
- log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(testStartTime.UnixNano(), math.MaxInt64, 10000, authLogFileRe, log.WithMarkedSensitiveData) if err != nil { diff --git a/pkg/sql/scheduledlogging/captured_index_usage_stats_test.go b/pkg/sql/scheduledlogging/captured_index_usage_stats_test.go index c4a0b70d2899..7c452c45f759 100644 --- a/pkg/sql/scheduledlogging/captured_index_usage_stats_test.go +++ b/pkg/sql/scheduledlogging/captured_index_usage_stats_test.go @@ -281,7 +281,7 @@ func checkNumTotalEntriesAndNumIndexEntries( expectedIndividualIndexEntries int, scheduleCompleteChan chan struct{}, ) error { - log.FlushFileSinks() + log.Flush() // Fetch log entries. entries, err := log.FetchEntriesFromFiles( 0, diff --git a/pkg/sql/telemetry_logging_test.go b/pkg/sql/telemetry_logging_test.go index 8c3ca58cf7cf..cad9ebd844aa 100644 --- a/pkg/sql/telemetry_logging_test.go +++ b/pkg/sql/telemetry_logging_test.go @@ -365,7 +365,7 @@ func TestTelemetryLogging(t *testing.T) { } } - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles( 0, @@ -672,7 +672,7 @@ func TestNoTelemetryLogOnTroubleshootMode(t *testing.T) { db.Exec(t, tc.query) } - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles( 0, @@ -876,7 +876,7 @@ func TestTelemetryLogJoinTypesAndAlgorithms(t *testing.T) { db.Exec(t, tc.query) } - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles( 0, @@ -1131,7 +1131,7 @@ func TestTelemetryScanCounts(t *testing.T) { db.Exec(t, tc.query) } - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles( 0, @@ -1245,7 +1245,7 @@ $$` db.Exec(t, stmt) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles( 0, diff --git a/pkg/upgrade/upgrades/schema_changes_external_test.go b/pkg/upgrade/upgrades/schema_changes_external_test.go index 5f9df3a249f9..a7bd9894c05e 100644 --- a/pkg/upgrade/upgrades/schema_changes_external_test.go +++ b/pkg/upgrade/upgrades/schema_changes_external_test.go @@ -510,7 +510,7 @@ func testMigrationWithFailures( }) if test.waitForMigrationRestart { // Ensure that we have observed the expected number of ignored schema change jobs. 
- log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles( 0, math.MaxInt64, 10000, regexp.MustCompile("skipping.*operation as the schema change already exists."), diff --git a/pkg/util/hlc/hlc_test.go b/pkg/util/hlc/hlc_test.go index 11c88e075f33..50cd0e1c0298 100644 --- a/pkg/util/hlc/hlc_test.go +++ b/pkg/util/hlc/hlc_test.go @@ -438,7 +438,7 @@ func TestHLCEnforceWallTimeWithinBoundsInNow(t *testing.T) { var fatal bool defer log.ResetExitFunc() log.SetExitFunc(true /* hideStack */, func(r exit.Code) { - defer log.FlushFileSinks() + defer log.Flush() if r == exit.FatalError() { fatal = true } @@ -487,7 +487,7 @@ func TestHLCEnforceWallTimeWithinBoundsInUpdate(t *testing.T) { var fatal bool defer log.ResetExitFunc() log.SetExitFunc(true /* hideStack */, func(r exit.Code) { - defer log.FlushFileSinks() + defer log.Flush() if r == exit.FatalError() { fatal = true } diff --git a/pkg/util/log/clog_test.go b/pkg/util/log/clog_test.go index b51bcf461003..69b4ef0aaea2 100644 --- a/pkg/util/log/clog_test.go +++ b/pkg/util/log/clog_test.go @@ -651,7 +651,7 @@ func TestFileSeverityFilter(t *testing.T) { Infof(context.Background(), "test1") Errorf(context.Background(), "test2") - FlushFileSinks() + Flush() debugFileSink := debugFileSinkInfo.sink.(*fileSink) contents, err := os.ReadFile(debugFileSink.getFileName(t)) diff --git a/pkg/util/log/doc.go b/pkg/util/log/doc.go index c1fb3f1aa10f..5885a423b641 100644 --- a/pkg/util/log/doc.go +++ b/pkg/util/log/doc.go @@ -83,10 +83,10 @@ // // # Output // -// Log output is buffered and written periodically using FlushFileSinks. -// Programs should call FlushFileSinks before exiting to guarantee all +// Log output is buffered and written periodically using Flush. +// Programs should call Flush before exiting to guarantee all // log output is written to files. Note that buffered network sinks also -// exist. If you'd like to flush these as well, call FlushAllSync. +// exist. If you'd like to flush these as well, call Flush. // // By default, all log statements write to files in a temporary directory. // This package provides several flags that modify this behavior. diff --git a/pkg/util/log/file.go b/pkg/util/log/file.go index 8778e0f82fcd..52f0ed218c64 100644 --- a/pkg/util/log/file.go +++ b/pkg/util/log/file.go @@ -67,7 +67,7 @@ type fileSink struct { // name generator for log files. nameGenerator fileNameGenerator - // bufferedWrites if false calls file.FlushFileSinks on every log + // bufferedWrites if false calls file.Flush on every log // write. This can be set per-logger e.g. for audit logging. // // Note that synchronization for all log files simultaneously can diff --git a/pkg/util/log/file_log_gc_test.go b/pkg/util/log/file_log_gc_test.go index ed6e907bc5b6..813d774a95cc 100644 --- a/pkg/util/log/file_log_gc_test.go +++ b/pkg/util/log/file_log_gc_test.go @@ -158,7 +158,7 @@ func testLogGC(t *testing.T, fileSink *fileSink, logFn func(ctx context.Context, const newLogFiles = 20 for i := 1; i < newLogFiles; i++ { logFn(context.Background(), fmt.Sprint(i)) - FlushFileSinks() + Flush() } if _, err := expectFileCount(newLogFiles); err != nil { t.Fatal(err) @@ -169,7 +169,7 @@ func testLogGC(t *testing.T, fileSink *fileSink, logFn func(ctx context.Context, // Emit a log line which will rotate the files and trigger GC. 
logFn(context.Background(), "final") - FlushFileSinks() + Flush() succeedsSoon(t, func() error { _, err := expectFileCount(expectedFilesAfterGC) diff --git a/pkg/util/log/formats_test.go b/pkg/util/log/formats_test.go index 510f6033c4db..dc4895602049 100644 --- a/pkg/util/log/formats_test.go +++ b/pkg/util/log/formats_test.go @@ -79,7 +79,7 @@ func TestFormatRedaction(t *testing.T) { defer cleanupFn() Infof(ctx, "safe2 %s", "secret3") - FlushFileSinks() + Flush() contents, err := os.ReadFile(getDebugLogFileName(t)) require.NoError(t, err) diff --git a/pkg/util/log/log_flush.go b/pkg/util/log/log_flush.go index 05054f0e51cc..ff9e3783aaf4 100644 --- a/pkg/util/log/log_flush.go +++ b/pkg/util/log/log_flush.go @@ -17,6 +17,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/util/envutil" + "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/sysutil" ) @@ -27,42 +28,66 @@ type flushSyncWriter interface { io.Writer } -// FlushFileSinks explicitly flushes all pending log file I/O. -// See also flushDaemon() that manages background (asynchronous) -// flushes, and signalFlusher() that manages flushes in reaction to a -// user signal. -func FlushFileSinks() { - _ = logging.allSinkInfos.iterFileSinks(func(l *fileSink) error { - l.lockAndFlushAndMaybeSync(true /*doSync*/) - return nil - }) -} +// flushActive indicates if a current Flush() is executing. If true, +// additional calls to Flush() will be a noop and return early, until +// the current Flush() call has completed. +var flushActive syncutil.AtomicBool -// FlushAllSync explicitly flushes all asynchronous buffered logging sinks, +// Flush explicitly flushes all asynchronous buffered logging sinks, // including pending log file I/O and buffered network sinks. // // NB: This is a synchronous operation, and will block until all flushes // have completed. Generally only recommended for use in crash reporting // scenarios. -func FlushAllSync() { - FlushFileSinks() +// +// When flushing buffered network logging sinks, each sink is given a +// 5-second timeout before we move on to attempt flushing the next. +func Flush() { + if flushActive.Swap(true) { + return + } + defer flushActive.Swap(false) + + // Flush all file sinks. + _ = logging.allSinkInfos.iterFileSinks(func(l *fileSink) error { + l.lockAndFlushAndMaybeSync(true /*doSync*/) + return nil + }) + + // Flush all buffered network sinks. _ = logging.allSinkInfos.iterBufferedSinks(func(bs *bufferedSink) error { - // Trigger a synchronous flush by calling output on the bufferedSink - // with a `forceSync` option. - err := bs.output([]byte{}, sinkOutputOptions{forceSync: true}) - if err != nil { - // We don't want to let errors to stop us from iterating and flushing - // the remaining buffered log sinks. Nor do we want to log the error - // using the logging system, as it's unlikely to make it to the - // destination sink anyway (there's a good chance we're flushing - // as part of handling a panic). Display the error and continue. - fmt.Printf("Error draining buffered log sink: %v\n", err) + doneCh := make(chan struct{}) + // Set a timer, so we don't prevent the process from exiting if the + // child sink is unavailable & the request hangs. + timer := time.NewTimer(5 * time.Second) + go func() { + // Trigger a synchronous flush by calling output on the bufferedSink + // with a `forceSync` option. 
+ err := bs.output([]byte{}, sinkOutputOptions{forceSync: true}) + if err != nil { + // We don't want to let errors stop us from iterating and flushing + // the remaining buffered log sinks. Nor do we want to log the error + // using the logging system, as it's unlikely to make it to the + // destination sink anyway (there's a good chance we're flushing + // as part of handling a panic). Display the error. + fmt.Fprintf(OrigStderr, "error draining buffered log sink: %v\n", err) + } + doneCh <- struct{}{} + }() + + select { + case <-doneCh: + case <-timer.C: + fmt.Fprintf(OrigStderr, "timed out draining buffered log sink: %T\n", bs.child) + } + // In the event of errors or timeouts, we still want to attempt to flush + // any remaining buffered sinks. Return nil so the iterator can continue. return nil }) } func init() { + flushActive.Set(false) go flushDaemon() go signalFlusher() } @@ -86,7 +111,7 @@ const syncWarnDuration = 10 * time.Second // flushDaemon periodically flushes and syncs the log file buffers. // This manages both the primary and secondary loggers. // -// FlushFileSinks propagates the in-memory buffer inside CockroachDB to the +// Flush propagates the in-memory buffer inside CockroachDB to the // in-memory buffer(s) of the OS. The flush is relatively frequent so // that a human operator can see "up to date" logging data in the log // file. @@ -122,7 +147,7 @@ func signalFlusher() { ch := sysutil.RefreshSignaledChan() for sig := range ch { Ops.Infof(context.Background(), "%s received, flushing logs", sig) - FlushFileSinks() + Flush() } } @@ -134,5 +159,5 @@ func signalFlusher() { func StartAlwaysFlush() { logging.flushWrites.Set(true) // There may be something in the buffers already; flush it. - FlushFileSinks() + Flush() } diff --git a/pkg/util/log/logcrash/crash_reporting.go b/pkg/util/log/logcrash/crash_reporting.go index a22d87a3ead6..e5a74d7e2054 100644 --- a/pkg/util/log/logcrash/crash_reporting.go +++ b/pkg/util/log/logcrash/crash_reporting.go @@ -192,7 +192,7 @@ func ReportPanic(ctx context.Context, sv *settings.Values, r interface{}, depth // Ensure that the logs are flushed before letting a panic // terminate the server. - log.FlushAllSync() + log.Flush() } // PanicAsError turns r into an error if it is not one already. diff --git a/pkg/util/log/secondary_log_test.go b/pkg/util/log/secondary_log_test.go index 1e0770c8f56c..f691712bd851 100644 --- a/pkg/util/log/secondary_log_test.go +++ b/pkg/util/log/secondary_log_test.go @@ -71,7 +71,7 @@ func TestSecondaryLog(t *testing.T) { Infof(context.Background(), "test2") // Make sure the content made it to disk. - FlushFileSinks() + Flush() // Check that the messages indeed made it to different files. @@ -151,7 +151,7 @@ func TestListLogFilesIncludeSecondaryLogs(t *testing.T) { // Emit some logging and ensure the files gets created. ctx := context.Background() Sessions.Infof(ctx, "story time") - FlushFileSinks() + Flush() results, err := ListLogFiles() if err != nil { diff --git a/pkg/util/log/test_log_scope.go b/pkg/util/log/test_log_scope.go index 78e7b2303fcd..de586987c7fd 100644 --- a/pkg/util/log/test_log_scope.go +++ b/pkg/util/log/test_log_scope.go @@ -365,7 +365,7 @@ func (l *TestLogScope) Rotate(t tShim) { t.Helper() t.Logf("-- test log scope file rotation --") // Ensure remaining logs are written.
- FlushFileSinks() + Flush() if err := logging.allSinkInfos.iterFileSinks(func(l *fileSink) error { l.mu.Lock() @@ -387,7 +387,7 @@ func (l *TestLogScope) Close(t tShim) { t.Logf("-- test log scope end --") // Ensure any remaining logs are written to files. - FlushFileSinks() + Flush() if l.logDir != "" { defer func() {