diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 1479f7f11477..fd60c4d7d770 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -75,36 +75,36 @@ /pkg/sql/show_create*.go @cockroachdb/sql-syntax-prs /pkg/sql/types/ @cockroachdb/sql-syntax-prs -/pkg/sql/crdb_internal.go @cockroachdb/sql-sessions -/pkg/sql/pg_catalog.go @cockroachdb/sql-sessions -/pkg/sql/pgwire/ @cockroachdb/sql-sessions @cockroachdb/server-prs -/pkg/sql/pgwire/auth.go @cockroachdb/sql-sessions @cockroachdb/server-prs @cockroachdb/prodsec -/pkg/sql/sem/builtins/ @cockroachdb/sql-sessions -/pkg/sql/vtable/ @cockroachdb/sql-sessions - -/pkg/sql/sessiondata/ @cockroachdb/sql-sessions -/pkg/sql/tests/rsg_test.go @cockroachdb/sql-sessions -/pkg/sql/ttl @cockroachdb/sql-sessions - -/pkg/ccl/schemachangerccl/ @cockroachdb/sql-schema -/pkg/sql/catalog/ @cockroachdb/sql-schema -/pkg/sql/catalog/multiregion @cockroachdb/sql-schema -/pkg/sql/doctor/ @cockroachdb/sql-schema -/pkg/sql/gcjob/ @cockroachdb/sql-schema -/pkg/sql/gcjob_test/ @cockroachdb/sql-schema -/pkg/sql/privilege/ @cockroachdb/sql-schema -/pkg/sql/schemachange/ @cockroachdb/sql-schema -/pkg/sql/schemachanger/ @cockroachdb/sql-schema -/pkg/sql/alter*.go @cockroachdb/sql-schema -/pkg/sql/backfill*.go @cockroachdb/sql-schema -/pkg/sql/create*.go @cockroachdb/sql-schema -/pkg/sql/database*.go @cockroachdb/sql-schema -/pkg/sql/drop*.go @cockroachdb/sql-schema -/pkg/sql/grant*.go @cockroachdb/sql-schema -/pkg/sql/rename*.go @cockroachdb/sql-schema -/pkg/sql/revoke*.go @cockroachdb/sql-schema -/pkg/sql/schema*.go @cockroachdb/sql-schema -/pkg/sql/zone*.go @cockroachdb/sql-schema +/pkg/sql/crdb_internal.go @cockroachdb/sql-foundations +/pkg/sql/pg_catalog.go @cockroachdb/sql-foundations +/pkg/sql/pgwire/ @cockroachdb/sql-foundations @cockroachdb/server-prs +/pkg/sql/pgwire/auth.go @cockroachdb/sql-foundations @cockroachdb/server-prs @cockroachdb/prodsec +/pkg/sql/sem/builtins/ @cockroachdb/sql-foundations +/pkg/sql/vtable/ @cockroachdb/sql-foundations + +/pkg/sql/sessiondata/ @cockroachdb/sql-foundations +/pkg/sql/tests/rsg_test.go @cockroachdb/sql-foundations +/pkg/sql/ttl @cockroachdb/sql-foundations + +/pkg/ccl/schemachangerccl/ @cockroachdb/sql-foundations +/pkg/sql/catalog/ @cockroachdb/sql-foundations +/pkg/sql/catalog/multiregion @cockroachdb/sql-foundations +/pkg/sql/doctor/ @cockroachdb/sql-foundations +/pkg/sql/gcjob/ @cockroachdb/sql-foundations +/pkg/sql/gcjob_test/ @cockroachdb/sql-foundations +/pkg/sql/privilege/ @cockroachdb/sql-foundations +/pkg/sql/schemachange/ @cockroachdb/sql-foundations +/pkg/sql/schemachanger/ @cockroachdb/sql-foundations +/pkg/sql/alter*.go @cockroachdb/sql-foundations +/pkg/sql/backfill*.go @cockroachdb/sql-foundations +/pkg/sql/create*.go @cockroachdb/sql-foundations +/pkg/sql/database*.go @cockroachdb/sql-foundations +/pkg/sql/drop*.go @cockroachdb/sql-foundations +/pkg/sql/grant*.go @cockroachdb/sql-foundations +/pkg/sql/rename*.go @cockroachdb/sql-foundations +/pkg/sql/revoke*.go @cockroachdb/sql-foundations +/pkg/sql/schema*.go @cockroachdb/sql-foundations +/pkg/sql/zone*.go @cockroachdb/sql-foundations # Beware to not assign the CLI package directory to a single team, at # least until we heavily refactor the package to extract team-specific @@ -116,26 +116,26 @@ /pkg/cli/cli.go @cockroachdb/cli-prs /pkg/cli/cli_debug*.go @cockroachdb/kv-prs @cockroachdb/cli-prs /pkg/cli/cli_test.go @cockroachdb/cli-prs -/pkg/cli/clientflags/ @cockroachdb/sql-sessions @cockroachdb/cli-prs -/pkg/cli/clienturl/ 
@cockroachdb/sql-sessions @cockroachdb/cli-prs -/pkg/cli/clisqlcfg/ @cockroachdb/sql-sessions @cockroachdb/cli-prs -/pkg/cli/clisqlclient/ @cockroachdb/sql-sessions @cockroachdb/cli-prs -/pkg/cli/clisqlexec/ @cockroachdb/sql-sessions @cockroachdb/cli-prs -/pkg/cli/clisqlshell/ @cockroachdb/sql-sessions @cockroachdb/cli-prs +/pkg/cli/clientflags/ @cockroachdb/sql-foundations @cockroachdb/cli-prs +/pkg/cli/clienturl/ @cockroachdb/sql-foundations @cockroachdb/cli-prs +/pkg/cli/clisqlcfg/ @cockroachdb/sql-foundations @cockroachdb/cli-prs +/pkg/cli/clisqlclient/ @cockroachdb/sql-foundations @cockroachdb/cli-prs +/pkg/cli/clisqlexec/ @cockroachdb/sql-foundations @cockroachdb/cli-prs +/pkg/cli/clisqlshell/ @cockroachdb/sql-foundations @cockroachdb/cli-prs /pkg/cli/connect*.go @cockroachdb/prodsec @cockroachdb/cli-prs /pkg/cli/context.go @cockroachdb/cli-prs -/pkg/cli/convert_url* @cockroachdb/sql-sessions @cockroachdb/cli-prs +/pkg/cli/convert_url* @cockroachdb/sql-foundations @cockroachdb/cli-prs /pkg/cli/debug*.go @cockroachdb/kv-prs @cockroachdb/cli-prs /pkg/cli/debug_job_trace*.go @cockroachdb/jobs-prs @cockroachdb/disaster-recovery /pkg/cli/debug_logconfig.go @cockroachdb/obs-inf-prs @cockroachdb/cli-prs /pkg/cli/debug_merg_logs*.go @cockroachdb/obs-inf-prs @cockroachdb/cli-prs -/pkg/cli/declarative_* @cockroachdb/sql-schema +/pkg/cli/declarative_* @cockroachdb/sql-foundations /pkg/cli/decode*.go @cockroachdb/kv-prs @cockroachdb/cli-prs -/pkg/cli/demo*.go @cockroachdb/sql-sessions @cockroachdb/server-prs @cockroachdb/cli-prs -/pkg/cli/democluster/ @cockroachdb/sql-sessions @cockroachdb/server-prs @cockroachdb/cli-prs -/pkg/cli/doctor*.go @cockroachdb/sql-schema @cockroachdb/cli-prs +/pkg/cli/demo*.go @cockroachdb/sql-foundations @cockroachdb/server-prs @cockroachdb/cli-prs +/pkg/cli/democluster/ @cockroachdb/sql-foundations @cockroachdb/server-prs @cockroachdb/cli-prs +/pkg/cli/doctor*.go @cockroachdb/sql-foundations @cockroachdb/cli-prs /pkg/cli/flags*.go @cockroachdb/cli-prs -/pkg/cli/import*.go @cockroachdb/sql-sessions @cockroachdb/cli-prs +/pkg/cli/import*.go @cockroachdb/sql-foundations @cockroachdb/cli-prs /pkg/cli/inflight_trace_dump/ @cockroachdb/cluster-observability @cockroachdb/cli-prs /pkg/cli/init.go @cockroachdb/kv-prs @cockroachdb/cli-prs /pkg/cli/log*.go @cockroachdb/obs-inf-prs @cockroachdb/cli-prs @@ -145,7 +145,7 @@ /pkg/cli/mt_test_directory.go @cockroachdb/sqlproxy-prs @cockroachdb/server-prs /pkg/cli/nodelocal*.go @cockroachdb/disaster-recovery /pkg/cli/rpc*.go @cockroachdb/kv-prs @cockroachdb/cli-prs -/pkg/cli/sql*.go @cockroachdb/sql-sessions @cockroachdb/cli-prs +/pkg/cli/sql*.go @cockroachdb/sql-foundations @cockroachdb/cli-prs /pkg/cli/start*.go @cockroachdb/server-prs @cockroachdb/cli-prs /pkg/cli/statement*.go @cockroachdb/cluster-observability @cockroachdb/cli-prs /pkg/cli/syncbench/ @cockroachdb/storage @cockroachdb/kv-prs @@ -153,7 +153,7 @@ /pkg/cli/testutils.go @cockroachdb/test-eng /pkg/cli/tsdump.go @cockroachdb/obs-inf-prs /pkg/cli/userfile.go @cockroachdb/disaster-recovery -/pkg/cli/workload* @cockroachdb/sql-sessions +/pkg/cli/workload* @cockroachdb/sql-foundations /pkg/cli/zip*.go @cockroachdb/obs-inf-prs @cockroachdb/cli-prs # Beware to not assign the entire server package directory to a single @@ -190,19 +190,19 @@ /pkg/server/key_vis* @cockroachdb/cluster-observability @cockroachdb/obs-inf-prs /pkg/server/load_endpoint* @cockroachdb/obs-inf-prs @cockroachdb/server-prs /pkg/server/loss_of_quorum*.go @cockroachdb/kv-prs -/pkg/server/migration* 
@cockroachdb/sql-schema +/pkg/server/migration* @cockroachdb/sql-foundations /pkg/server/multi_store* @cockroachdb/kv-prs @cockroachdb/storage /pkg/server/node* @cockroachdb/kv-prs @cockroachdb/server-prs /pkg/server/node_http*.go @cockroachdb/obs-inf-prs @cockroachdb/server-prs /pkg/server/node_tenant*go @cockroachdb/obs-inf-prs @cockroachdb/multi-tenant @cockroachdb/server-prs -/pkg/server/pgurl/ @cockroachdb/sql-sessions @cockroachdb/cli-prs +/pkg/server/pgurl/ @cockroachdb/sql-foundations @cockroachdb/cli-prs /pkg/server/pagination* @cockroachdb/obs-inf-prs @cockroachdb/server-prs /pkg/server/problem_ranges*.go @cockroachdb/cluster-observability @cockroachdb/obs-inf-prs /pkg/server/profiler/ @cockroachdb/obs-inf-prs @cockroachdb/kv-prs /pkg/server/purge_auth_* @cockroachdb/obs-inf-prs @cockroachdb/server-prs /pkg/server/server_controller_*.go @cockroachdb/multi-tenant @cockroachdb/server-prs /pkg/server/server_controller_http.go @cockroachdb/obs-inf-prs @cockroachdb/server-prs -/pkg/server/server_controller_sql.go @cockroachdb/sql-sessions @cockroachdb/server-prs +/pkg/server/server_controller_sql.go @cockroachdb/sql-foundations @cockroachdb/server-prs /pkg/server/server_http*.go @cockroachdb/obs-inf-prs @cockroachdb/server-prs /pkg/server/server_import_ts*.go @cockroachdb/obs-inf-prs @cockroachdb/kv-prs /pkg/server/server_obs* @cockroachdb/obs-inf-prs @@ -346,10 +346,10 @@ #!/pkg/gen/*.bzl @cockroachdb/dev-inf-noreview /pkg/gen/gen.bzl @cockroachdb/dev-inf -/pkg/acceptance/ @cockroachdb/sql-sessions +/pkg/acceptance/ @cockroachdb/sql-foundations /pkg/base/ @cockroachdb/kv-prs @cockroachdb/server-prs #!/pkg/bench/ @cockroachdb/sql-queries-noreview -/pkg/bench/rttanalysis @cockroachdb/sql-schema +/pkg/bench/rttanalysis @cockroachdb/sql-foundations /pkg/blobs/ @cockroachdb/disaster-recovery /pkg/build/ @cockroachdb/dev-inf #!/pkg/ccl/baseccl/ @cockroachdb/unowned @@ -364,12 +364,12 @@ #!/pkg/ccl/upgradeccl/ @cockroachdb/unowned #!/pkg/ccl/logictestccl/ @cockroachdb/sql-queries-noreview #!/pkg/ccl/sqlitelogictestccl/ @cockroachdb/sql-queries-noreview -/pkg/ccl/multiregionccl/ @cockroachdb/sql-schema +/pkg/ccl/multiregionccl/ @cockroachdb/sql-foundations /pkg/ccl/multitenantccl/ @cockroachdb/multi-tenant /pkg/ccl/multitenant/tenantcostclient/ @cockroachdb/sqlproxy-prs /pkg/ccl/multitenant/tenantcostserver/ @cockroachdb/sqlproxy-prs #!/pkg/ccl/oidcccl/ @cockroachdb/unowned -/pkg/ccl/partitionccl/ @cockroachdb/sql-schema +/pkg/ccl/partitionccl/ @cockroachdb/sql-foundations #!/pkg/ccl/serverccl/ @cockroachdb/unowned /pkg/ccl/serverccl/diagnosticsccl/ @cockroachdb/obs-inf-prs @@ -385,35 +385,35 @@ /pkg/ccl/testccl/authccl/ @cockroachdb/cloud-identity @cockroachdb/prodsec /pkg/ccl/testccl/sqlccl/ @cockroachdb/sql-queries -/pkg/ccl/testccl/workload/schemachange/ @cockroachdb/sql-schema +/pkg/ccl/testccl/workload/schemachange/ @cockroachdb/sql-foundations #!/pkg/ccl/testutilsccl/ @cockroachdb/test-eng-noreview -/pkg/ccl/testutilsccl/alter_* @cockroachdb/sql-schema +/pkg/ccl/testutilsccl/alter_* @cockroachdb/sql-foundations #!/pkg/ccl/utilccl/ @cockroachdb/unowned -/pkg/ccl/workloadccl/ @cockroachdb/test-eng #! @cockroachdb/sql-sessions-noreview -/pkg/ccl/benchccl/rttanalysisccl/ @cockroachdb/sql-schema +/pkg/ccl/workloadccl/ @cockroachdb/test-eng #! 
@cockroachdb/sql-foundations-noreview +/pkg/ccl/benchccl/rttanalysisccl/ @cockroachdb/sql-foundations #!/pkg/clusterversion/ @cockroachdb/dev-inf-noreview @cockroachdb/kv-prs-noreview /pkg/cmd/allocsim/ @cockroachdb/kv-prs /pkg/cmd/bazci/ @cockroachdb/dev-inf /pkg/cmd/cloudupload/ @cockroachdb/dev-inf /pkg/cmd/cmdutil/ @cockroachdb/dev-inf -/pkg/cmd/cmp-protocol/ @cockroachdb/sql-sessions -/pkg/cmd/cmp-sql/ @cockroachdb/sql-sessions -/pkg/cmd/cmpconn/ @cockroachdb/sql-sessions +/pkg/cmd/cmp-protocol/ @cockroachdb/sql-foundations +/pkg/cmd/cmp-sql/ @cockroachdb/sql-foundations +/pkg/cmd/cmpconn/ @cockroachdb/sql-foundations /pkg/cmd/cockroach/ @cockroachdb/dev-inf @cockroachdb/cli-prs /pkg/cmd/cockroach-oss/ @cockroachdb/dev-inf @cockroachdb/cli-prs /pkg/cmd/cockroach-short/ @cockroachdb/dev-inf @cockroachdb/cli-prs -/pkg/cmd/cockroach-sql/ @cockroachdb/sql-sessions @cockroachdb/cli-prs +/pkg/cmd/cockroach-sql/ @cockroachdb/sql-foundations @cockroachdb/cli-prs /pkg/cmd/compile-build/ @cockroachdb/dev-inf -/pkg/cmd/cr2pg/ @cockroachdb/sql-sessions +/pkg/cmd/cr2pg/ @cockroachdb/sql-foundations /pkg/cmd/dev/ @cockroachdb/dev-inf #!/pkg/cmd/docgen/ @cockroachdb/docs-infra-prs /pkg/cmd/docs-issue-generation/ @cockroachdb/dev-inf /pkg/cmd/fuzz/ @cockroachdb/test-eng -/pkg/cmd/generate-binary/ @cockroachdb/sql-sessions +/pkg/cmd/generate-binary/ @cockroachdb/sql-foundations /pkg/cmd/generate-distdir/ @cockroachdb/dev-inf /pkg/cmd/generate-logictest/ @cockroachdb/dev-inf /pkg/cmd/generate-acceptance-tests/ @cockroachdb/dev-inf -/pkg/cmd/generate-metadata-tables/ @cockroachdb/sql-sessions +/pkg/cmd/generate-metadata-tables/ @cockroachdb/sql-foundations /pkg/cmd/generate-spatial-ref-sys/ @cockroachdb/spatial /pkg/cmd/generate-bazel-extra/ @cockroachdb/dev-inf /pkg/cmd/generate-staticcheck/ @cockroachdb/dev-inf @@ -445,7 +445,7 @@ #!/pkg/cmd/roachtest/tests @cockroachdb/test-eng-noreview /pkg/cmd/roachvet/ @cockroachdb/dev-inf /pkg/cmd/skip-test/ @cockroachdb/test-eng -/pkg/cmd/skiperrs/ @cockroachdb/sql-sessions +/pkg/cmd/skiperrs/ @cockroachdb/sql-foundations /pkg/cmd/skipped-tests/ @cockroachdb/test-eng /pkg/cmd/smith/ @cockroachdb/sql-queries /pkg/cmd/smithcmp/ @cockroachdb/sql-queries @@ -455,11 +455,11 @@ /pkg/cmd/uptodate/ @cockroachdb/dev-inf #!/pkg/cmd/urlcheck/ @cockroachdb/docs-infra-prs /pkg/cmd/whoownsit/ @cockroachdb/test-eng -/pkg/cmd/workload/ @cockroachdb/test-eng #! @cockroachdb/sql-sessions-noreview +/pkg/cmd/workload/ @cockroachdb/test-eng #! @cockroachdb/sql-foundations-noreview #!/pkg/cmd/wraprules/ @cockroachdb/obs-inf-prs-noreview #!/pkg/cmd/zerosum/ @cockroachdb/kv-noreview /pkg/col/ @cockroachdb/sql-queries -/pkg/compose/ @cockroachdb/sql-sessions +/pkg/compose/ @cockroachdb/sql-foundations /pkg/config/ @cockroachdb/kv-prs @cockroachdb/server-prs # TODO(nickvigilante): add the cockroach repo to the docs-infra-prs team so that # Github stops complaining. Then remove the #! prefix here and on the other lines @@ -478,7 +478,7 @@ /pkg/keysbase/ @cockroachdb/kv-prs # Don't ping KV on updates to reserved descriptor IDs and such. 
#!/pkg/keys/constants.go @cockroachdb/kv-prs-noreview -/pkg/upgrade/ @cockroachdb/sql-schema +/pkg/upgrade/ @cockroachdb/sql-foundations /pkg/keyvisualizer/ @cockroachdb/kv-obs-prs /pkg/multitenant/ @cockroachdb/multi-tenant /pkg/release/ @cockroachdb/dev-inf @@ -501,7 +501,7 @@ /pkg/rpc/auth_tenant.go @cockroachdb/multi-tenant @cockroachdb/prodsec /pkg/scheduledjobs/ @cockroachdb/jobs-prs @cockroachdb/disaster-recovery /pkg/security/ @cockroachdb/prodsec @cockroachdb/server-prs -/pkg/security/clientsecopts/ @cockroachdb/sql-sessions @cockroachdb/prodsec +/pkg/security/clientsecopts/ @cockroachdb/sql-foundations @cockroachdb/prodsec #!/pkg/settings/ @cockroachdb/unowned /pkg/spanconfig/ @cockroachdb/kv-prs /pkg/repstream/ @cockroachdb/disaster-recovery @@ -520,7 +520,7 @@ /pkg/util/admission/ @cockroachdb/admission-control /pkg/util/schedulerlatency/ @cockroachdb/admission-control /pkg/util/tracing @cockroachdb/obs-inf-prs -/pkg/workload/ @cockroachdb/test-eng #! @cockroachdb/sql-sessions-noreview +/pkg/workload/ @cockroachdb/test-eng #! @cockroachdb/sql-foundations-noreview /pkg/obs/ @cockroachdb/obs-inf-prs /pkg/obsservice/ @cockroachdb/obs-inf-prs diff --git a/TEAMS.yaml b/TEAMS.yaml index ce64f43e16fa..966a43cf1061 100644 --- a/TEAMS.yaml +++ b/TEAMS.yaml @@ -21,15 +21,13 @@ cockroachdb/docs: triage_column_id: 3971225 aliases: cockroachdb/docs-infra-prs: other -cockroachdb/sql-sessions: +cockroachdb/sql-foundations: aliases: cockroachdb/sql-syntax-prs: other cockroachdb/sqlproxy-prs: other cockroachdb/sql-api-prs: other - triage_column_id: 7259065 - label: T-sql-sessions -cockroachdb/sql-schema: - triage_column_id: 8946818 + triage_column_id: 19467489 + label: T-sql-foundations cockroachdb/sql-queries: aliases: cockroachdb/sql-optimizer: other diff --git a/pkg/ccl/backupccl/restore_processor_planning.go b/pkg/ccl/backupccl/restore_processor_planning.go index eab36e682ce6..8c0a4a650597 100644 --- a/pkg/ccl/backupccl/restore_processor_planning.go +++ b/pkg/ccl/backupccl/restore_processor_planning.go @@ -28,7 +28,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/physicalplan" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" - "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -54,7 +53,7 @@ var memoryMonitorSSTs = settings.RegisterBoolSetting( settings.TenantWritable, "bulkio.restore.memory_monitor_ssts", "if true, restore will limit number of simultaneously open SSTs based on available memory", - util.ConstantWithMetamorphicTestBool("restore-memory-monitor-ssts", true), + false, ) // distRestore plans a 2 stage distSQL flow for a distributed restore. 
It diff --git a/pkg/ccl/backupccl/utils_test.go b/pkg/ccl/backupccl/utils_test.go index f513f1dcd621..db9732105a63 100644 --- a/pkg/ccl/backupccl/utils_test.go +++ b/pkg/ccl/backupccl/utils_test.go @@ -663,7 +663,7 @@ func requireRecoveryEvent( expected eventpb.RecoveryEvent, ) { testutils.SucceedsSoon(t, func() error { - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles( startTime, math.MaxInt64, diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go index a628778746d3..e5d5bb078add 100644 --- a/pkg/ccl/changefeedccl/changefeed_test.go +++ b/pkg/ccl/changefeedccl/changefeed_test.go @@ -1796,7 +1796,7 @@ func TestChangefeedSchemaChangeNoBackfill(t *testing.T) { cdcTest(t, testFn) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) if err != nil { @@ -2166,7 +2166,7 @@ func TestChangefeedSchemaChangeBackfillCheckpoint(t *testing.T) { cdcTestWithSystem(t, testFn, feedTestEnterpriseSinks) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) if err != nil { @@ -2352,7 +2352,7 @@ func TestChangefeedSchemaChangeAllowBackfill(t *testing.T) { cdcTestWithSystem(t, testFn) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) if err != nil { @@ -2404,7 +2404,7 @@ func TestChangefeedSchemaChangeBackfillScope(t *testing.T) { } cdcTestWithSystem(t, testFn) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) if err != nil { @@ -2511,7 +2511,7 @@ func TestChangefeedAfterSchemaChangeBackfill(t *testing.T) { } cdcTest(t, testFn) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) if err != nil { diff --git a/pkg/ccl/changefeedccl/helpers_test.go b/pkg/ccl/changefeedccl/helpers_test.go index 2e02176f83d0..ef9939a22361 100644 --- a/pkg/ccl/changefeedccl/helpers_test.go +++ b/pkg/ccl/changefeedccl/helpers_test.go @@ -1141,7 +1141,7 @@ var cmLogRe = regexp.MustCompile(`event_log\.go`) func checkStructuredLogs(t *testing.T, eventType string, startTime int64) []string { var matchingEntries []string testutils.SucceedsSoon(t, func() error { - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(startTime, math.MaxInt64, 10000, cmLogRe, log.WithMarkedSensitiveData) if err != nil { diff --git a/pkg/ccl/changefeedccl/nemeses_test.go b/pkg/ccl/changefeedccl/nemeses_test.go index a8700188289d..907bdd169b85 100644 --- a/pkg/ccl/changefeedccl/nemeses_test.go +++ b/pkg/ccl/changefeedccl/nemeses_test.go @@ -47,7 +47,7 @@ func TestChangefeedNemeses(t *testing.T) { // nemeses_test.go:39: pq: unimplemented: operation is // unsupported in multi-tenancy mode cdcTest(t, testFn, feedTestNoTenants) - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) if err != nil { diff --git a/pkg/ccl/jwtauthccl/authentication_jwt.go b/pkg/ccl/jwtauthccl/authentication_jwt.go index 493ae22681a2..15bf60769a77 100644 --- 
a/pkg/ccl/jwtauthccl/authentication_jwt.go +++ b/pkg/ccl/jwtauthccl/authentication_jwt.go @@ -10,6 +10,7 @@ package jwtauthccl import ( "context" + "fmt" "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" "github.com/cockroachdb/cockroach/pkg/security/username" @@ -64,6 +65,7 @@ type jwtAuthenticatorConf struct { enabled bool issuers []string jwks jwk.Set + claim string } // reloadConfig locks mutex and then refreshes the values in conf from the cluster settings. @@ -82,6 +84,7 @@ func (authenticator *jwtAuthenticator) reloadConfigLocked( enabled: JWTAuthEnabled.Get(&st.SV), issuers: mustParseValueOrArray(JWTAuthIssuers.Get(&st.SV)), jwks: mustParseJWKS(JWTAuthJWKS.Get(&st.SV)), + claim: JWTAuthClaim.Get(&st.SV), } if !authenticator.mu.conf.enabled && conf.enabled { @@ -147,23 +150,63 @@ func (authenticator *jwtAuthenticator) ValidateJWTLogin( "token issued by %s", parsedToken.Issuer()) } - users, err := authenticator.mapUsername(parsedToken.Subject(), parsedToken.Issuer(), identMap) - if err != nil || len(users) == 0 { + // Extract all requested principals from the token. By default, we take it from the subject unless they specify + // an alternate claim to pull from. + var tokenPrincipals []string + if authenticator.mu.conf.claim == "" || authenticator.mu.conf.claim == "sub" { + tokenPrincipals = []string{parsedToken.Subject()} + } else { + claimValue, ok := parsedToken.Get(authenticator.mu.conf.claim) + if !ok { + return errors.WithDetailf( + errors.Newf("JWT authentication: missing claim"), + "token does not contain a claim for %s", authenticator.mu.conf.claim) + } + switch castClaimValue := claimValue.(type) { + case string: + // Accept a single string value. + tokenPrincipals = []string{castClaimValue} + case []interface{}: + // Iterate over the slice and add all string values to the tokenPrincipals. + for _, maybePrincipal := range castClaimValue { + tokenPrincipals = append(tokenPrincipals, fmt.Sprint(maybePrincipal)) + } + case []string: + // This case never seems to happen but is included in case an implementation detail changes in the library. + tokenPrincipals = castClaimValue + default: + tokenPrincipals = []string{fmt.Sprint(castClaimValue)} + } + } + + // Take the principals from the token and send each of them through the identity map to generate the + // list of usernames that this token is valid authentication for. + var acceptedUsernames []username.SQLUsername + for _, tokenPrincipal := range tokenPrincipals { + mappedUsernames, err := authenticator.mapUsername(tokenPrincipal, parsedToken.Issuer(), identMap) + if err != nil { + return errors.WithDetailf( + errors.Newf("JWT authentication: invalid claim value"), + "the value %s for the issuer %s is invalid", tokenPrincipal, parsedToken.Issuer()) + } + acceptedUsernames = append(acceptedUsernames, mappedUsernames...) 
+ } + if len(acceptedUsernames) == 0 { return errors.WithDetailf( - errors.Newf("JWT authentication: invalid subject"), - "the subject %s for the issuer %s is invalid", parsedToken.Subject(), parsedToken.Issuer()) + errors.Newf("JWT authentication: invalid principal"), + "the value %s for the issuer %s is invalid", tokenPrincipals, parsedToken.Issuer()) } - subjectMatch := false - for _, subject := range users { - if subject.Normalized() == user.Normalized() { - subjectMatch = true + principalMatch := false + for _, username := range acceptedUsernames { + if username.Normalized() == user.Normalized() { + principalMatch = true break } } - if !subjectMatch { + if !principalMatch { return errors.WithDetailf( - errors.Newf("JWT authentication: invalid subject"), - "token issued for %s and login was for %s", parsedToken.Subject(), user.Normalized()) + errors.Newf("JWT authentication: invalid principal"), + "token issued for %s and login was for %s", tokenPrincipals, user.Normalized()) } if user.IsRootUser() || user.IsReserved() { return errors.WithDetailf( @@ -216,6 +259,9 @@ var ConfigureJWTAuth = func( JWTAuthJWKS.SetOnChange(&st.SV, func(ctx context.Context) { authenticator.reloadConfig(ambientCtx.AnnotateCtx(ctx), st) }) + JWTAuthClaim.SetOnChange(&st.SV, func(ctx context.Context) { + authenticator.reloadConfig(ambientCtx.AnnotateCtx(ctx), st) + }) return &authenticator } diff --git a/pkg/ccl/jwtauthccl/authentication_jwt_test.go b/pkg/ccl/jwtauthccl/authentication_jwt_test.go index f929edde7b97..0a83c37bc477 100644 --- a/pkg/ccl/jwtauthccl/authentication_jwt_test.go +++ b/pkg/ccl/jwtauthccl/authentication_jwt_test.go @@ -46,6 +46,8 @@ var audience2 = "audience_2" var issuer1 = "issuer1" var issuer2 = "issuer2" +var customClaimName = "groups" + func createJWKS(t *testing.T) (jwk.Set, jwk.Key, jwk.Key) { key1 := createRSAKey(t, keyID1) key2 := createECDSAKey(t, keyID2) @@ -88,12 +90,17 @@ func createJWT( expiredAt time.Time, key jwk.Key, algorithm jwa.SignatureAlgorithm, + customClaimName string, + customClaimValue interface{}, ) []byte { token := jwt.New() require.NoError(t, token.Set(jwt.SubjectKey, subject)) require.NoError(t, token.Set(jwt.AudienceKey, audience)) require.NoError(t, token.Set(jwt.IssuerKey, issuer)) require.NoError(t, token.Set(jwt.ExpirationKey, expiredAt)) + if customClaimName != "" { + require.NoError(t, token.Set(customClaimName, customClaimValue)) + } signedTokenBytes, err := jwt.Sign(token, algorithm, key) require.NoError(t, err) return signedTokenBytes @@ -124,7 +131,7 @@ func TestJWTEnabledCheck(t *testing.T) { verifier := ConfigureJWTAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) key := createRSAKey(t, keyID1) - token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256) + token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") // JWT auth is not enabled. 
err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username1), token, identMap) require.ErrorContains(t, err, "JWT authentication: not enabled") @@ -151,7 +158,7 @@ func TestJWTSingleKey(t *testing.T) { verifier := ConfigureJWTAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) _, key, _ := createJWKS(t) - token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256) + token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") publicKey, err := key.PublicKey() require.NoError(t, err) jwkPublicKey := serializePublicKey(t, publicKey) @@ -182,7 +189,7 @@ func TestJWTSingleKeyWithoutKeyAlgorithm(t *testing.T) { verifier := ConfigureJWTAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) _, key, _ := createJWKS(t) - token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256) + token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") // Clear the algorithm. require.NoError(t, key.Remove(jwk.AlgorithmKey)) publicKey, err := key.PublicKey() @@ -214,7 +221,7 @@ func TestJWTMultiKey(t *testing.T) { // Make sure jwt auth is enabled. JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true) keySet, key, key2 := createJWKS(t) - token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key2, jwa.ES384) + token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key2, jwa.ES384, "", "") publicKey, err := key.PublicKey() require.NoError(t, err) keySetWithOneKey := jwk.NewSet() @@ -249,7 +256,7 @@ func TestExpiredToken(t *testing.T) { // Make sure jwt auth is enabled and accepts valid signing keys. JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true) keySet, key, _ := createJWKS(t) - token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(-1*time.Second), key, jwa.RS256) + token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(-1*time.Second), key, jwa.RS256, "", "") JWTAuthJWKS.Override(ctx, &s.ClusterSettings().SV, serializePublicKeySet(t, keySet)) verifier := ConfigureJWTAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) @@ -271,7 +278,7 @@ func TestKeyIdMismatch(t *testing.T) { keySet, key, _ := createJWKS(t) // Create a JWT with different key id. require.NoError(t, key.Set(jwk.KeyIDKey, invalidKeyID)) - token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256) + token := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") // Make sure jwt auth is enabled and accepts valid signing keys. JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true) JWTAuthJWKS.Override(ctx, &s.ClusterSettings().SV, serializePublicKeySet(t, keySet)) @@ -283,7 +290,7 @@ func TestKeyIdMismatch(t *testing.T) { // Reset the key id and regenerate the token. require.NoError(t, key.Set(jwk.KeyIDKey, keyID1)) - token = createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256) + token = createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") // Now jwk1 token passes the validity check and fails on the next check. 
err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(invalidUsername), token, identMap) require.ErrorContains(t, err, "JWT authentication: invalid issuer") @@ -300,8 +307,8 @@ func TestIssuerCheck(t *testing.T) { identMap, err := identmap.From(strings.NewReader(identMapString)) require.NoError(t, err) keySet, key, _ := createJWKS(t) - token1 := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256) - token2 := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key, jwa.RS256) + token1 := createJWT(t, username1, audience1, issuer1, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") + token2 := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") // Make sure jwt auth is enabled, accepts jwk1 or jwk2 as valid signing keys, accepts the audience of "test_cluster" // and the issuer of "issuer2". @@ -317,18 +324,18 @@ func TestIssuerCheck(t *testing.T) { // Validation succeeds when the issuer in the token is equal to the cluster's accepted issuers. err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(invalidUsername), token2, identMap) - require.ErrorContains(t, err, "JWT authentication: invalid subject") + require.ErrorContains(t, err, "JWT authentication: invalid principal") // Set the cluster setting to accept issuer values of either "issuer" or "issuer2". JWTAuthIssuers.Override(ctx, &s.ClusterSettings().SV, "[\""+issuer1+"\", \""+issuer2+"\"]") // Validation succeeds when the issuer in the token is an element of the cluster's accepted issuers. err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(invalidUsername), token1, identMap) - require.ErrorContains(t, err, "JWT authentication: invalid subject") + require.ErrorContains(t, err, "JWT authentication: invalid principal") // Validation succeeds when the issuer in the token is an element of the cluster's accepted issuers. err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(invalidUsername), token2, identMap) - require.ErrorContains(t, err, "JWT authentication: invalid subject") + require.ErrorContains(t, err, "JWT authentication: invalid principal") } @@ -343,7 +350,7 @@ func TestSubjectCheck(t *testing.T) { identMap, err := identmap.From(strings.NewReader(identMapString)) require.NoError(t, err) keySet, _, key2 := createJWKS(t) - token := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384) + token := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384, "", "") // Make sure jwt auth is enabled and accepts jwk1 or jwk2 as valid signing keys. JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true) @@ -354,7 +361,7 @@ func TestSubjectCheck(t *testing.T) { // Validation fails with a subject error when a user tries to log in with a user named // "invalid" but the token is for the user "test2". err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(invalidUsername), token, identMap) - require.ErrorContains(t, err, "JWT authentication: invalid subject") + require.ErrorContains(t, err, "JWT authentication: invalid principal") // Validation passes the subject check when the username matches the subject and then fails on the next // check (audience field not matching). 
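The hunk below adds tests for the claim-based principal extraction introduced in ValidateJWTLogin above. As a standalone sketch of the behavior under test (the helper name normalizePrincipals is illustrative only; in the production change this switch is inlined in ValidateJWTLogin), the claim-value normalization reduces to:

package main

import "fmt"

// normalizePrincipals reduces a decoded JWT claim value to a flat list of
// principal strings: a bare string maps to a one-element list, a JSON array
// decodes as []interface{} and has each element stringified, []string is
// passed through (defensive; the JWT library is not known to produce it),
// and any other type is coerced via fmt.Sprint.
func normalizePrincipals(claimValue interface{}) []string {
	switch v := claimValue.(type) {
	case string:
		return []string{v}
	case []interface{}:
		principals := make([]string, 0, len(v))
		for _, p := range v {
			principals = append(principals, fmt.Sprint(p))
		}
		return principals
	case []string:
		return v
	default:
		return []string{fmt.Sprint(v)}
	}
}

func main() {
	fmt.Println(normalizePrincipals("test"))                         // [test]
	fmt.Println(normalizePrincipals([]interface{}{"test", "test2"})) // [test test2]
	fmt.Println(normalizePrincipals(1))                              // [1]
}

Coercing non-string types through fmt.Sprint is what lets TestIntegerClaimValue below map the integer claim value 1 through the identity map as the string "1"; leaving the claim setting empty (or set to "sub") preserves the old subject-based flow.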
@@ -362,6 +369,121 @@ func TestSubjectCheck(t *testing.T) { require.ErrorContains(t, err, "JWT authentication: invalid audience") } +func TestClaimMissing(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) + defer s.Stopper().Stop(ctx) + identMapString := "" + identMap, err := identmap.From(strings.NewReader(identMapString)) + require.NoError(t, err) + keySet, _, key2 := createJWKS(t) + missingClaimToken := createJWT(t, invalidUsername, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384, "", "") + + // Make sure jwt auth is enabled and accepts jwk1 or jwk2 as valid signing keys. + JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true) + JWTAuthJWKS.Override(ctx, &s.ClusterSettings().SV, serializePublicKeySet(t, keySet)) + verifier := ConfigureJWTAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) + JWTAuthIssuers.Override(ctx, &s.ClusterSettings().SV, issuer2) + JWTAuthClaim.Override(ctx, &s.ClusterSettings().SV, customClaimName) + + // Validation fails with missing claim + err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(invalidUsername), missingClaimToken, identMap) + require.ErrorContains(t, err, "JWT authentication: missing claim") +} + +func TestIntegerClaimValue(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) + defer s.Stopper().Stop(ctx) + // map the value 1 to a valid user + identMapString := issuer2 + " 1 " + username1 + identMap, err := identmap.From(strings.NewReader(identMapString)) + require.NoError(t, err) + keySet, _, key2 := createJWKS(t) + intClaimToken := createJWT(t, invalidUsername, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384, customClaimName, 1) + + // Make sure jwt auth is enabled and accepts jwk1 or jwk2 as valid signing keys. + JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true) + JWTAuthJWKS.Override(ctx, &s.ClusterSettings().SV, serializePublicKeySet(t, keySet)) + verifier := ConfigureJWTAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) + JWTAuthIssuers.Override(ctx, &s.ClusterSettings().SV, issuer2) + JWTAuthClaim.Override(ctx, &s.ClusterSettings().SV, customClaimName) + + // the integer claim is implicitly cast to a string + err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username1), intClaimToken, identMap) + require.ErrorContains(t, err, "JWT authentication: invalid audience") +} + +func TestSingleClaim(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) + defer s.Stopper().Stop(ctx) + identMapString := "" + identMap, err := identmap.From(strings.NewReader(identMapString)) + require.NoError(t, err) + keySet, _, key2 := createJWKS(t) + token := createJWT(t, invalidUsername, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384, customClaimName, username1) + + // Make sure jwt auth is enabled and accepts jwk1 or jwk2 as valid signing keys. 
+ JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true) + JWTAuthJWKS.Override(ctx, &s.ClusterSettings().SV, serializePublicKeySet(t, keySet)) + verifier := ConfigureJWTAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) + JWTAuthIssuers.Override(ctx, &s.ClusterSettings().SV, issuer2) + JWTAuthClaim.Override(ctx, &s.ClusterSettings().SV, customClaimName) + + // Validation fails with a subject error when a user tries to log in with a user named + // "invalid" but the token is for the user "test2". + err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(invalidUsername), token, identMap) + require.ErrorContains(t, err, "JWT authentication: invalid principal") + + // Validation passes the subject check when the username matches the subject and then fails on the next + // check (audience field not matching). + err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username1), token, identMap) + require.ErrorContains(t, err, "JWT authentication: invalid audience") +} + +func TestMultipleClaim(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + ctx := context.Background() + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) + defer s.Stopper().Stop(ctx) + identMapString := "" + identMap, err := identmap.From(strings.NewReader(identMapString)) + require.NoError(t, err) + keySet, _, key2 := createJWKS(t) + token := createJWT(t, invalidUsername, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384, customClaimName, []string{username2, username1}) + + // Make sure jwt auth is enabled and accepts jwk1 or jwk2 as valid signing keys. + JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true) + JWTAuthJWKS.Override(ctx, &s.ClusterSettings().SV, serializePublicKeySet(t, keySet)) + verifier := ConfigureJWTAuth(ctx, s.AmbientCtx(), s.ClusterSettings(), s.StorageClusterID()) + JWTAuthIssuers.Override(ctx, &s.ClusterSettings().SV, issuer2) + JWTAuthClaim.Override(ctx, &s.ClusterSettings().SV, customClaimName) + + // Validation fails with a subject error when a user tries to log in with a user named + // "invalid" but the token is for the user "test2". + err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(invalidUsername), token, identMap) + require.ErrorContains(t, err, "JWT authentication: invalid principal") + + // Validation passes the subject check when the username matches the subject and then fails on the next + // check (audience field not matching). 
+ err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username1), token, identMap) + require.ErrorContains(t, err, "JWT authentication: invalid audience") + err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username2), token, identMap) + require.ErrorContains(t, err, "JWT authentication: invalid audience") +} + func TestSubjectMappingCheck(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -374,8 +496,8 @@ func TestSubjectMappingCheck(t *testing.T) { identMap, err := identmap.From(strings.NewReader(identMapString)) require.NoError(t, err) keySet, _, key2 := createJWKS(t) - token := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384) - token2 := createJWT(t, username2, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384) + token := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384, "", "") + token2 := createJWT(t, username2, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384, "", "") // Make sure jwt auth is enabled and accepts jwk1 or jwk2 as valid signing keys. JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true) @@ -386,11 +508,11 @@ func TestSubjectMappingCheck(t *testing.T) { // Validation fails with a subject error when a user tries to log in when their user is mapped to username2 // but they try to log in with username1. err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username1), token, identMap) - require.ErrorContains(t, err, "JWT authentication: invalid subject") + require.ErrorContains(t, err, "JWT authentication: invalid principal") // Validation fails if there is a map for the issuer but no mapping rule matches. err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username1), token2, identMap) - require.ErrorContains(t, err, "JWT authentication: invalid subject") + require.ErrorContains(t, err, "JWT authentication: invalid principal") // Validation passes the subject check when the username matches the mapped subject. err = verifier.ValidateJWTLogin(s.ClusterSettings(), username.MakeSQLUsernameFromPreNormalizedString(username2), token, identMap) @@ -409,8 +531,8 @@ func TestSubjectReservedUser(t *testing.T) { identMap, err := identmap.From(strings.NewReader(identMapString)) require.NoError(t, err) keySet, _, key2 := createJWKS(t) - token := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384) - token2 := createJWT(t, "root", audience1, issuer1, timeutil.Now().Add(time.Hour), key2, jwa.ES384) + token := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key2, jwa.ES384, "", "") + token2 := createJWT(t, "root", audience1, issuer1, timeutil.Now().Add(time.Hour), key2, jwa.ES384, "", "") // Make sure jwt auth is enabled and accepts jwk1 or jwk2 as valid signing keys. 
JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true) @@ -437,7 +559,7 @@ func TestAudienceCheck(t *testing.T) { identMap, err := identmap.From(strings.NewReader(identMapString)) require.NoError(t, err) keySet, key, _ := createJWKS(t) - token := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key, jwa.RS256) + token := createJWT(t, username1, audience1, issuer2, timeutil.Now().Add(time.Hour), key, jwa.RS256, "", "") // Make sure jwt auth is enabled and accepts jwk1 or jwk2 as valid signing keys. JWTAuthEnabled.Override(ctx, &s.ClusterSettings().SV, true) diff --git a/pkg/ccl/jwtauthccl/settings.go b/pkg/ccl/jwtauthccl/settings.go index db7f6408849a..633dbfdafb0c 100644 --- a/pkg/ccl/jwtauthccl/settings.go +++ b/pkg/ccl/jwtauthccl/settings.go @@ -24,8 +24,21 @@ const ( JWTAuthEnabledSettingName = baseJWTAuthSettingName + "enabled" JWTAuthIssuersSettingName = baseJWTAuthSettingName + "issuers" JWTAuthJWKSSettingName = baseJWTAuthSettingName + "jwks" + JWTAuthClaimSettingName = baseJWTAuthSettingName + "claim" ) +// JWTAuthClaim sets the JWT claim that is parsed to get the username. +var JWTAuthClaim = func() *settings.StringSetting { + s := settings.RegisterStringSetting( + settings.TenantWritable, + JWTAuthClaimSettingName, + "sets the JWT claim that is parsed to get the username", + "", + ) + s.SetReportable(true) + return s +}() + // JWTAuthAudience sets accepted audience values for JWT logins over the SQL interface. var JWTAuthAudience = func() *settings.StringSetting { s := settings.RegisterValidatedStringSetting( diff --git a/pkg/ccl/telemetryccl/telemetry_logging_test.go b/pkg/ccl/telemetryccl/telemetry_logging_test.go index fdac93b9161e..a2c68cc98820 100644 --- a/pkg/ccl/telemetryccl/telemetry_logging_test.go +++ b/pkg/ccl/telemetryccl/telemetry_logging_test.go @@ -107,7 +107,7 @@ func TestTelemetryLogRegions(t *testing.T) { sqlDB.Exec(t, tc.query) } - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles( 0, @@ -322,7 +322,7 @@ func TestBulkJobTelemetryLogging(t *testing.T) { execTimestamp++ } - log.FlushFileSinks() + log.Flush() var filteredSampleQueries []logpb.Entry testutils.SucceedsSoon(t, func() error { diff --git a/pkg/ccl/testccl/authccl/auth_test.go b/pkg/ccl/testccl/authccl/auth_test.go index 7b32286fc315..fe0ddefb63a9 100644 --- a/pkg/ccl/testccl/authccl/auth_test.go +++ b/pkg/ccl/testccl/authccl/auth_test.go @@ -227,6 +227,11 @@ func jwtRunTest(t *testing.T, insecure bool) { t.Fatalf("wrong number of argumenets to jwt_cluster_setting jwks: %d", len(a.Vals)) } jwtauthccl.JWTAuthJWKS.Override(context.Background(), sv, a.Vals[0]) + case "claim": + if len(a.Vals) != 1 { + t.Fatalf("wrong number of argumenets to jwt_cluster_setting claim: %d", len(a.Vals)) + } + jwtauthccl.JWTAuthClaim.Override(context.Background(), sv, a.Vals[0]) case "ident_map": if len(a.Vals) != 1 { t.Fatalf("wrong number of argumenets to jwt_cluster_setting ident_map: %d", len(a.Vals)) @@ -528,7 +533,7 @@ func TestClientAddrOverride(t *testing.T) { t.Run("check-server-log-uses-override", func(t *testing.T) { // Wait for the disconnection event in logs. testutils.SucceedsSoon(t, func() error { - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(testStartTime.UnixNano(), math.MaxInt64, 10000, sessionTerminatedRe, log.WithMarkedSensitiveData) if err != nil { @@ -541,7 +546,7 @@ func TestClientAddrOverride(t *testing.T) { }) // Now we want to check that the logging tags are also updated. 
- log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(testStartTime.UnixNano(), math.MaxInt64, 10000, authLogFileRe, log.WithMarkedSensitiveData) if err != nil { diff --git a/pkg/ccl/testccl/authccl/testdata/jwt b/pkg/ccl/testccl/authccl/testdata/jwt index 3a6d6548fe73..dddde1669744 100644 --- a/pkg/ccl/testccl/authccl/testdata/jwt +++ b/pkg/ccl/testccl/authccl/testdata/jwt @@ -101,8 +101,8 @@ jwt_cluster_setting issuers=issuer2 # see authentication_jwt_test.go for examples of how to generate these tokens. connect user=jwt_user options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3QyIn0.eyJhdWQiOiJ0ZXN0X2NsdXN0ZXIiLCJleHAiOjI2NjEyNjQyNjksImlhdCI6MTY2MTI2NDI2OSwiaXNzIjoiaXNzdWVyMiIsInN1YiI6InRlc3QyIn0.Tot41E-wSz24wo1wj3b8CwEr-O_dqWZoHZkAh2x4nfK2hT4yhfiOcajmKQJVVZX2_897c8uDOqfLzl77JEe-AX4mlEBZXWUNqwwQIdIFZxpL6FEV_YjvTF0bQuu9oeD7kYW-6i3-QQpB6QpCVb-wLW8bBbJ4zCap88nYk14HZH-ZYSzPAP7YEVppHQNhWrxQ66nQU__RuYeQdL6J5Edes9qCHUgqnZCnMPzDZ4l_3Pc5tTSNVcOUl5MMHsvrYsb0VtSFTNCOjJIADXbc2KzVbfqLt-ArUDxs36__u_g84TfGFXoT0VTDbDjYwD7wpyLuT3oLcJuA4m_tto6Rrn7Rww ---- -ERROR: JWT authentication: invalid subject (SQLSTATE 28000) -DETAIL: token issued for test2 and login was for jwt_user +ERROR: JWT authentication: invalid principal (SQLSTATE 28000) +DETAIL: token issued for [test2] and login was for jwt_user jwt_cluster_setting issuers=["issuer1","issuer2"] ---- @@ -110,8 +110,8 @@ jwt_cluster_setting issuers=["issuer1","issuer2"] # see authentication_jwt_test.go for examples of how to generate these tokens. connect user=jwt_user options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3QyIn0.eyJhdWQiOiJ0ZXN0X2NsdXN0ZXIiLCJleHAiOjI2NjEyNjQyNjksImlhdCI6MTY2MTI2NDI2OSwiaXNzIjoiaXNzdWVyMiIsInN1YiI6InRlc3QyIn0.Tot41E-wSz24wo1wj3b8CwEr-O_dqWZoHZkAh2x4nfK2hT4yhfiOcajmKQJVVZX2_897c8uDOqfLzl77JEe-AX4mlEBZXWUNqwwQIdIFZxpL6FEV_YjvTF0bQuu9oeD7kYW-6i3-QQpB6QpCVb-wLW8bBbJ4zCap88nYk14HZH-ZYSzPAP7YEVppHQNhWrxQ66nQU__RuYeQdL6J5Edes9qCHUgqnZCnMPzDZ4l_3Pc5tTSNVcOUl5MMHsvrYsb0VtSFTNCOjJIADXbc2KzVbfqLt-ArUDxs36__u_g84TfGFXoT0VTDbDjYwD7wpyLuT3oLcJuA4m_tto6Rrn7Rww ---- -ERROR: JWT authentication: invalid subject (SQLSTATE 28000) -DETAIL: token issued for test2 and login was for jwt_user +ERROR: JWT authentication: invalid principal (SQLSTATE 28000) +DETAIL: token issued for [test2] and login was for jwt_user subtest end @@ -144,8 +144,8 @@ subtest ident_map_subject_match # try to login with the test user even though the subject of the token is for test2 connect user=test options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3QyIn0.eyJhdWQiOiJ0ZXN0X2NsdXN0ZXIiLCJleHAiOjI2NjEyNjQyNjksImlhdCI6MTY2MTI2NDI2OSwiaXNzIjoiaXNzdWVyMiIsInN1YiI6InRlc3QyIn0.Tot41E-wSz24wo1wj3b8CwEr-O_dqWZoHZkAh2x4nfK2hT4yhfiOcajmKQJVVZX2_897c8uDOqfLzl77JEe-AX4mlEBZXWUNqwwQIdIFZxpL6FEV_YjvTF0bQuu9oeD7kYW-6i3-QQpB6QpCVb-wLW8bBbJ4zCap88nYk14HZH-ZYSzPAP7YEVppHQNhWrxQ66nQU__RuYeQdL6J5Edes9qCHUgqnZCnMPzDZ4l_3Pc5tTSNVcOUl5MMHsvrYsb0VtSFTNCOjJIADXbc2KzVbfqLt-ArUDxs36__u_g84TfGFXoT0VTDbDjYwD7wpyLuT3oLcJuA4m_tto6Rrn7Rww ---- -ERROR: JWT authentication: invalid subject (SQLSTATE 28000) -DETAIL: token issued for test2 and login was for test +ERROR: JWT authentication: invalid principal (SQLSTATE 28000) +DETAIL: token issued for [test2] and login was for test # map the user test2 to test when issued by issuer2 jwt_cluster_setting ident_map=issuer2,test2,test @@ -158,3 +158,53 @@ connect user=test options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1 ok defaultdb subtest end + 
+ +subtest single_custom_claim_login + +jwt_cluster_setting jwks={"alg":"RS256","e":"AQAB","kid":"test_kid1","kty":"RSA","n":"7SIVb_TfkxvwoopYqCBGJyWVUXuMMfP6fdrxtb0WreAICher0VGD9xAF2ZddMNQuVycqHZVkxplN_2-nq8F17POgU4RKJ5V5HLCGhABx0HjRRpLn-akSDTuTUcD3P4cE8XbLjCVCbZTjVncWWpt-UeRV2XHU-17ih5LSZDInzSGlWpp6BUTXiSZ_H7-HjO5cO5Q7j6P1iInETrdAMXWeYbnHXMXNLKyN4uKIymingOohekwYlOCvkA4V2e-u6-FPP5W-51GDroDtWNIVtpSakk1SzWdBjClvdZ3V0nfhw58pvROz8OpnJTVgb9IkZiwRUSbplnCS92gm1wWKz0O-Mw"} +---- + +# see authentication_jwt_test.go for examples of how to generate these tokens. +# try to login with token with usernames in custom claim without claim field set. +connect user=test options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3Rfa2lkMSIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsidGVzdF9jbHVzdGVyIl0sImV4cCI6MTk5OTE3ODQwOSwiZ3JvdXBzIjoidGVzdCIsImlzcyI6Imlzc3VlcjEiLCJzdWIiOiJpbnZhbGlkX3VzZXIifQ.gfqWUshoNkEe2QDxpZBbLCcTbeogtd7vfUd9XLakhcBiqFjPyf3iP-yzCE3nAR90OWQFdtKVp-O19ymJOKOOe2yAcMBFHdwQSKJ5FHgX3M3IMZHcXNIkU0qTp698mJpGD_w_e8RBLN19OwKsAdUY3oj1oIkljBlsrTkhHIFQX1KG9NYqQQG2Py5eJiDtz9aBpqb2hRSBIcyLSWp7VxQ9sPNXOvIWAynDwRJxCIuF69FfbsR9yHdjPQfoc-6wRktllJ7q1ZZfp129OZZxcQWsbl2v1xPOQPkrT_O4ziElanDF_uReoUxBne3AzlEMIPybSkUaQZXrhhqmH3Hl9PswYw +---- +ERROR: JWT authentication: invalid principal (SQLSTATE 28000) +DETAIL: token issued for [invalid_user] and login was for test + +# use the groups claim instead of the subject claim +jwt_cluster_setting claim=groups +---- + +# see authentication_jwt_test.go for examples of how to generate these tokens. +# login with the test_user1 since it is the value of the groups claim in the token +connect user=test options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3Rfa2lkMSIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsidGVzdF9jbHVzdGVyIl0sImV4cCI6MTk5OTE3ODQwOSwiZ3JvdXBzIjoidGVzdCIsImlzcyI6Imlzc3VlcjEiLCJzdWIiOiJpbnZhbGlkX3VzZXIifQ.gfqWUshoNkEe2QDxpZBbLCcTbeogtd7vfUd9XLakhcBiqFjPyf3iP-yzCE3nAR90OWQFdtKVp-O19ymJOKOOe2yAcMBFHdwQSKJ5FHgX3M3IMZHcXNIkU0qTp698mJpGD_w_e8RBLN19OwKsAdUY3oj1oIkljBlsrTkhHIFQX1KG9NYqQQG2Py5eJiDtz9aBpqb2hRSBIcyLSWp7VxQ9sPNXOvIWAynDwRJxCIuF69FfbsR9yHdjPQfoc-6wRktllJ7q1ZZfp129OZZxcQWsbl2v1xPOQPkrT_O4ziElanDF_uReoUxBne3AzlEMIPybSkUaQZXrhhqmH3Hl9PswYw +---- +ok defaultdb + +subtest end + +subtest multiple_custom_claim_login + +# clear the claim value +jwt_cluster_setting claim=sub +---- + +# see authentication_jwt_test.go for examples of how to generate these tokens. +# try to login with token with usernames in custom claim without claim field set. +connect user=test2 options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3Rfa2lkMSIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsidGVzdF9jbHVzdGVyIl0sImV4cCI6MTk5OTE3ODQwOSwiZ3JvdXBzIjpbInRlc3QiLCJ0ZXN0MiJdLCJpc3MiOiJpc3N1ZXIxIiwic3ViIjoiaW52YWxpZF91c2VyIn0.5B2ihElB50zACjjqy0ATxrSxlECmMj-0KvPp0NwoBBvURG16bOnYYksSeN5Izl_-YaP9ZoKOywxgA-sRtw4fX4du6Oo0tDSk3GzkZI6_IQoOxt8eq8To43Y74VSg2P3ts98yyNYXG0n3fTv2qtPjs6ly9p2iSnZBor6Yhy-YIjheT93Ehhl5s2sUL0gTOlpzGnb4N9MDgjphKQinu81DK-w200nOweYF_8ft8aeNiJqqDq1sZuUnCI1KcryuUoqQu5mWh0pO74XYCYHPTLAXwQ2BtKpfj_RJQqPcLW7hy1YcVdWTsL0PPrs6gJ_YKuo99eb0dBl1g-5Kdd5xRIm72g +---- +ERROR: JWT authentication: invalid principal (SQLSTATE 28000) +DETAIL: token issued for [invalid_user] and login was for test2 + +# use the groups claim instead of the subject claim +jwt_cluster_setting claim=groups +---- + +# see authentication_jwt_test.go for examples of how to generate these tokens. 
+# login with the test_user2 since it is one of the values of the groups claim in the token +connect user=test2 options=--crdb:jwt_auth_enabled=true password=eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3Rfa2lkMSIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsidGVzdF9jbHVzdGVyIl0sImV4cCI6MTk5OTE3ODQwOSwiZ3JvdXBzIjpbInRlc3QiLCJ0ZXN0MiJdLCJpc3MiOiJpc3N1ZXIxIiwic3ViIjoiaW52YWxpZF91c2VyIn0.5B2ihElB50zACjjqy0ATxrSxlECmMj-0KvPp0NwoBBvURG16bOnYYksSeN5Izl_-YaP9ZoKOywxgA-sRtw4fX4du6Oo0tDSk3GzkZI6_IQoOxt8eq8To43Y74VSg2P3ts98yyNYXG0n3fTv2qtPjs6ly9p2iSnZBor6Yhy-YIjheT93Ehhl5s2sUL0gTOlpzGnb4N9MDgjphKQinu81DK-w200nOweYF_8ft8aeNiJqqDq1sZuUnCI1KcryuUoqQu5mWh0pO74XYCYHPTLAXwQ2BtKpfj_RJQqPcLW7hy1YcVdWTsL0PPrs6gJ_YKuo99eb0dBl1g-5Kdd5xRIm72g +---- +ok defaultdb + +subtest end diff --git a/pkg/ccl/testccl/sqlccl/tenant_gc_test.go b/pkg/ccl/testccl/sqlccl/tenant_gc_test.go index 2ea85661f5f5..08c0d4586b4e 100644 --- a/pkg/ccl/testccl/sqlccl/tenant_gc_test.go +++ b/pkg/ccl/testccl/sqlccl/tenant_gc_test.go @@ -505,7 +505,7 @@ func TestGCTenantJobWaitsForProtectedTimestamps(t *testing.T) { checkGCBlockedByPTS := func(t *testing.T, sj *jobs.StartableJob, tenID uint64) { testutils.SucceedsSoon(t, func() error { - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile(fmt.Sprintf("GC TTL for dropped tenant %d has expired, but protected timestamp record\\(s\\)", tenID)), log.WithFlattenedSensitiveData) diff --git a/pkg/cli/connect.go b/pkg/cli/connect.go index 9dee8e107f09..b60c5b91296c 100644 --- a/pkg/cli/connect.go +++ b/pkg/cli/connect.go @@ -76,7 +76,7 @@ func runConnectInit(cmd *cobra.Command, args []string) (retErr error) { } // Ensure that log files are populated when the process terminates. - defer log.FlushFileSinks() + defer log.Flush() peers := []string(serverCfg.JoinList) ctx, cancel := context.WithCancel(context.Background()) diff --git a/pkg/cli/debug_send_kv_batch_test.go b/pkg/cli/debug_send_kv_batch_test.go index 07c17b9784bd..9dca99252e9b 100644 --- a/pkg/cli/debug_send_kv_batch_test.go +++ b/pkg/cli/debug_send_kv_batch_test.go @@ -123,7 +123,7 @@ func TestSendKVBatch(t *testing.T) { require.JSONEq(t, jsonResponse, output) // Check that a structured log event was emitted. - log.FlushFileSinks() + log.Flush() entries, err := log.FetchEntriesFromFiles(start.UnixNano(), timeutil.Now().UnixNano(), 1, regexp.MustCompile("debug_send_kv_batch"), log.WithFlattenedSensitiveData) require.NoError(t, err) diff --git a/pkg/cli/demo.go b/pkg/cli/demo.go index db6e95a1fd2f..6923a7b4e157 100644 --- a/pkg/cli/demo.go +++ b/pkg/cli/demo.go @@ -378,7 +378,7 @@ func runDemoInternal( } // Ensure the last few entries in the log files are flushed at the end. - defer log.FlushFileSinks() + defer log.Flush() return sqlCtx.Run(ctx, conn) } diff --git a/pkg/cli/start.go b/pkg/cli/start.go index bdd97d17a684..684457e612b8 100644 --- a/pkg/cli/start.go +++ b/pkg/cli/start.go @@ -771,7 +771,7 @@ func createAndStartServerAsync( go func() { // Ensure that the log files see the startup messages immediately. - defer log.FlushFileSinks() + defer log.Flush() // If anything goes dramatically wrong, use Go's panic/recover // mechanism to intercept the panic and log the panic details to // the error reporting server. @@ -1524,7 +1524,7 @@ func reportReadinessExternally(ctx context.Context, cmd *cobra.Command, waitForI // Ensure the configuration logging is written to disk in case a // process is waiting for the sdnotify readiness to read important // information from there. 
- log.FlushFileSinks() + log.Flush() // Signal readiness. This unblocks the process when running with // --background or under systemd. diff --git a/pkg/cmd/roachtest/registry/owners.go b/pkg/cmd/roachtest/registry/owners.go index 5fdda3dd2a6c..d1b6c5f166e7 100644 --- a/pkg/cmd/roachtest/registry/owners.go +++ b/pkg/cmd/roachtest/registry/owners.go @@ -16,16 +16,15 @@ type Owner string // The allowable values of Owner. const ( - OwnerSQLSessions Owner = `sql-sessions` - OwnerDisasterRecovery Owner = `disaster-recovery` OwnerCDC Owner = `cdc` + OwnerDisasterRecovery Owner = `disaster-recovery` OwnerKV Owner = `kv` OwnerReplication Owner = `replication` OwnerAdmissionControl Owner = `admission-control` OwnerObsInf Owner = `obs-inf-prs` OwnerServer Owner = `server` // not currently staffed + OwnerSQLFoundations Owner = `sql-foundations` OwnerSQLQueries Owner = `sql-queries` - OwnerSQLSchema Owner = `sql-schema` OwnerStorage Owner = `storage` OwnerTestEng Owner = `test-eng` OwnerDevInf Owner = `dev-inf` diff --git a/pkg/cmd/roachtest/tests/activerecord.go b/pkg/cmd/roachtest/tests/activerecord.go index c71b08619a04..7d4df358b974 100644 --- a/pkg/cmd/roachtest/tests/activerecord.go +++ b/pkg/cmd/roachtest/tests/activerecord.go @@ -243,7 +243,7 @@ func registerActiveRecord(r registry.Registry) { r.Add(registry.TestSpec{ Name: "activerecord", - Owner: registry.OwnerSQLSessions, + Owner: registry.OwnerSQLFoundations, Timeout: 5 * time.Hour, Cluster: r.MakeClusterSpec(1), NativeLibs: registry.LibGEOS, diff --git a/pkg/cmd/roachtest/tests/alterpk.go b/pkg/cmd/roachtest/tests/alterpk.go index a61293c89881..a6c199507664 100644 --- a/pkg/cmd/roachtest/tests/alterpk.go +++ b/pkg/cmd/roachtest/tests/alterpk.go @@ -179,7 +179,7 @@ func registerAlterPK(r registry.Registry) { } r.Add(registry.TestSpec{ Name: "alterpk-bank", - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, // Use a 4 node cluster -- 3 nodes will run cockroach, and the last will be the // workload driver node. Cluster: r.MakeClusterSpec(4), @@ -188,7 +188,7 @@ func registerAlterPK(r registry.Registry) { }) r.Add(registry.TestSpec{ Name: "alterpk-tpcc-250", - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, // Use a 4 node cluster -- 3 nodes will run cockroach, and the last will be the // workload driver node. Cluster: r.MakeClusterSpec(4, spec.CPU(32)), @@ -199,7 +199,7 @@ func registerAlterPK(r registry.Registry) { }) r.Add(registry.TestSpec{ Name: "alterpk-tpcc-500", - Owner: registry.OwnerSQLSchema, + Owner: registry.OwnerSQLFoundations, // Use a 4 node cluster -- 3 nodes will run cockroach, and the last will be the // workload driver node. 
diff --git a/pkg/cli/demo.go b/pkg/cli/demo.go
index db6e95a1fd2f..6923a7b4e157 100644
--- a/pkg/cli/demo.go
+++ b/pkg/cli/demo.go
@@ -378,7 +378,7 @@ func runDemoInternal(
   }

   // Ensure the last few entries in the log files are flushed at the end.
-  defer log.FlushFileSinks()
+  defer log.Flush()

   return sqlCtx.Run(ctx, conn)
 }
diff --git a/pkg/cli/start.go b/pkg/cli/start.go
index bdd97d17a684..684457e612b8 100644
--- a/pkg/cli/start.go
+++ b/pkg/cli/start.go
@@ -771,7 +771,7 @@ func createAndStartServerAsync(
   go func() {
     // Ensure that the log files see the startup messages immediately.
-    defer log.FlushFileSinks()
+    defer log.Flush()
     // If anything goes dramatically wrong, use Go's panic/recover
     // mechanism to intercept the panic and log the panic details to
     // the error reporting server.
@@ -1524,7 +1524,7 @@ func reportReadinessExternally(ctx context.Context, cmd *cobra.Command, waitForI
   // Ensure the configuration logging is written to disk in case a
   // process is waiting for the sdnotify readiness to read important
   // information from there.
-  log.FlushFileSinks()
+  log.Flush()

   // Signal readiness. This unblocks the process when running with
   // --background or under systemd.
diff --git a/pkg/cmd/roachtest/registry/owners.go b/pkg/cmd/roachtest/registry/owners.go
index 5fdda3dd2a6c..d1b6c5f166e7 100644
--- a/pkg/cmd/roachtest/registry/owners.go
+++ b/pkg/cmd/roachtest/registry/owners.go
@@ -16,16 +16,15 @@ type Owner string

 // The allowable values of Owner.
 const (
-  OwnerSQLSessions      Owner = `sql-sessions`
-  OwnerDisasterRecovery Owner = `disaster-recovery`
   OwnerCDC              Owner = `cdc`
+  OwnerDisasterRecovery Owner = `disaster-recovery`
   OwnerKV               Owner = `kv`
   OwnerReplication      Owner = `replication`
   OwnerAdmissionControl Owner = `admission-control`
   OwnerObsInf           Owner = `obs-inf-prs`
   OwnerServer           Owner = `server` // not currently staffed
+  OwnerSQLFoundations   Owner = `sql-foundations`
   OwnerSQLQueries       Owner = `sql-queries`
-  OwnerSQLSchema        Owner = `sql-schema`
   OwnerStorage          Owner = `storage`
   OwnerTestEng          Owner = `test-eng`
   OwnerDevInf           Owner = `dev-inf`
diff --git a/pkg/cmd/roachtest/tests/activerecord.go b/pkg/cmd/roachtest/tests/activerecord.go
index c71b08619a04..7d4df358b974 100644
--- a/pkg/cmd/roachtest/tests/activerecord.go
+++ b/pkg/cmd/roachtest/tests/activerecord.go
@@ -243,7 +243,7 @@ func registerActiveRecord(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:       "activerecord",
-    Owner:      registry.OwnerSQLSessions,
+    Owner:      registry.OwnerSQLFoundations,
     Timeout:    5 * time.Hour,
     Cluster:    r.MakeClusterSpec(1),
     NativeLibs: registry.LibGEOS,
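The remaining roachtest changes are this same one-line ownership move repeated across many registrations. For orientation, a trimmed, illustrative registration showing where the Owner field sits in a TestSpec; the package, test name, and body are placeholders, and the import paths are assumed from the repository layout:

    package tests

    import (
        "context"

        "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/cluster"
        "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/registry"
        "github.com/cockroachdb/cockroach/pkg/cmd/roachtest/test"
    )

    func registerExample(r registry.Registry) {
        r.Add(registry.TestSpec{
            Name:    "example",
            Owner:   registry.OwnerSQLFoundations, // previously OwnerSQLSessions or OwnerSQLSchema
            Cluster: r.MakeClusterSpec(1),
            Leases:  registry.MetamorphicLeases,
            Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
                // Test body elided.
            },
        })
    }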
diff --git a/pkg/cmd/roachtest/tests/alterpk.go b/pkg/cmd/roachtest/tests/alterpk.go
index a61293c89881..a6c199507664 100644
--- a/pkg/cmd/roachtest/tests/alterpk.go
+++ b/pkg/cmd/roachtest/tests/alterpk.go
@@ -179,7 +179,7 @@ func registerAlterPK(r registry.Registry) {
   }
   r.Add(registry.TestSpec{
     Name:  "alterpk-bank",
-    Owner: registry.OwnerSQLSchema,
+    Owner: registry.OwnerSQLFoundations,
     // Use a 4 node cluster -- 3 nodes will run cockroach, and the last will be the
     // workload driver node.
     Cluster: r.MakeClusterSpec(4),
@@ -188,7 +188,7 @@ func registerAlterPK(r registry.Registry) {
   })
   r.Add(registry.TestSpec{
     Name:  "alterpk-tpcc-250",
-    Owner: registry.OwnerSQLSchema,
+    Owner: registry.OwnerSQLFoundations,
     // Use a 4 node cluster -- 3 nodes will run cockroach, and the last will be the
     // workload driver node.
     Cluster: r.MakeClusterSpec(4, spec.CPU(32)),
@@ -199,7 +199,7 @@ func registerAlterPK(r registry.Registry) {
   })
   r.Add(registry.TestSpec{
     Name:  "alterpk-tpcc-500",
-    Owner: registry.OwnerSQLSchema,
+    Owner: registry.OwnerSQLFoundations,
     // Use a 4 node cluster -- 3 nodes will run cockroach, and the last will be the
     // workload driver node.
     Cluster: r.MakeClusterSpec(4, spec.CPU(16)),
diff --git a/pkg/cmd/roachtest/tests/asyncpg.go b/pkg/cmd/roachtest/tests/asyncpg.go
index 9b4581538fd4..a8448a8c0e8d 100644
--- a/pkg/cmd/roachtest/tests/asyncpg.go
+++ b/pkg/cmd/roachtest/tests/asyncpg.go
@@ -139,7 +139,7 @@ func registerAsyncpg(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "asyncpg",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1, spec.CPU(16)),
     Tags:    registry.Tags(`default`, `orm`),
     Leases:  registry.MetamorphicLeases,
diff --git a/pkg/cmd/roachtest/tests/awsdms.go b/pkg/cmd/roachtest/tests/awsdms.go
index 59c5ff5a81de..c1a2d2dbc33c 100644
--- a/pkg/cmd/roachtest/tests/awsdms.go
+++ b/pkg/cmd/roachtest/tests/awsdms.go
@@ -189,7 +189,7 @@ func dmsDescribeTasksInput(
 func registerAWSDMS(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "awsdms",
-    Owner:   registry.OwnerSQLSessions, // TODO(otan): add a migrations OWNERS team
+    Owner:   registry.OwnerSQLFoundations, // TODO(otan): add a migrations OWNERS team
     Cluster: r.MakeClusterSpec(1),
     Leases:  registry.MetamorphicLeases,
     Tags:    registry.Tags(`default`, `awsdms`, `aws`),
diff --git a/pkg/cmd/roachtest/tests/connection_latency.go b/pkg/cmd/roachtest/tests/connection_latency.go
index 259edfe87203..fa937c7ff9c9 100644
--- a/pkg/cmd/roachtest/tests/connection_latency.go
+++ b/pkg/cmd/roachtest/tests/connection_latency.go
@@ -119,7 +119,7 @@ func registerConnectionLatencyTest(r registry.Registry) {
   numNodes := 3
   r.Add(registry.TestSpec{
     Name:  fmt.Sprintf("connection_latency/nodes=%d/certs", numNodes),
-    Owner: registry.OwnerSQLSessions,
+    Owner: registry.OwnerSQLFoundations,
     // Add one more node for load node.
     Cluster: r.MakeClusterSpec(numNodes+1, spec.Zones(regionUsCentral)),
     Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
@@ -135,7 +135,7 @@ func registerConnectionLatencyTest(r registry.Registry) {

   r.Add(registry.TestSpec{
     Name:    fmt.Sprintf("connection_latency/nodes=%d/multiregion/certs", numMultiRegionNodes),
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(numMultiRegionNodes+loadNodes, spec.Geo(), spec.Zones(geoZonesStr)),
     Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
       runConnectionLatencyTest(ctx, t, c, numMultiRegionNodes, numZones, false /*password*/)
@@ -144,7 +144,7 @@ func registerConnectionLatencyTest(r registry.Registry) {

   r.Add(registry.TestSpec{
     Name:    fmt.Sprintf("connection_latency/nodes=%d/multiregion/password", numMultiRegionNodes),
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(numMultiRegionNodes+loadNodes, spec.Geo(), spec.Zones(geoZonesStr)),
     Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
       runConnectionLatencyTest(ctx, t, c, numMultiRegionNodes, numZones, true /*password*/)
diff --git a/pkg/cmd/roachtest/tests/django.go b/pkg/cmd/roachtest/tests/django.go
index b8dd1be2e468..02d36c0e875b 100644
--- a/pkg/cmd/roachtest/tests/django.go
+++ b/pkg/cmd/roachtest/tests/django.go
@@ -215,7 +215,7 @@ func registerDjango(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "django",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1, spec.CPU(16)),
     Leases:  registry.MetamorphicLeases,
     Tags:    registry.Tags(`default`, `orm`),
diff --git a/pkg/cmd/roachtest/tests/drain.go b/pkg/cmd/roachtest/tests/drain.go
index 52a76275a390..5e7a8c853ab5 100644
--- a/pkg/cmd/roachtest/tests/drain.go
+++ b/pkg/cmd/roachtest/tests/drain.go
@@ -38,7 +38,7 @@ func registerDrain(r registry.Registry) {
   {
     r.Add(registry.TestSpec{
       Name:    "drain/early-exit-conn-wait",
-      Owner:   registry.OwnerSQLSessions,
+      Owner:   registry.OwnerSQLFoundations,
       Cluster: r.MakeClusterSpec(1),
       Leases:  registry.MetamorphicLeases,
       Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
@@ -48,7 +48,7 @@ func registerDrain(r registry.Registry) {

     r.Add(registry.TestSpec{
       Name:    "drain/warn-conn-wait-timeout",
-      Owner:   registry.OwnerSQLSessions,
+      Owner:   registry.OwnerSQLFoundations,
       Cluster: r.MakeClusterSpec(1),
       Leases:  registry.MetamorphicLeases,
       Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
@@ -58,7 +58,7 @@ func registerDrain(r registry.Registry) {

     r.Add(registry.TestSpec{
       Name:                "drain/not-at-quorum",
-      Owner:               registry.OwnerSQLSessions,
+      Owner:               registry.OwnerSQLFoundations,
       Cluster:             r.MakeClusterSpec(3),
       Leases:              registry.MetamorphicLeases,
       SkipPostValidations: registry.PostValidationNoDeadNodes,
diff --git a/pkg/cmd/roachtest/tests/flowable.go b/pkg/cmd/roachtest/tests/flowable.go
index b2d5b180139c..e1e46a74d1e6 100644
--- a/pkg/cmd/roachtest/tests/flowable.go
+++ b/pkg/cmd/roachtest/tests/flowable.go
@@ -104,7 +104,7 @@ func registerFlowable(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "flowable",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1),
     Leases:  registry.MetamorphicLeases,
     Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
diff --git a/pkg/cmd/roachtest/tests/gopg.go b/pkg/cmd/roachtest/tests/gopg.go
index c5f58ab899c9..a1f6f1e839a6 100644
--- a/pkg/cmd/roachtest/tests/gopg.go
+++ b/pkg/cmd/roachtest/tests/gopg.go
@@ -156,7 +156,7 @@ func registerGopg(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "gopg",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1),
     Leases:  registry.MetamorphicLeases,
     Tags:    registry.Tags(`default`, `orm`),
diff --git a/pkg/cmd/roachtest/tests/gorm.go b/pkg/cmd/roachtest/tests/gorm.go
index 86d820315240..4e6b7d1a2a71 100644
--- a/pkg/cmd/roachtest/tests/gorm.go
+++ b/pkg/cmd/roachtest/tests/gorm.go
@@ -134,7 +134,7 @@ func registerGORM(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "gorm",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1),
     Leases:  registry.MetamorphicLeases,
     Tags:    registry.Tags(`default`, `orm`),
diff --git a/pkg/cmd/roachtest/tests/hibernate.go b/pkg/cmd/roachtest/tests/hibernate.go
index 228aee4fbb42..ec2357bdd9e6 100644
--- a/pkg/cmd/roachtest/tests/hibernate.go
+++ b/pkg/cmd/roachtest/tests/hibernate.go
@@ -244,7 +244,7 @@ func registerHibernate(r registry.Registry, opt hibernateOptions) {
   r.Add(registry.TestSpec{
     Name:       opt.testName,
-    Owner:      registry.OwnerSQLSessions,
+    Owner:      registry.OwnerSQLFoundations,
     Cluster:    r.MakeClusterSpec(1),
     Leases:     registry.MetamorphicLeases,
     NativeLibs: registry.LibGEOS,
diff --git a/pkg/cmd/roachtest/tests/inverted_index.go b/pkg/cmd/roachtest/tests/inverted_index.go
index 2fb2cde33d0a..a1946e055874 100644
--- a/pkg/cmd/roachtest/tests/inverted_index.go
+++ b/pkg/cmd/roachtest/tests/inverted_index.go
@@ -26,7 +26,7 @@ import (
 func registerSchemaChangeInvertedIndex(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "schemachange/invertedindex",
-    Owner:   registry.OwnerSQLSchema,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(5),
     Leases:  registry.MetamorphicLeases,
     Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
diff --git a/pkg/cmd/roachtest/tests/jasyncsql.go b/pkg/cmd/roachtest/tests/jasyncsql.go
index 1ea921c0c202..27e79cc7b925 100644
--- a/pkg/cmd/roachtest/tests/jasyncsql.go
+++ b/pkg/cmd/roachtest/tests/jasyncsql.go
@@ -141,7 +141,7 @@ func registerJasyncSQL(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "jasync",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1),
     Leases:  registry.MetamorphicLeases,
     Tags:    registry.Tags(`default`, `orm`),
diff --git a/pkg/cmd/roachtest/tests/knex.go b/pkg/cmd/roachtest/tests/knex.go
index 712bf6835cc0..8d9ff27d2da9 100644
--- a/pkg/cmd/roachtest/tests/knex.go
+++ b/pkg/cmd/roachtest/tests/knex.go
@@ -128,7 +128,7 @@ func registerKnex(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:       "knex",
-    Owner:      registry.OwnerSQLSessions,
+    Owner:      registry.OwnerSQLFoundations,
     Cluster:    r.MakeClusterSpec(1),
     Leases:     registry.MetamorphicLeases,
     NativeLibs: registry.LibGEOS,
diff --git a/pkg/cmd/roachtest/tests/libpq.go b/pkg/cmd/roachtest/tests/libpq.go
index 8312591fa6a2..12e27171bb08 100644
--- a/pkg/cmd/roachtest/tests/libpq.go
+++ b/pkg/cmd/roachtest/tests/libpq.go
@@ -136,7 +136,7 @@ func registerLibPQ(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "lib/pq",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1),
     Leases:  registry.MetamorphicLeases,
     Tags:    registry.Tags(`default`, `driver`),
diff --git a/pkg/cmd/roachtest/tests/liquibase.go b/pkg/cmd/roachtest/tests/liquibase.go
index 3e72b330034b..82fa7cb38efd 100644
--- a/pkg/cmd/roachtest/tests/liquibase.go
+++ b/pkg/cmd/roachtest/tests/liquibase.go
@@ -132,7 +132,7 @@ func registerLiquibase(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "liquibase",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1),
     Leases:  registry.MetamorphicLeases,
     Tags:    registry.Tags(`default`, `tool`),
diff --git a/pkg/cmd/roachtest/tests/mixed_version_decl_schemachange_compat.go b/pkg/cmd/roachtest/tests/mixed_version_decl_schemachange_compat.go
index d06035a7e987..6161bb99d67a 100644
--- a/pkg/cmd/roachtest/tests/mixed_version_decl_schemachange_compat.go
+++ b/pkg/cmd/roachtest/tests/mixed_version_decl_schemachange_compat.go
@@ -28,7 +28,7 @@ import (
 func registerDeclSchemaChangeCompatMixedVersions(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "schemachange/mixed-versions-compat",
-    Owner:   registry.OwnerSQLSchema,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1),
     Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
       if c.IsLocal() && runtime.GOARCH == "arm64" {
diff --git a/pkg/cmd/roachtest/tests/mixed_version_job_compatibility_in_declarative_schema_changer.go b/pkg/cmd/roachtest/tests/mixed_version_job_compatibility_in_declarative_schema_changer.go
index fbcaa986c847..8eeeac3c93df 100644
--- a/pkg/cmd/roachtest/tests/mixed_version_job_compatibility_in_declarative_schema_changer.go
+++ b/pkg/cmd/roachtest/tests/mixed_version_job_compatibility_in_declarative_schema_changer.go
@@ -130,7 +130,7 @@ func registerDeclarativeSchemaChangerJobCompatibilityInMixedVersion(r registry.R
   // supported in the "previous" major release.
   r.Add(registry.TestSpec{
     Name:    "declarative_schema_changer/job-compatibility-mixed-version-V222-V231",
-    Owner:   registry.OwnerSQLSchema,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(4),
     Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
       if c.IsLocal() && runtime.GOARCH == "arm64" {
diff --git a/pkg/cmd/roachtest/tests/mixed_version_schemachange.go b/pkg/cmd/roachtest/tests/mixed_version_schemachange.go
index fbc0014a4234..fb66c4003e60 100644
--- a/pkg/cmd/roachtest/tests/mixed_version_schemachange.go
+++ b/pkg/cmd/roachtest/tests/mixed_version_schemachange.go
@@ -25,7 +25,7 @@ import (
 func registerSchemaChangeMixedVersions(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:  "schemachange/mixed-versions",
-    Owner: registry.OwnerSQLSchema,
+    Owner: registry.OwnerSQLFoundations,
     // This tests the work done for 20.1 that made schema changes jobs and in
     // addition prevented making any new schema changes on a mixed cluster in
     // order to prevent bugs during upgrades.
diff --git a/pkg/cmd/roachtest/tests/nodejs_postgres.go b/pkg/cmd/roachtest/tests/nodejs_postgres.go
index eb19b7f5e9d6..2abd39a3afc2 100644
--- a/pkg/cmd/roachtest/tests/nodejs_postgres.go
+++ b/pkg/cmd/roachtest/tests/nodejs_postgres.go
@@ -168,7 +168,7 @@ PGSSLCERT=$HOME/certs/client.%s.crt PGSSLKEY=$HOME/certs/client.%s.key PGSSLROOT
   r.Add(registry.TestSpec{
     Name:    "node-postgres",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1),
     Leases:  registry.MetamorphicLeases,
     Tags:    registry.Tags(`default`, `driver`),
diff --git a/pkg/cmd/roachtest/tests/npgsql.go b/pkg/cmd/roachtest/tests/npgsql.go
index bec4edef6e17..56ce048222a0 100644
--- a/pkg/cmd/roachtest/tests/npgsql.go
+++ b/pkg/cmd/roachtest/tests/npgsql.go
@@ -167,7 +167,7 @@ echo '%s' | git apply --ignore-whitespace -`, npgsqlPatch),
   r.Add(registry.TestSpec{
     Name:    "npgsql",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1),
     Leases:  registry.MetamorphicLeases,
     Tags:    registry.Tags(`default`, `driver`),
diff --git a/pkg/cmd/roachtest/tests/pgjdbc.go b/pkg/cmd/roachtest/tests/pgjdbc.go
index 561871ec4b3a..a278e4c8425a 100644
--- a/pkg/cmd/roachtest/tests/pgjdbc.go
+++ b/pkg/cmd/roachtest/tests/pgjdbc.go
@@ -211,7 +211,7 @@ func registerPgjdbc(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "pgjdbc",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1),
     Leases:  registry.MetamorphicLeases,
     Tags:    registry.Tags(`default`, `driver`),
diff --git a/pkg/cmd/roachtest/tests/pgx.go b/pkg/cmd/roachtest/tests/pgx.go
index d079c448d7b9..34911fdcb25b 100644
--- a/pkg/cmd/roachtest/tests/pgx.go
+++ b/pkg/cmd/roachtest/tests/pgx.go
@@ -132,7 +132,7 @@ func registerPgx(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "pgx",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1),
     Leases:  registry.MetamorphicLeases,
     Tags:    registry.Tags(`default`, `driver`),
diff --git a/pkg/cmd/roachtest/tests/pop.go b/pkg/cmd/roachtest/tests/pop.go
index 1d97f4b66de8..c12d98634f08 100644
--- a/pkg/cmd/roachtest/tests/pop.go
+++ b/pkg/cmd/roachtest/tests/pop.go
@@ -100,7 +100,7 @@ func registerPop(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "pop",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1),
     Leases:  registry.MetamorphicLeases,
     Tags:    registry.Tags(`default`, `orm`),
diff --git a/pkg/cmd/roachtest/tests/psycopg.go b/pkg/cmd/roachtest/tests/psycopg.go
index 244ae8c3822d..809553b3da6e 100644
--- a/pkg/cmd/roachtest/tests/psycopg.go
+++ b/pkg/cmd/roachtest/tests/psycopg.go
@@ -141,7 +141,7 @@ func registerPsycopg(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "psycopg",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1),
     Leases:  registry.MetamorphicLeases,
     Tags:    registry.Tags(`default`, `driver`),
diff --git a/pkg/cmd/roachtest/tests/ruby_pg.go b/pkg/cmd/roachtest/tests/ruby_pg.go
index f66a0652fa4c..9703386f53c5 100644
--- a/pkg/cmd/roachtest/tests/ruby_pg.go
+++ b/pkg/cmd/roachtest/tests/ruby_pg.go
@@ -230,7 +230,7 @@ func registerRubyPG(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:       "ruby-pg",
     Timeout:    1 * time.Hour,
-    Owner:      registry.OwnerSQLSessions,
+    Owner:      registry.OwnerSQLFoundations,
     Cluster:    r.MakeClusterSpec(1),
     Leases:     registry.MetamorphicLeases,
     NativeLibs: registry.LibGEOS,
diff --git a/pkg/cmd/roachtest/tests/rust_postgres.go b/pkg/cmd/roachtest/tests/rust_postgres.go
index 7391cdd43963..9d5cff7a655e 100644
--- a/pkg/cmd/roachtest/tests/rust_postgres.go
+++ b/pkg/cmd/roachtest/tests/rust_postgres.go
@@ -166,7 +166,7 @@ func registerRustPostgres(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "rust-postgres",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1, spec.CPU(16)),
     Leases:  registry.MetamorphicLeases,
     Tags:    registry.Tags(`default`, `orm`),
diff --git a/pkg/cmd/roachtest/tests/schemachange.go b/pkg/cmd/roachtest/tests/schemachange.go
index cd853e43d237..da2a7ed19563 100644
--- a/pkg/cmd/roachtest/tests/schemachange.go
+++ b/pkg/cmd/roachtest/tests/schemachange.go
@@ -30,7 +30,7 @@ import (
 func registerSchemaChangeDuringKV(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    `schemachange/during/kv`,
-    Owner:   registry.OwnerSQLSchema,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(5),
     Leases:  registry.MetamorphicLeases,
     Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
@@ -308,7 +308,7 @@ func makeIndexAddTpccTest(
 ) registry.TestSpec {
   return registry.TestSpec{
     Name:    fmt.Sprintf("schemachange/index/tpcc/w=%d", warehouses),
-    Owner:   registry.OwnerSQLSchema,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: spec,
     Leases:  registry.MetamorphicLeases,
     Timeout: length * 3,
@@ -341,7 +341,7 @@ func makeSchemaChangeBulkIngestTest(
 ) registry.TestSpec {
   return registry.TestSpec{
     Name:    "schemachange/bulkingest",
-    Owner:   registry.OwnerSQLSchema,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(numNodes),
     Leases:  registry.MetamorphicLeases,
     Timeout: length * 2,
@@ -428,7 +428,7 @@ func makeSchemaChangeDuringTPCC(
 ) registry.TestSpec {
   return registry.TestSpec{
     Name:    "schemachange/during/tpcc",
-    Owner:   registry.OwnerSQLSchema,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: spec,
     Leases:  registry.MetamorphicLeases,
     Timeout: length * 3,
diff --git a/pkg/cmd/roachtest/tests/schemachange_random_load.go b/pkg/cmd/roachtest/tests/schemachange_random_load.go
index 6cbbf629045a..cd6cb29a9cdb 100644
--- a/pkg/cmd/roachtest/tests/schemachange_random_load.go
+++ b/pkg/cmd/roachtest/tests/schemachange_random_load.go
@@ -44,7 +44,7 @@ func registerSchemaChangeRandomLoad(r registry.Registry) {
   geoZonesStr := strings.Join(geoZones, ",")
   r.Add(registry.TestSpec{
     Name:  "schemachange/random-load",
-    Owner: registry.OwnerSQLSchema,
+    Owner: registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(
       3,
       spec.Geo(),
@@ -90,7 +90,7 @@ func registerRandomLoadBenchSpec(r registry.Registry, b randomLoadBenchSpec) {
   r.Add(registry.TestSpec{
     Name:       name,
-    Owner:      registry.OwnerSQLSchema,
+    Owner:      registry.OwnerSQLFoundations,
     Cluster:    r.MakeClusterSpec(b.Nodes),
     NativeLibs: registry.LibGEOS,
     Skip:       "https://github.com/cockroachdb/cockroach/issues/56230",
diff --git a/pkg/cmd/roachtest/tests/secondary_indexes.go b/pkg/cmd/roachtest/tests/secondary_indexes.go
index 46426458e83d..78fc3a2decde 100644
--- a/pkg/cmd/roachtest/tests/secondary_indexes.go
+++ b/pkg/cmd/roachtest/tests/secondary_indexes.go
@@ -137,7 +137,7 @@ func verifyTableData(node int, expected [][]int) versionStep {
 func registerSecondaryIndexesMultiVersionCluster(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "schemachange/secondary-index-multi-version",
-    Owner:   registry.OwnerSQLSchema,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(3),
     Leases:  registry.MetamorphicLeases,
     Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
diff --git a/pkg/cmd/roachtest/tests/sequelize.go b/pkg/cmd/roachtest/tests/sequelize.go
index d79d197241e0..aa1551eba283 100644
--- a/pkg/cmd/roachtest/tests/sequelize.go
+++ b/pkg/cmd/roachtest/tests/sequelize.go
@@ -151,7 +151,7 @@ func registerSequelize(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:       "sequelize",
-    Owner:      registry.OwnerSQLSessions,
+    Owner:      registry.OwnerSQLFoundations,
     Cluster:    r.MakeClusterSpec(1),
     Leases:     registry.MetamorphicLeases,
     NativeLibs: registry.LibGEOS,
diff --git a/pkg/cmd/roachtest/tests/sqlalchemy.go b/pkg/cmd/roachtest/tests/sqlalchemy.go
index 66c78f42ab69..eb715bfd11e1 100644
--- a/pkg/cmd/roachtest/tests/sqlalchemy.go
+++ b/pkg/cmd/roachtest/tests/sqlalchemy.go
@@ -37,7 +37,7 @@ var supportedSQLAlchemyTag = "2.0.2"
 func registerSQLAlchemy(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "sqlalchemy",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1),
     Leases:  registry.MetamorphicLeases,
     Tags:    registry.Tags(`default`, `orm`),
diff --git a/pkg/cmd/roachtest/tests/tpcc.go b/pkg/cmd/roachtest/tests/tpcc.go
index 214a7f93c399..4db0e47f6f2e 100644
--- a/pkg/cmd/roachtest/tests/tpcc.go
+++ b/pkg/cmd/roachtest/tests/tpcc.go
@@ -678,7 +678,7 @@ func registerTPCC(r registry.Registry) {
     tc := multiRegionTests[i]
     r.Add(registry.TestSpec{
       Name:  tc.name,
-      Owner: registry.OwnerSQLSchema,
+      Owner: registry.OwnerSQLFoundations,
       // Add an extra node which serves as the workload nodes.
       Cluster:           r.MakeClusterSpec(len(regions)*nodesPerRegion+1, spec.Geo(), spec.Zones(strings.Join(zs, ","))),
       EncryptionSupport: registry.EncryptionMetamorphic,
diff --git a/pkg/cmd/roachtest/tests/typeorm.go b/pkg/cmd/roachtest/tests/typeorm.go
index 93ec33d79ae7..a125fba48eeb 100644
--- a/pkg/cmd/roachtest/tests/typeorm.go
+++ b/pkg/cmd/roachtest/tests/typeorm.go
@@ -180,7 +180,7 @@ func registerTypeORM(r registry.Registry) {
   r.Add(registry.TestSpec{
     Name:    "typeorm",
-    Owner:   registry.OwnerSQLSessions,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1),
     Leases:  registry.MetamorphicLeases,
     Tags:    registry.Tags(`default`, `orm`),
diff --git a/pkg/cmd/roachtest/tests/validate_system_schema_after_version_upgrade.go b/pkg/cmd/roachtest/tests/validate_system_schema_after_version_upgrade.go
index 5c0486122e7b..e1fdbeb6f368 100644
--- a/pkg/cmd/roachtest/tests/validate_system_schema_after_version_upgrade.go
+++ b/pkg/cmd/roachtest/tests/validate_system_schema_after_version_upgrade.go
@@ -33,7 +33,7 @@ func registerValidateSystemSchemaAfterVersionUpgrade(r registry.Registry) {
   // and assert that the output matches the expected output content.
   r.Add(registry.TestSpec{
     Name:    "systemschema/validate-after-version-upgrade",
-    Owner:   registry.OwnerSQLSchema,
+    Owner:   registry.OwnerSQLFoundations,
     Cluster: r.MakeClusterSpec(1),
     Leases:  registry.MetamorphicLeases,
     Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
diff --git a/pkg/jobs/jobstest/logutils.go b/pkg/jobs/jobstest/logutils.go
index 37102683d20a..7299d0b7e4c4 100644
--- a/pkg/jobs/jobstest/logutils.go
+++ b/pkg/jobs/jobstest/logutils.go
@@ -36,7 +36,7 @@ func CheckEmittedEvents(
 ) {
   // Check that the structured event was logged.
   testutils.SucceedsSoon(t, func() error {
-    log.FlushFileSinks()
+    log.Flush()
     entries, err := log.FetchEntriesFromFiles(startTime,
       math.MaxInt64, 10000, cmLogRe, log.WithMarkedSensitiveData)
     if err != nil {
diff --git a/pkg/jobs/registry_external_test.go b/pkg/jobs/registry_external_test.go
index a600b257502d..8bf561d6a09e 100644
--- a/pkg/jobs/registry_external_test.go
+++ b/pkg/jobs/registry_external_test.go
@@ -543,7 +543,7 @@ SELECT unnest(execution_errors)
     t *testing.T, id jobspb.JobID, status jobs.Status, from, to time.Time, cause string,
   ) {
-    log.FlushFileSinks()
+    log.Flush()
     entries, err := log.FetchEntriesFromFiles(
       from.UnixNano(), to.UnixNano(), 2,
       regexp.MustCompile(fmt.Sprintf(
diff --git a/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go b/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go
index f96f5d065492..482f5a1777ba 100644
--- a/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go
+++ b/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go
@@ -645,7 +645,7 @@ func TestCorruptData(t *testing.T) {
       require.NoError(t, err)
     }

-    log.FlushFileSinks()
+    log.Flush()
     entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 100, msg,
       log.WithFlattenedSensitiveData)
     require.NoError(t, err)
@@ -732,7 +732,7 @@ func TestCorruptData(t *testing.T) {
     require.Nil(t, got)
     _, err = pts.GetState(ctx)
     require.NoError(t, err)
-    log.FlushFileSinks()
+    log.Flush()
     entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 100, msg,
       log.WithFlattenedSensitiveData)
diff --git a/pkg/kv/kvserver/replica_test.go b/pkg/kv/kvserver/replica_test.go
index 30b8d29ed3d7..a4208069d728 100644
--- a/pkg/kv/kvserver/replica_test.go
+++ b/pkg/kv/kvserver/replica_test.go
@@ -13596,7 +13596,7 @@ func TestProposalNotAcknowledgedOrReproposedAfterApplication(t *testing.T) {
   if _, pErr := tc.repl.Send(ctx, ba); pErr != nil {
     t.Fatal(pErr)
   }
-  log.FlushFileSinks()
+  log.Flush()
   stopper.Quiesce(ctx)

   entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1,
diff --git a/pkg/kv/kvserver/replicate_queue_test.go b/pkg/kv/kvserver/replicate_queue_test.go
index e2a17e1fac3b..9a7106f5b43b 100644
--- a/pkg/kv/kvserver/replicate_queue_test.go
+++ b/pkg/kv/kvserver/replicate_queue_test.go
@@ -887,7 +887,7 @@ func TestReplicateQueueTracingOnError(t *testing.T) {

   // Flush logs and get log messages from replicate_queue.go since just
   // before calling store.Enqueue(..).
-  log.FlushFileSinks()
+  log.Flush()
   entries, err := log.FetchEntriesFromFiles(testStartTs.UnixNano(),
     math.MaxInt64, 100, regexp.MustCompile(`replicate_queue\.go`), log.WithMarkedSensitiveData)
   require.NoError(t, err)
diff --git a/pkg/security/certmgr/cert_manager_test.go b/pkg/security/certmgr/cert_manager_test.go
index bef9ebeb706a..623f2cda66b1 100644
--- a/pkg/security/certmgr/cert_manager_test.go
+++ b/pkg/security/certmgr/cert_manager_test.go
@@ -57,7 +57,7 @@ var cmLogRe = regexp.MustCompile(`event_log\.go`)

 // Check that the structured event was logged.
 func checkLogStructEntry(t *testing.T, expectSuccess bool, beforeReload time.Time) error {
-  log.FlushFileSinks()
+  log.Flush()
   entries, err := log.FetchEntriesFromFiles(beforeReload.UnixNano(),
     math.MaxInt64, 10000, cmLogRe, log.WithMarkedSensitiveData)
   if err != nil {
diff --git a/pkg/security/certs_rotation_test.go b/pkg/security/certs_rotation_test.go
index 0f39ee83df39..8979464c055a 100644
--- a/pkg/security/certs_rotation_test.go
+++ b/pkg/security/certs_rotation_test.go
@@ -201,7 +201,7 @@ func TestRotateCerts(t *testing.T) {
   // the moment the structured logging event is actually
   // written to the log file.
   testutils.SucceedsSoon(t, func() error {
-    log.FlushFileSinks()
+    log.Flush()
     entries, err := log.FetchEntriesFromFiles(beforeReload.UnixNano(),
       math.MaxInt64, 10000, cmLogRe, log.WithMarkedSensitiveData)
     if err != nil {
diff --git a/pkg/server/server_test.go b/pkg/server/server_test.go
index acbfee91e78c..9870d9c34616 100644
--- a/pkg/server/server_test.go
+++ b/pkg/server/server_test.go
@@ -592,7 +592,7 @@ func TestPersistHLCUpperBound(t *testing.T) {
   var fatal bool
   defer log.ResetExitFunc()
   log.SetExitFunc(true /* hideStack */, func(r exit.Code) {
-    defer log.FlushFileSinks()
+    defer log.Flush()
     if r == exit.FatalError() {
       fatal = true
     }
diff --git a/pkg/server/status.go b/pkg/server/status.go
index 5aee4c3f7515..27fc454e5264 100644
--- a/pkg/server/status.go
+++ b/pkg/server/status.go
@@ -1239,7 +1239,7 @@ func (s *statusServer) LogFilesList(
     }
     return status.LogFilesList(ctx, req)
   }
-  log.FlushFileSinks()
+  log.Flush()
   logFiles, err := log.ListLogFiles()
   if err != nil {
     return nil, serverError(ctx, err)
@@ -1279,7 +1279,7 @@ func (s *statusServer) LogFile(
   inputEditMode := log.SelectEditMode(req.Redact, log.KeepRedactable)

   // Ensure that the latest log entries are available in files.
-  log.FlushFileSinks()
+  log.Flush()

   // Read the logs.
   reader, err := log.GetLogReader(req.File)
@@ -1409,7 +1409,7 @@ func (s *statusServer) Logs(
   }

   // Ensure that the latest log entries are available in files.
-  log.FlushFileSinks()
+  log.Flush()

   // Read the logs.
   entries, err := log.FetchEntriesFromFiles(
diff --git a/pkg/server/status/runtime_stats_test.go b/pkg/server/status/runtime_stats_test.go
index d04e453cf34b..601a36276159 100644
--- a/pkg/server/status/runtime_stats_test.go
+++ b/pkg/server/status/runtime_stats_test.go
@@ -44,7 +44,7 @@ func TestStructuredEventLogging(t *testing.T) {
   time.Sleep(10 * time.Second)

   // Ensure that the entry hits the OS so it can be read back below.
-  log.FlushFileSinks()
+  log.Flush()

   entries, err := log.FetchEntriesFromFiles(testStartTs.UnixNano(),
     math.MaxInt64, 10000, cmLogRe, log.WithMarkedSensitiveData)
diff --git a/pkg/server/structlogging/hot_ranges_log_test.go b/pkg/server/structlogging/hot_ranges_log_test.go
index 1454a7408dc4..c610b4586fdd 100644
--- a/pkg/server/structlogging/hot_ranges_log_test.go
+++ b/pkg/server/structlogging/hot_ranges_log_test.go
@@ -87,7 +87,7 @@ func TestHotRangesStats(t *testing.T) {
   })

   testutils.SucceedsWithin(t, func() error {
-    log.FlushFileSinks()
+    log.Flush()
     entries, err := log.FetchEntriesFromFiles(
       0,
       math.MaxInt64,
diff --git a/pkg/sql/admin_audit_log_test.go b/pkg/sql/admin_audit_log_test.go
index 0efeca3d452f..4b7ee9bae57e 100644
--- a/pkg/sql/admin_audit_log_test.go
+++ b/pkg/sql/admin_audit_log_test.go
@@ -73,7 +73,7 @@ func TestAdminAuditLogBasic(t *testing.T) {
   db.Exec(t, `SELECT 1;`)

   var selectAdminRe = regexp.MustCompile(`"EventType":"admin_query","Statement":"SELECT ‹1›","Tag":"SELECT","User":"root"`)

-  log.FlushFileSinks()
+  log.Flush()

   entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 10000, selectAdminRe,
     log.WithMarkedSensitiveData)
@@ -124,7 +124,7 @@ func TestAdminAuditLogRegularUser(t *testing.T) {

   var selectRe = regexp.MustCompile(`SELECT 1`)

-  log.FlushFileSinks()
+  log.Flush()

   entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 10000, selectRe,
     log.WithMarkedSensitiveData)
@@ -180,7 +180,7 @@ COMMIT;
     },
   }

-  log.FlushFileSinks()
+  log.Flush()

   entries, err := log.FetchEntriesFromFiles(
     0,
@@ -275,7 +275,7 @@ COMMIT;
     },
   }

-  log.FlushFileSinks()
+  log.Flush()

   entries, err := log.FetchEntriesFromFiles(
     0,
@@ -319,7 +319,7 @@ COMMIT;
     t.Fatal(err)
   }

-  log.FlushFileSinks()
+  log.Flush()

   entries, err = log.FetchEntriesFromFiles(
     0,
diff --git a/pkg/sql/event_log_test.go b/pkg/sql/event_log_test.go
index 8aece89e2f72..ca237786077a 100644
--- a/pkg/sql/event_log_test.go
+++ b/pkg/sql/event_log_test.go
@@ -82,7 +82,7 @@ func TestStructuredEventLogging(t *testing.T) {
   }

   // Ensure that the entries hit the OS so they can be read back below.
-  log.FlushFileSinks()
+  log.Flush()

   entries, err := log.FetchEntriesFromFiles(testStartTs.UnixNano(),
     math.MaxInt64, 10000, execLogRe, log.WithMarkedSensitiveData)
@@ -736,7 +736,7 @@ func TestPerfLogging(t *testing.T) {
       }

       var logRe = regexp.MustCompile(tc.logRe)
-      log.FlushFileSinks()
+      log.Flush()
       entries, err := log.FetchEntriesFromFiles(
         start, math.MaxInt64, 1000, logRe, log.WithMarkedSensitiveData,
       )
diff --git a/pkg/sql/pgwire/auth_test.go b/pkg/sql/pgwire/auth_test.go
index f6fa56e9c700..4858105c567c 100644
--- a/pkg/sql/pgwire/auth_test.go
+++ b/pkg/sql/pgwire/auth_test.go
@@ -734,7 +734,7 @@ func TestClientAddrOverride(t *testing.T) {
       t.Run("check-server-log-uses-override", func(t *testing.T) {
         // Wait for the disconnection event in logs.
         testutils.SucceedsSoon(t, func() error {
-          log.FlushFileSinks()
+          log.Flush()
          entries, err := log.FetchEntriesFromFiles(testStartTime.UnixNano(),
             math.MaxInt64, 10000, sessionTerminatedRe, log.WithMarkedSensitiveData)
           if err != nil {
@@ -747,7 +747,7 @@ func TestClientAddrOverride(t *testing.T) {
         })

         // Now we want to check that the logging tags are also updated.
-        log.FlushFileSinks()
+        log.Flush()
         entries, err := log.FetchEntriesFromFiles(testStartTime.UnixNano(),
           math.MaxInt64, 10000, authLogFileRe, log.WithMarkedSensitiveData)
         if err != nil {
diff --git a/pkg/sql/scheduledlogging/captured_index_usage_stats_test.go b/pkg/sql/scheduledlogging/captured_index_usage_stats_test.go
index c4a0b70d2899..7c452c45f759 100644
--- a/pkg/sql/scheduledlogging/captured_index_usage_stats_test.go
+++ b/pkg/sql/scheduledlogging/captured_index_usage_stats_test.go
@@ -281,7 +281,7 @@ func checkNumTotalEntriesAndNumIndexEntries(
   expectedIndividualIndexEntries int,
   scheduleCompleteChan chan struct{},
 ) error {
-  log.FlushFileSinks()
+  log.Flush()
   // Fetch log entries.
   entries, err := log.FetchEntriesFromFiles(
     0,
diff --git a/pkg/sql/telemetry_logging_test.go b/pkg/sql/telemetry_logging_test.go
index 8c3ca58cf7cf..cad9ebd844aa 100644
--- a/pkg/sql/telemetry_logging_test.go
+++ b/pkg/sql/telemetry_logging_test.go
@@ -365,7 +365,7 @@ func TestTelemetryLogging(t *testing.T) {
     }
   }

-  log.FlushFileSinks()
+  log.Flush()

   entries, err := log.FetchEntriesFromFiles(
     0,
@@ -672,7 +672,7 @@ func TestNoTelemetryLogOnTroubleshootMode(t *testing.T) {
     db.Exec(t, tc.query)
   }

-  log.FlushFileSinks()
+  log.Flush()

   entries, err := log.FetchEntriesFromFiles(
     0,
@@ -876,7 +876,7 @@ func TestTelemetryLogJoinTypesAndAlgorithms(t *testing.T) {
     db.Exec(t, tc.query)
   }

-  log.FlushFileSinks()
+  log.Flush()

   entries, err := log.FetchEntriesFromFiles(
     0,
@@ -1131,7 +1131,7 @@ func TestTelemetryScanCounts(t *testing.T) {
     db.Exec(t, tc.query)
   }

-  log.FlushFileSinks()
+  log.Flush()

   entries, err := log.FetchEntriesFromFiles(
     0,
@@ -1245,7 +1245,7 @@ $$`

   db.Exec(t, stmt)

-  log.FlushFileSinks()
+  log.Flush()

   entries, err := log.FetchEntriesFromFiles(
     0,
diff --git a/pkg/ui/workspaces/db-console/src/views/app/containers/layout/index.tsx b/pkg/ui/workspaces/db-console/src/views/app/containers/layout/index.tsx
index dbbd2fded48c..172e6e19545e 100644
--- a/pkg/ui/workspaces/db-console/src/views/app/containers/layout/index.tsx
+++ b/pkg/ui/workspaces/db-console/src/views/app/containers/layout/index.tsx
@@ -106,7 +106,9 @@ class Layout extends React.Component {
-          {this.props.children}
+
+            {this.props.children}
+
diff --git a/pkg/upgrade/upgrades/schema_changes_external_test.go b/pkg/upgrade/upgrades/schema_changes_external_test.go
index 5f9df3a249f9..a7bd9894c05e 100644
--- a/pkg/upgrade/upgrades/schema_changes_external_test.go
+++ b/pkg/upgrade/upgrades/schema_changes_external_test.go
@@ -510,7 +510,7 @@ func testMigrationWithFailures(
     })
     if test.waitForMigrationRestart {
       // Ensure that we have observed the expected number of ignored schema change jobs.
-      log.FlushFileSinks()
+      log.Flush()
       entries, err := log.FetchEntriesFromFiles(
         0, math.MaxInt64, 10000,
         regexp.MustCompile("skipping.*operation as the schema change already exists."),
diff --git a/pkg/util/hlc/hlc_test.go b/pkg/util/hlc/hlc_test.go
index 11c88e075f33..50cd0e1c0298 100644
--- a/pkg/util/hlc/hlc_test.go
+++ b/pkg/util/hlc/hlc_test.go
@@ -438,7 +438,7 @@ func TestHLCEnforceWallTimeWithinBoundsInNow(t *testing.T) {
   var fatal bool
   defer log.ResetExitFunc()
   log.SetExitFunc(true /* hideStack */, func(r exit.Code) {
-    defer log.FlushFileSinks()
+    defer log.Flush()
     if r == exit.FatalError() {
       fatal = true
     }
@@ -487,7 +487,7 @@ func TestHLCEnforceWallTimeWithinBoundsInUpdate(t *testing.T) {
   var fatal bool
   defer log.ResetExitFunc()
   log.SetExitFunc(true /* hideStack */, func(r exit.Code) {
-    defer log.FlushFileSinks()
+    defer log.Flush()
     if r == exit.FatalError() {
       fatal = true
     }
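The two hlc_test.go hunks, like the server_test.go hunk earlier, use the same fatal-error interception idiom: install an exit hook that flushes logs before the process would exit, and record whether a fatal error fired. Restated as a fragment, exactly as it appears in those tests:

    // Inside a test body; log and exit are the util/log and util/exit packages.
    var fatal bool
    defer log.ResetExitFunc()
    log.SetExitFunc(true /* hideStack */, func(r exit.Code) {
        defer log.Flush() // get the fatal message onto disk before "exiting"
        if r == exit.FatalError() {
            fatal = true
        }
    })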
diff --git a/pkg/util/log/clog_test.go b/pkg/util/log/clog_test.go
index b51bcf461003..69b4ef0aaea2 100644
--- a/pkg/util/log/clog_test.go
+++ b/pkg/util/log/clog_test.go
@@ -651,7 +651,7 @@ func TestFileSeverityFilter(t *testing.T) {
   Infof(context.Background(), "test1")
   Errorf(context.Background(), "test2")

-  FlushFileSinks()
+  Flush()

   debugFileSink := debugFileSinkInfo.sink.(*fileSink)
   contents, err := os.ReadFile(debugFileSink.getFileName(t))
diff --git a/pkg/util/log/doc.go b/pkg/util/log/doc.go
index c1fb3f1aa10f..5885a423b641 100644
--- a/pkg/util/log/doc.go
+++ b/pkg/util/log/doc.go
@@ -83,10 +83,10 @@
 //
 // # Output
 //
-// Log output is buffered and written periodically using FlushFileSinks.
-// Programs should call FlushFileSinks before exiting to guarantee all
+// Log output is buffered and written periodically using Flush.
+// Programs should call Flush before exiting to guarantee all
 // log output is written to files. Note that buffered network sinks also
-// exist. If you'd like to flush these as well, call FlushAllSync.
+// exist. If you'd like to flush these as well, call Flush.
 //
 // By default, all log statements write to files in a temporary directory.
 // This package provides several flags that modify this behavior.
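Following the updated doc comment, a minimal usage sketch, assuming a binary that logs through util/log: flush the file sinks (and, now, the buffered network sinks) before exiting.

    package main

    import "github.com/cockroachdb/cockroach/pkg/util/log"

    func main() {
        // Flush covers both file and buffered network sinks after this change.
        defer log.Flush()
        // ... program body that emits log entries ...
    }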
diff --git a/pkg/util/log/file.go b/pkg/util/log/file.go
index 8778e0f82fcd..52f0ed218c64 100644
--- a/pkg/util/log/file.go
+++ b/pkg/util/log/file.go
@@ -67,7 +67,7 @@ type fileSink struct {
   // name generator for log files.
   nameGenerator fileNameGenerator

-  // bufferedWrites if false calls file.FlushFileSinks on every log
+  // bufferedWrites if false calls file.Flush on every log
   // write. This can be set per-logger e.g. for audit logging.
   //
   // Note that synchronization for all log files simultaneously can
diff --git a/pkg/util/log/file_log_gc_test.go b/pkg/util/log/file_log_gc_test.go
index ed6e907bc5b6..813d774a95cc 100644
--- a/pkg/util/log/file_log_gc_test.go
+++ b/pkg/util/log/file_log_gc_test.go
@@ -158,7 +158,7 @@ func testLogGC(t *testing.T, fileSink *fileSink, logFn func(ctx context.Context,
   const newLogFiles = 20
   for i := 1; i < newLogFiles; i++ {
     logFn(context.Background(), fmt.Sprint(i))
-    FlushFileSinks()
+    Flush()
   }
   if _, err := expectFileCount(newLogFiles); err != nil {
     t.Fatal(err)
@@ -169,7 +169,7 @@ func testLogGC(t *testing.T, fileSink *fileSink, logFn func(ctx context.Context,

   // Emit a log line which will rotate the files and trigger GC.
   logFn(context.Background(), "final")
-  FlushFileSinks()
+  Flush()

   succeedsSoon(t, func() error {
     _, err := expectFileCount(expectedFilesAfterGC)
diff --git a/pkg/util/log/formats_test.go b/pkg/util/log/formats_test.go
index 510f6033c4db..dc4895602049 100644
--- a/pkg/util/log/formats_test.go
+++ b/pkg/util/log/formats_test.go
@@ -79,7 +79,7 @@ func TestFormatRedaction(t *testing.T) {
   defer cleanupFn()

   Infof(ctx, "safe2 %s", "secret3")
-  FlushFileSinks()
+  Flush()

   contents, err := os.ReadFile(getDebugLogFileName(t))
   require.NoError(t, err)
diff --git a/pkg/util/log/log_flush.go b/pkg/util/log/log_flush.go
index 05054f0e51cc..ff9e3783aaf4 100644
--- a/pkg/util/log/log_flush.go
+++ b/pkg/util/log/log_flush.go
@@ -17,6 +17,7 @@ import (
   "time"

   "github.com/cockroachdb/cockroach/pkg/util/envutil"
+  "github.com/cockroachdb/cockroach/pkg/util/syncutil"
   "github.com/cockroachdb/cockroach/pkg/util/sysutil"
 )

@@ -27,42 +28,66 @@ type flushSyncWriter interface {
   io.Writer
 }

-// FlushFileSinks explicitly flushes all pending log file I/O.
-// See also flushDaemon() that manages background (asynchronous)
-// flushes, and signalFlusher() that manages flushes in reaction to a
-// user signal.
-func FlushFileSinks() {
-  _ = logging.allSinkInfos.iterFileSinks(func(l *fileSink) error {
-    l.lockAndFlushAndMaybeSync(true /*doSync*/)
-    return nil
-  })
-}
+// flushActive indicates if a current Flush() is executing. If true,
+// additional calls to Flush() will be a noop and return early, until
+// the current Flush() call has completed.
+var flushActive syncutil.AtomicBool

-// FlushAllSync explicitly flushes all asynchronous buffered logging sinks,
+// Flush explicitly flushes all asynchronous buffered logging sinks,
 // including pending log file I/O and buffered network sinks.
 //
 // NB: This is a synchronous operation, and will block until all flushes
 // have completed. Generally only recommended for use in crash reporting
 // scenarios.
-func FlushAllSync() {
-  FlushFileSinks()
+//
+// When flushing buffered network logging sinks, each sink is given a
+// 5-second timeout before we move on to attempt flushing the next.
+func Flush() {
+  if flushActive.Swap(true) {
+    return
+  }
+  defer flushActive.Swap(false)
+
+  // Flush all file sinks.
+  _ = logging.allSinkInfos.iterFileSinks(func(l *fileSink) error {
+    l.lockAndFlushAndMaybeSync(true /*doSync*/)
+    return nil
+  })
+
+  // Flush all buffered network sinks.
   _ = logging.allSinkInfos.iterBufferedSinks(func(bs *bufferedSink) error {
-    // Trigger a synchronous flush by calling output on the bufferedSink
-    // with a `forceSync` option.
-    err := bs.output([]byte{}, sinkOutputOptions{forceSync: true})
-    if err != nil {
-      // We don't want to let errors stop us from iterating and flushing
-      // the remaining buffered log sinks. Nor do we want to log the error
-      // using the logging system, as it's unlikely to make it to the
-      // destination sink anyway (there's a good chance we're flushing
-      // as part of handling a panic). Display the error and continue.
-      fmt.Printf("Error draining buffered log sink: %v\n", err)
+    doneCh := make(chan struct{})
+    // Set a timer, so we don't prevent the process from exiting if the
+    // child sink is unavailable & the request hangs.
+    timer := time.NewTimer(5 * time.Second)
+    go func() {
+      // Trigger a synchronous flush by calling output on the bufferedSink
+      // with a `forceSync` option.
+      err := bs.output([]byte{}, sinkOutputOptions{forceSync: true})
+      if err != nil {
+        // We don't want to let errors stop us from iterating and flushing
+        // the remaining buffered log sinks. Nor do we want to log the error
+        // using the logging system, as it's unlikely to make it to the
+        // destination sink anyway (there's a good chance we're flushing
+        // as part of handling a panic). Display the error.
+        fmt.Fprintf(OrigStderr, "error draining buffered log sink: %v\n", err)
+      }
+      doneCh <- struct{}{}
+    }()
+
+    select {
+    case <-doneCh:
+    case <-timer.C:
+      fmt.Fprintf(OrigStderr, "timed out draining buffered log sink: %T\n", bs.child)
     }
+    // In the event of errors or timeouts, we still want to attempt to flush
+    // any remaining buffered sinks. Return nil so the iterator can continue.
     return nil
   })
 }

 func init() {
+  flushActive.Set(false)
   go flushDaemon()
   go signalFlusher()
 }
@@ -86,7 +111,7 @@ const syncWarnDuration = 10 * time.Second
 // flushDaemon periodically flushes and syncs the log file buffers.
 // This manages both the primary and secondary loggers.
 //
-// FlushFileSinks propagates the in-memory buffer inside CockroachDB to the
+// Flush propagates the in-memory buffer inside CockroachDB to the
 // in-memory buffer(s) of the OS. The flush is relatively frequent so
 // that a human operator can see "up to date" logging data in the log
 // file.
@@ -122,7 +147,7 @@ func signalFlusher() {
   ch := sysutil.RefreshSignaledChan()
   for sig := range ch {
     Ops.Infof(context.Background(), "%s received, flushing logs", sig)
-    FlushFileSinks()
+    Flush()
   }
 }

@@ -134,5 +159,5 @@ func signalFlusher() {
 func StartAlwaysFlush() {
   logging.flushWrites.Set(true)
   // There may be something in the buffers already; flush it.
-  FlushFileSinks()
+  Flush()
 }
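The core of the new Flush implementation is the timeout-guarded drain of each buffered network sink: the blocking flush runs in a goroutine and is raced against a 5-second timer, so one unreachable sink cannot wedge shutdown. A generalized sketch of that pattern, with flushOne standing in for the bufferedSink forceSync output call:

    package main

    import (
        "errors"
        "fmt"
        "os"
        "time"
    )

    // flushWithTimeout runs flushOne in a goroutine and gives up after
    // timeout. Errors are reported on stderr rather than through the
    // logging system, which may be the thing that is wedged.
    func flushWithTimeout(flushOne func() error, timeout time.Duration) {
        doneCh := make(chan struct{})
        go func() {
            if err := flushOne(); err != nil {
                fmt.Fprintf(os.Stderr, "error draining buffered log sink: %v\n", err)
            }
            close(doneCh)
        }()
        select {
        case <-doneCh:
            // Drained (or failed) within the budget.
        case <-time.After(timeout):
            fmt.Fprintln(os.Stderr, "timed out draining buffered log sink")
        }
    }

    func main() {
        flushWithTimeout(func() error {
            time.Sleep(50 * time.Millisecond) // simulate a network drain
            return errors.New("sink unavailable")
        }, 5*time.Second)
    }

Note the trade-off mirrored from the patch: on timeout, the flush goroutine may keep running in the background, which is acceptable when the process is about to exit anyway.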
diff --git a/pkg/util/log/logcrash/crash_reporting.go b/pkg/util/log/logcrash/crash_reporting.go
index a22d87a3ead6..e5a74d7e2054 100644
--- a/pkg/util/log/logcrash/crash_reporting.go
+++ b/pkg/util/log/logcrash/crash_reporting.go
@@ -192,7 +192,7 @@ func ReportPanic(ctx context.Context, sv *settings.Values, r interface{}, depth

   // Ensure that the logs are flushed before letting a panic
   // terminate the server.
-  log.FlushAllSync()
+  log.Flush()
 }

 // PanicAsError turns r into an error if it is not one already.
diff --git a/pkg/util/log/secondary_log_test.go b/pkg/util/log/secondary_log_test.go
index 1e0770c8f56c..f691712bd851 100644
--- a/pkg/util/log/secondary_log_test.go
+++ b/pkg/util/log/secondary_log_test.go
@@ -71,7 +71,7 @@ func TestSecondaryLog(t *testing.T) {
   Infof(context.Background(), "test2")

   // Make sure the content made it to disk.
-  FlushFileSinks()
+  Flush()

   // Check that the messages indeed made it to different files.

@@ -151,7 +151,7 @@ func TestListLogFilesIncludeSecondaryLogs(t *testing.T) {
   // Emit some logging and ensure the files get created.
   ctx := context.Background()
   Sessions.Infof(ctx, "story time")
-  FlushFileSinks()
+  Flush()

   results, err := ListLogFiles()
   if err != nil {
diff --git a/pkg/util/log/test_log_scope.go b/pkg/util/log/test_log_scope.go
index 78e7b2303fcd..de586987c7fd 100644
--- a/pkg/util/log/test_log_scope.go
+++ b/pkg/util/log/test_log_scope.go
@@ -365,7 +365,7 @@ func (l *TestLogScope) Rotate(t tShim) {
   t.Helper()
   t.Logf("-- test log scope file rotation --")
   // Ensure remaining logs are written.
-  FlushFileSinks()
+  Flush()

   if err := logging.allSinkInfos.iterFileSinks(func(l *fileSink) error {
     l.mu.Lock()
@@ -387,7 +387,7 @@ func (l *TestLogScope) Close(t tShim) {
   t.Logf("-- test log scope end --")

   // Ensure any remaining logs are written to files.
-  FlushFileSinks()
+  Flush()

   if l.logDir != "" {
     defer func() {