diff --git a/docs/generated/sql/bnf/show_jobs.bnf b/docs/generated/sql/bnf/show_jobs.bnf index 83832564ca1c..02b570a4a9f4 100644 --- a/docs/generated/sql/bnf/show_jobs.bnf +++ b/docs/generated/sql/bnf/show_jobs.bnf @@ -1,11 +1,14 @@ show_jobs_stmt ::= 'SHOW' 'AUTOMATIC' 'JOBS' | 'SHOW' 'JOBS' + | 'SHOW' 'JOBS' 'WITH' show_job_options_list | 'SHOW' 'CHANGEFEED' 'JOBS' | 'SHOW' 'JOBS' select_stmt + | 'SHOW' 'JOBS' select_stmt 'WITH' show_job_options_list | 'SHOW' 'JOBS' 'WHEN' 'COMPLETE' select_stmt | 'SHOW' 'JOBS' for_schedules_clause | 'SHOW' 'CHANGEFEED' 'JOBS' select_stmt | 'SHOW' 'JOB' job_id + | 'SHOW' 'JOB' job_id 'WITH' show_job_options_list | 'SHOW' 'CHANGEFEED' 'JOB' job_id | 'SHOW' 'JOB' 'WHEN' 'COMPLETE' job_id diff --git a/docs/generated/sql/bnf/stmt_block.bnf b/docs/generated/sql/bnf/stmt_block.bnf index 6e80662fd918..370bb12c4bb8 100644 --- a/docs/generated/sql/bnf/stmt_block.bnf +++ b/docs/generated/sql/bnf/stmt_block.bnf @@ -853,12 +853,15 @@ show_partitions_stmt ::= show_jobs_stmt ::= 'SHOW' 'AUTOMATIC' 'JOBS' | 'SHOW' 'JOBS' + | 'SHOW' 'JOBS' 'WITH' show_job_options_list | 'SHOW' 'CHANGEFEED' 'JOBS' | 'SHOW' 'JOBS' select_stmt + | 'SHOW' 'JOBS' select_stmt 'WITH' show_job_options_list | 'SHOW' 'JOBS' 'WHEN' 'COMPLETE' select_stmt | 'SHOW' 'JOBS' for_schedules_clause | 'SHOW' 'CHANGEFEED' 'JOBS' select_stmt | 'SHOW' 'JOB' a_expr + | 'SHOW' 'JOB' a_expr 'WITH' show_job_options_list | 'SHOW' 'CHANGEFEED' 'JOB' a_expr | 'SHOW' 'JOB' 'WHEN' 'COMPLETE' a_expr @@ -1962,6 +1965,9 @@ for_grantee_clause ::= 'FOR' role_spec_list | +show_job_options_list ::= + ( show_job_options ) ( ( ',' show_job_options ) )* + opt_schedule_executor_type ::= 'FOR' 'BACKUP' | 'FOR' 'SQL' 'STATISTICS' @@ -2724,6 +2730,9 @@ targets_roles ::= | 'TYPE' type_name_list | grant_targets +show_job_options ::= + 'EXECUTION' 'DETAILS' + show_ranges_options ::= ( 'TABLES' | 'INDEXES' | 'DETAILS' | 'KEYS' | 'EXPLAIN' ) ( ( ',' 'TABLES' | ',' 'INDEXES' | ',' 'DETAILS' | ',' 'EXPLAIN' | ',' 'KEYS' ) )* diff --git a/docs/generated/sql/functions.md b/docs/generated/sql/functions.md index f44b62145760..e24ab2476abb 100644 --- a/docs/generated/sql/functions.md +++ b/docs/generated/sql/functions.md @@ -3181,6 +3181,8 @@ Note: the name pattern must contain ASCII letters already for capital letters to crdb_internal.is_constraint_active(table_name: string, constraint_name: string) → bool

This function is used to determine if a given constraint is currently active for the current transaction.

Volatile +crdb_internal.job_execution_details(job_id: int) → jsonb

Output a JSONB version of the specified job’s execution details. The execution details are collected and persisted during the lifetime of the job and provide more observability into the job’s execution

+
Volatile crdb_internal.lease_holder(key: bytes) → int

This function is used to fetch the leaseholder corresponding to a request key

Volatile crdb_internal.list_sql_keys_in_range(range_id: int) → tuple{string AS key, string AS value, string AS ts}

Returns all SQL K/V pairs within the requested range.

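(Illustrative usage only, not part of the generated docs above; the job ID below is hypothetical. The new builtin returns a JSONB object whose plan_diagram field holds the latest DistSQL plan diagram URL persisted by jobsprofiler.)

    -- Fetch the full execution details for a job as JSONB.
    SELECT crdb_internal.job_execution_details(876543210123456789);

    -- Extract only the DistSQL plan diagram URL from the details.
    SELECT crdb_internal.job_execution_details(876543210123456789)->>'plan_diagram';
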
diff --git a/pkg/BUILD.bazel b/pkg/BUILD.bazel index dd9db07b448f..d4c5ab1235b5 100644 --- a/pkg/BUILD.bazel +++ b/pkg/BUILD.bazel @@ -1174,6 +1174,7 @@ GO_TARGETS = [ "//pkg/jobs/jobsauth:jobsauth_test", "//pkg/jobs/jobspb:jobspb", "//pkg/jobs/jobspb:jobspb_test", + "//pkg/jobs/jobsprofiler/profilerconstants:profilerconstants", "//pkg/jobs/jobsprofiler:jobsprofiler", "//pkg/jobs/jobsprofiler:jobsprofiler_test", "//pkg/jobs/jobsprotectedts:jobsprotectedts", @@ -2678,6 +2679,7 @@ GET_X_DATA_TARGETS = [ "//pkg/jobs/jobsauth:get_x_data", "//pkg/jobs/jobspb:get_x_data", "//pkg/jobs/jobsprofiler:get_x_data", + "//pkg/jobs/jobsprofiler/profilerconstants:get_x_data", "//pkg/jobs/jobsprotectedts:get_x_data", "//pkg/jobs/jobstest:get_x_data", "//pkg/jobs/metricspoller:get_x_data", diff --git a/pkg/jobs/jobsprofiler/BUILD.bazel b/pkg/jobs/jobsprofiler/BUILD.bazel index 0c0625cf24bd..31240a88f9a6 100644 --- a/pkg/jobs/jobsprofiler/BUILD.bazel +++ b/pkg/jobs/jobsprofiler/BUILD.bazel @@ -9,6 +9,7 @@ go_library( deps = [ "//pkg/jobs", "//pkg/jobs/jobspb", + "//pkg/jobs/jobsprofiler/profilerconstants", "//pkg/sql", "//pkg/sql/execinfrapb", "//pkg/sql/isql", diff --git a/pkg/jobs/jobsprofiler/profiler.go b/pkg/jobs/jobsprofiler/profiler.go index a0145961736f..5ea2bab647bf 100644 --- a/pkg/jobs/jobsprofiler/profiler.go +++ b/pkg/jobs/jobsprofiler/profiler.go @@ -16,6 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/jobs/jobsprofiler/profilerconstants" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/isql" @@ -37,15 +38,15 @@ func StorePlanDiagram( err := db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { flowSpecs := p.GenerateFlowSpecs() - _, diagURL, err := execinfrapb.GeneratePlanDiagramURL(fmt.Sprintf("job:%d", jobID), flowSpecs, execinfrapb.DiagramFlags{}) + _, diagURL, err := execinfrapb.GeneratePlanDiagramURL(fmt.Sprintf("job:%d", jobID), flowSpecs, + execinfrapb.DiagramFlags{}) if err != nil { return err } - const infoKey = "dsp-diag-url-%d" infoStorage := jobs.InfoStorageForJob(txn, jobID) - return infoStorage.Write(ctx, fmt.Sprintf(infoKey, timeutil.Now().UnixNano()), - []byte(diagURL.String())) + return infoStorage.Write(ctx, fmt.Sprintf(profilerconstants.DSPDiagramInfoKeyPrefix+"%d", + timeutil.Now().UnixNano()), []byte(diagURL.String())) }) if err != nil { log.Warningf(ctx, "failed to generate and write DistSQL diagram for job %d: %v", diff --git a/pkg/jobs/jobsprofiler/profilerconstants/BUILD.bazel b/pkg/jobs/jobsprofiler/profilerconstants/BUILD.bazel new file mode 100644 index 000000000000..291fbfb895e2 --- /dev/null +++ b/pkg/jobs/jobsprofiler/profilerconstants/BUILD.bazel @@ -0,0 +1,11 @@ +load("//build/bazelutil/unused_checker:unused.bzl", "get_x_data") +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "profilerconstants", + srcs = ["constants.go"], + importpath = "github.com/cockroachdb/cockroach/pkg/jobs/jobsprofiler/profilerconstants", + visibility = ["//visibility:public"], +) + +get_x_data(name = "get_x_data") diff --git a/pkg/jobs/jobsprofiler/profilerconstants/constants.go b/pkg/jobs/jobsprofiler/profilerconstants/constants.go new file mode 100644 index 000000000000..5be37466b682 --- /dev/null +++ b/pkg/jobs/jobsprofiler/profilerconstants/constants.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Cockroach Authors. 
+// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package profilerconstants + +// DSPDiagramInfoKeyPrefix is the prefix of the info key used for rows that +// store the DistSQL plan diagram for a job. +const DSPDiagramInfoKeyPrefix = "dsp-diag-url-" diff --git a/pkg/sql/BUILD.bazel b/pkg/sql/BUILD.bazel index 165f7b551f85..4b092f6db594 100644 --- a/pkg/sql/BUILD.bazel +++ b/pkg/sql/BUILD.bazel @@ -141,6 +141,7 @@ go_library( "job_exec_context.go", "job_exec_context_test_util.go", "jobs_collection.go", + "jobs_execution_details.go", "join.go", "join_predicate.go", "join_token.go", @@ -308,6 +309,7 @@ go_library( "//pkg/jobs", "//pkg/jobs/jobsauth", "//pkg/jobs/jobspb", + "//pkg/jobs/jobsprofiler/profilerconstants", "//pkg/keys", "//pkg/keyvisualizer", "//pkg/kv", @@ -635,6 +637,7 @@ go_test( "indexbackfiller_test.go", "instrumentation_test.go", "internal_test.go", + "jobs_execution_details_test.go", "join_token_test.go", "main_test.go", "materialized_view_test.go", @@ -712,6 +715,7 @@ go_test( deps = [ "//pkg/base", "//pkg/build/bazel", + "//pkg/cloud/impl:cloudimpl", "//pkg/clusterversion", "//pkg/col/coldata", "//pkg/config", @@ -720,6 +724,8 @@ go_test( "//pkg/internal/sqlsmith", "//pkg/jobs", "//pkg/jobs/jobspb", + "//pkg/jobs/jobsprofiler", + "//pkg/jobs/jobsprofiler/profilerconstants", "//pkg/jobs/jobstest", "//pkg/keys", "//pkg/keyvisualizer", diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go index 80a85dbb5240..e466aad140b0 100644 --- a/pkg/sql/conn_executor.go +++ b/pkg/sql/conn_executor.go @@ -3398,6 +3398,7 @@ func (ex *connExecutor) initEvalCtx(ctx context.Context, evalCtx *extendedEvalCo QueryCancelKey: ex.queryCancelKey, DescIDGenerator: ex.getDescIDGenerator(), RangeStatsFetcher: p.execCfg.RangeStatsFetcher, + JobsProfiler: p, }, Tracing: &ex.sessionTracing, MemMetrics: &ex.memMetrics, diff --git a/pkg/sql/delegate/show_jobs.go b/pkg/sql/delegate/show_jobs.go index 950f8047b8bc..16002c063031 100644 --- a/pkg/sql/delegate/show_jobs.go +++ b/pkg/sql/delegate/show_jobs.go @@ -20,25 +20,25 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" ) -func (d *delegator) delegateShowJobs(n *tree.ShowJobs) (tree.Statement, error) { - if n.Schedules != nil { - // Limit the jobs displayed to the ones started by specified schedules. - return d.parse(fmt.Sprintf(` -SHOW JOBS SELECT id FROM system.jobs WHERE created_by_type='%s' and created_by_id IN (%s) -`, jobs.CreatedByScheduledJobs, n.Schedules.String()), - ) - } - - sqltelemetry.IncrementShowCounter(sqltelemetry.Jobs) - - const ( - selectClause = ` +func constructSelectQuery(n *tree.ShowJobs) string { + var baseQuery strings.Builder + baseQuery.WriteString(` SELECT job_id, job_type, description, statement, user_name, status, running_status, created, started, finished, modified, fraction_completed, error, coordinator_id, trace_id, last_run, next_run, num_runs, execution_errors - FROM crdb_internal.jobs` - ) +`) + + // Check if there are any SHOW JOBS options that we need to add columns for. 
+ if n.Options != nil { + if n.Options.ExecutionDetails { + baseQuery.WriteString(`, NULLIF(crdb_internal.job_execution_details(job_id)->>'plan_diagram'::STRING, '') AS plan_diagram`) + } + } + + baseQuery.WriteString("\nFROM crdb_internal.jobs") + + // Now add the predicates and ORDER BY clauses. var typePredicate, whereClause, orderbyClause string if n.Jobs == nil { // Display all [only automatic] jobs without selecting specific jobs. @@ -74,10 +74,23 @@ SELECT job_id, job_type, description, statement, user_name, status, // Limit the jobs displayed to the select statement in n.Jobs. whereClause = fmt.Sprintf(`WHERE job_id in (%s)`, n.Jobs.String()) } + return fmt.Sprintf("%s %s %s", baseQuery.String(), whereClause, orderbyClause) +} + +func (d *delegator) delegateShowJobs(n *tree.ShowJobs) (tree.Statement, error) { + if n.Schedules != nil { + // Limit the jobs displayed to the ones started by specified schedules. + return d.parse(fmt.Sprintf(` +SHOW JOBS SELECT id FROM system.jobs WHERE created_by_type='%s' and created_by_id IN (%s) +`, jobs.CreatedByScheduledJobs, n.Schedules.String()), + ) + } + + sqltelemetry.IncrementShowCounter(sqltelemetry.Jobs) - sqlStmt := fmt.Sprintf("%s %s %s", selectClause, whereClause, orderbyClause) + stmt := constructSelectQuery(n) if n.Block { - sqlStmt = fmt.Sprintf( + stmt = fmt.Sprintf( ` WITH jobs AS (SELECT * FROM [%s]), sleep_and_restart_if_unfinished AS ( @@ -92,7 +105,7 @@ SELECT job_id, job_type, description, statement, user_name, status, ) SELECT * FROM jobs - WHERE NOT EXISTS(SELECT * FROM fail_if_slept_too_long)`, sqlStmt) + WHERE NOT EXISTS(SELECT * FROM fail_if_slept_too_long)`, stmt) } - return d.parse(sqlStmt) + return d.parse(stmt) } diff --git a/pkg/sql/jobs_execution_details.go b/pkg/sql/jobs_execution_details.go new file mode 100644 index 000000000000..43428458708d --- /dev/null +++ b/pkg/sql/jobs_execution_details.go @@ -0,0 +1,75 @@ +// Copyright 2023 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package sql + +import ( + "context" + gojson "encoding/json" + + "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/jobs/jobsprofiler/profilerconstants" + "github.com/cockroachdb/cockroach/pkg/sql/isql" + "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" +) + +// GenerateExecutionDetailsJSON implements the Profiler interface. +func (p *planner) GenerateExecutionDetailsJSON( + ctx context.Context, evalCtx *eval.Context, jobID jobspb.JobID, +) ([]byte, error) { + execCfg := evalCtx.Planner.ExecutorConfig().(*ExecutorConfig) + j, err := execCfg.JobRegistry.LoadJob(ctx, jobID) + if err != nil { + return nil, err + } + + var executionDetailsJSON []byte + payload := j.Payload() + switch payload.Type() { + // TODO(adityamaru): This allows different job types to implement custom + // execution detail aggregation. + default: + executionDetailsJSON, err = constructDefaultExecutionDetails(ctx, jobID, execCfg.InternalDB) + } + + return executionDetailsJSON, err +} + +// defaultExecutionDetails is a JSON serializable struct that captures the +// execution details that are not specific to a particular job type. 
+type defaultExecutionDetails struct { + // PlanDiagram is the URL to render the latest DistSQL plan that is being + // executed by the job. + PlanDiagram string `json:"plan_diagram"` +} + +func constructDefaultExecutionDetails( + ctx context.Context, jobID jobspb.JobID, db isql.DB, +) ([]byte, error) { + executionDetails := &defaultExecutionDetails{} + err := db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + // Populate the latest DSP diagram URL. + infoStorage := jobs.InfoStorageForJob(txn, jobID) + err := infoStorage.GetLast(ctx, profilerconstants.DSPDiagramInfoKeyPrefix, func(infoKey string, value []byte) error { + executionDetails.PlanDiagram = string(value) + return nil + }) + if err != nil { + return err + } + return nil + }) + if err != nil { + return nil, err + } + j, err := gojson.Marshal(executionDetails) + return j, err +} diff --git a/pkg/sql/jobs_execution_details_test.go b/pkg/sql/jobs_execution_details_test.go new file mode 100644 index 000000000000..5034e3c95e7b --- /dev/null +++ b/pkg/sql/jobs_execution_details_test.go @@ -0,0 +1,126 @@ +// Copyright 2023 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package sql_test + +import ( + "context" + "testing" + "time" + + "github.com/cockroachdb/cockroach/pkg/base" + _ "github.com/cockroachdb/cockroach/pkg/cloud/impl" // register cloud storage providers + "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/jobs/jobsprofiler" + "github.com/cockroachdb/cockroach/pkg/jobs/jobsprofiler/profilerconstants" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" + "github.com/cockroachdb/cockroach/pkg/sql/physicalplan" + "github.com/cockroachdb/cockroach/pkg/sql/tests" + "github.com/cockroachdb/cockroach/pkg/testutils" + "github.com/cockroachdb/cockroach/pkg/testutils/jobutils" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/uuid" + "github.com/cockroachdb/errors" + "github.com/stretchr/testify/require" +) + +// fakeExecResumer calls optional callbacks during the job lifecycle. +type fakeExecResumer struct { + OnResume func(context.Context) error + FailOrCancel func(context.Context) error +} + +var _ jobs.Resumer = fakeExecResumer{} + +func (d fakeExecResumer) Resume(ctx context.Context, execCtx interface{}) error { + if d.OnResume != nil { + if err := d.OnResume(ctx); err != nil { + return err + } + } + return nil +} + +func (d fakeExecResumer) OnFailOrCancel(ctx context.Context, _ interface{}, _ error) error { + if d.FailOrCancel != nil { + return d.FailOrCancel(ctx) + } + return nil +} + +// checkForPlanDiagram is a method used in tests to wait for the existence of a +// DSP diagram for the provided jobID. 
+func checkForPlanDiagram(ctx context.Context, t *testing.T, db isql.DB, jobID jobspb.JobID) { + testutils.SucceedsSoon(t, func() error { + return db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + infoStorage := jobs.InfoStorageForJob(txn, jobID) + var found bool + err := infoStorage.GetLast(ctx, profilerconstants.DSPDiagramInfoKeyPrefix, + func(infoKey string, value []byte) error { + found = true + return nil + }) + if err != nil || !found { + return errors.New("not found") + } + return nil + }) + }) +} + +// TestJobsExecutionDetails tests that a job's execution details are retrieved +// and rendered correctly. +func TestJobsExecutionDetails(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + // Timeout the test in a few minutes if it hasn't succeeded. + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, time.Minute*2) + defer cancel() + + params, _ := tests.CreateTestServerParams() + params.Knobs.JobsTestingKnobs = jobs.NewTestingKnobsWithShortIntervals() + defer jobs.ResetConstructors()() + s, sqlDB, _ := serverutils.StartServer(t, params) + defer s.Stopper().Stop(ctx) + + runner := sqlutils.MakeSQLRunner(sqlDB) + + jobs.RegisterConstructor(jobspb.TypeImport, func(j *jobs.Job, _ *cluster.Settings) jobs.Resumer { + return fakeExecResumer{ + OnResume: func(ctx context.Context) error { + p := sql.PhysicalPlan{} + infra := physicalplan.NewPhysicalInfrastructure(uuid.FastMakeV4(), base.SQLInstanceID(1)) + p.PhysicalInfrastructure = infra + jobsprofiler.StorePlanDiagram(ctx, s.Stopper(), &p, s.InternalDB().(isql.DB), j.ID()) + checkForPlanDiagram(ctx, t, s.InternalDB().(isql.DB), j.ID()) + return nil + }, + } + }, jobs.UsesTenantCostControl) + + runner.Exec(t, `CREATE TABLE t (id INT)`) + runner.Exec(t, `INSERT INTO t SELECT generate_series(1, 100)`) + var importJobID int + runner.QueryRow(t, `IMPORT INTO t CSV DATA ('nodelocal://1/foo') WITH DETACHED`).Scan(&importJobID) + jobutils.WaitForJobToSucceed(t, runner, jobspb.JobID(importJobID)) + + var count int + runner.QueryRow(t, `SELECT count(*) FROM [SHOW JOB $1 WITH EXECUTION DETAILS] WHERE plan_diagram IS NOT NULL`, importJobID).Scan(&count) + require.NotZero(t, count) + runner.CheckQueryResults(t, `SELECT count(*) FROM [SHOW JOBS WITH EXECUTION DETAILS] WHERE plan_diagram IS NOT NULL`, [][]string{{"1"}}) +} diff --git a/pkg/sql/parser/BUILD.bazel b/pkg/sql/parser/BUILD.bazel index 07c9f1518fd8..9e1ae39ca90f 100644 --- a/pkg/sql/parser/BUILD.bazel +++ b/pkg/sql/parser/BUILD.bazel @@ -71,7 +71,6 @@ go_test( "//pkg/testutils/datapathutils", "//pkg/testutils/sqlutils", "//pkg/util/leaktest", - "//pkg/util/log", "//pkg/util/randutil", "@com_github_cockroachdb_datadriven//:datadriven", "@com_github_cockroachdb_errors//:errors", diff --git a/pkg/sql/parser/parse_test.go b/pkg/sql/parser/parse_test.go index d1814617d06a..79d5de1c6de6 100644 --- a/pkg/sql/parser/parse_test.go +++ b/pkg/sql/parser/parse_test.go @@ -30,7 +30,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" - _ "github.com/cockroachdb/cockroach/pkg/util/log" // for flags "github.com/cockroachdb/cockroach/pkg/util/randutil" "github.com/cockroachdb/datadriven" "github.com/cockroachdb/errors" diff --git a/pkg/sql/parser/sql.y b/pkg/sql/parser/sql.y index 542f91eeb62c..d0264d99325f 100644 --- a/pkg/sql/parser/sql.y +++ b/pkg/sql/parser/sql.y @@ -701,6 +701,9 @@ func (u *sqlSymUnion) 
backupOptions() *tree.BackupOptions { func (u *sqlSymUnion) copyOptions() *tree.CopyOptions { return u.val.(*tree.CopyOptions) } +func (u *sqlSymUnion) showJobOptions() *tree.ShowJobOptions { + return u.val.(*tree.ShowJobOptions) +} func (u *sqlSymUnion) showBackupDetails() tree.ShowBackupDetails { return u.val.(tree.ShowBackupDetails) } @@ -1323,6 +1326,7 @@ func (u *sqlSymUnion) showCreateFormatOption() tree.ShowCreateFormatOption { %type <*tree.RestoreOptions> opt_with_restore_options restore_options restore_options_list %type <*tree.TenantReplicationOptions> opt_with_tenant_replication_options tenant_replication_options tenant_replication_options_list %type show_backup_details +%type <*tree.ShowJobOptions> show_job_options show_job_options_list %type <*tree.ShowBackupOptions> opt_with_show_backup_options show_backup_options show_backup_options_list show_backup_connection_options show_backup_connection_options_list %type <*tree.CopyOptions> opt_with_copy_options copy_options copy_options_list copy_generic_options copy_generic_options_list %type import_format @@ -7810,9 +7814,9 @@ statements_or_queries: // %Help: SHOW JOBS - list background jobs // %Category: Misc // %Text: -// SHOW [AUTOMATIC | CHANGEFEED] JOBS [select clause] +// SHOW [AUTOMATIC | CHANGEFEED] JOBS [select clause] [WITH EXECUTION DETAILS] // SHOW JOBS FOR SCHEDULES [select clause] -// SHOW [CHANGEFEED] JOB +// SHOW [CHANGEFEED] JOB [WITH EXECUTION DETAILS] // %SeeAlso: CANCEL JOBS, PAUSE JOBS, RESUME JOBS show_jobs_stmt: SHOW AUTOMATIC JOBS @@ -7821,7 +7825,16 @@ show_jobs_stmt: } | SHOW JOBS { - $$.val = &tree.ShowJobs{Automatic: false} + $$.val = &tree.ShowJobs{ + Automatic: false, + } + } +| SHOW JOBS WITH show_job_options_list + { + $$.val = &tree.ShowJobs{ + Automatic: false, + Options: $4.showJobOptions(), + } } | SHOW CHANGEFEED JOBS { @@ -7834,6 +7847,13 @@ show_jobs_stmt: { $$.val = &tree.ShowJobs{Jobs: $3.slct()} } +| SHOW JOBS select_stmt WITH show_job_options_list + { + $$.val = &tree.ShowJobs{ + Jobs: $3.slct(), + Options: $5.showJobOptions(), + } + } | SHOW JOBS WHEN COMPLETE select_stmt { $$.val = &tree.ShowJobs{Jobs: $5.slct(), Block: true} @@ -7855,6 +7875,15 @@ show_jobs_stmt: }, } } +| SHOW JOB a_expr WITH show_job_options_list + { + $$.val = &tree.ShowJobs{ + Jobs: &tree.Select{ + Select: &tree.ValuesClause{Rows: []tree.Exprs{tree.Exprs{$3.expr()}}}, + }, + Options: $5.showJobOptions(), + } + } | SHOW CHANGEFEED JOB a_expr { $$.val = &tree.ShowChangefeedJobs{ @@ -7875,6 +7904,29 @@ show_jobs_stmt: | SHOW JOB error // SHOW HELP: SHOW JOBS | SHOW CHANGEFEED JOB error // SHOW HELP: SHOW JOBS + +show_job_options_list: + // Require at least one option + show_job_options + { + $$.val = $1.showJobOptions() + } +| show_job_options_list ',' show_job_options + { + if err := $1.showJobOptions().CombineWith($3.showJobOptions()); err != nil { + return setErr(sqllex, err) + } + } + +// List of valid SHOW JOB options. 
+show_job_options: + EXECUTION DETAILS + { + $$.val = &tree.ShowJobOptions{ + ExecutionDetails: true, + } + } + // %Help: SHOW SCHEDULES - list periodic schedules // %Category: Misc // %Text: diff --git a/pkg/sql/parser/testdata/control_job b/pkg/sql/parser/testdata/control_job index f781b21d745a..fedf1aa6b545 100644 --- a/pkg/sql/parser/testdata/control_job +++ b/pkg/sql/parser/testdata/control_job @@ -159,6 +159,30 @@ SHOW JOBS SELECT (a) -- fully parenthesized SHOW JOBS SELECT a -- literals removed SHOW JOBS SELECT _ -- identifiers removed +parse +SHOW JOBS WITH EXECUTION DETAILS +---- +SHOW JOBS WITH EXECUTION DETAILS +SHOW JOBS WITH EXECUTION DETAILS -- fully parenthesized +SHOW JOBS WITH EXECUTION DETAILS -- literals removed +SHOW JOBS WITH EXECUTION DETAILS -- identifiers removed + +parse +SHOW JOB a WITH EXECUTION DETAILS +---- +SHOW JOBS VALUES (a) WITH EXECUTION DETAILS -- normalized! +SHOW JOBS VALUES ((a)) WITH EXECUTION DETAILS -- fully parenthesized +SHOW JOBS VALUES (a) WITH EXECUTION DETAILS -- literals removed +SHOW JOBS VALUES (_) WITH EXECUTION DETAILS -- identifiers removed + +parse +SHOW JOBS SELECT a WITH EXECUTION DETAILS +---- +SHOW JOBS SELECT a WITH EXECUTION DETAILS +SHOW JOBS SELECT (a) WITH EXECUTION DETAILS -- fully parenthesized +SHOW JOBS SELECT a WITH EXECUTION DETAILS -- literals removed +SHOW JOBS SELECT _ WITH EXECUTION DETAILS -- identifiers removed + parse EXPLAIN SHOW JOBS SELECT a ---- diff --git a/pkg/sql/parser/testdata/create_function b/pkg/sql/parser/testdata/create_function index 1abac0cca983..b299757093ea 100644 --- a/pkg/sql/parser/testdata/create_function +++ b/pkg/sql/parser/testdata/create_function @@ -217,41 +217,32 @@ HINT: try \h CREATE FUNCTION error CREATE OR REPLACE FUNCTION f(OUT a int = 7) RETURNS INT AS 'SELECT 1' LANGUAGE SQL ---- ----- at or near "out": syntax error: unimplemented: this syntax DETAIL: source SQL: CREATE OR REPLACE FUNCTION f(OUT a int = 7) RETURNS INT AS 'SELECT 1' LANGUAGE SQL ^ HINT: You have attempted to use a feature that is not yet implemented. See: https://go.crdb.dev/issue-v/100405/ ----- ----- error CREATE OR REPLACE FUNCTION f(INOUT a int = 7) RETURNS INT AS 'SELECT 1' LANGUAGE SQL ---- ----- at or near "inout": syntax error: unimplemented: this syntax DETAIL: source SQL: CREATE OR REPLACE FUNCTION f(INOUT a int = 7) RETURNS INT AS 'SELECT 1' LANGUAGE SQL ^ HINT: You have attempted to use a feature that is not yet implemented. See: https://go.crdb.dev/issue-v/100405/ ----- ----- error CREATE OR REPLACE FUNCTION f(IN OUT a int = 7) RETURNS INT AS 'SELECT 1' LANGUAGE SQL ---- ----- at or near "out": syntax error: unimplemented: this syntax DETAIL: source SQL: CREATE OR REPLACE FUNCTION f(IN OUT a int = 7) RETURNS INT AS 'SELECT 1' LANGUAGE SQL ^ HINT: You have attempted to use a feature that is not yet implemented. 
See: https://go.crdb.dev/issue-v/100405/ ----- ----- error CREATE OR REPLACE FUNCTION f(VARIADIC a int = 7) RETURNS INT AS 'SELECT 1' LANGUAGE SQL diff --git a/pkg/sql/planner.go b/pkg/sql/planner.go index 5523232ab859..2905d58b251e 100644 --- a/pkg/sql/planner.go +++ b/pkg/sql/planner.go @@ -440,6 +440,7 @@ func newInternalPlanner( p.extendedEvalCtx.Regions = p p.extendedEvalCtx.JoinTokenCreator = p p.extendedEvalCtx.Gossip = p + p.extendedEvalCtx.JobsProfiler = p p.extendedEvalCtx.ClusterID = execCfg.NodeInfo.LogicalClusterID() p.extendedEvalCtx.ClusterName = execCfg.RPCContext.ClusterName() p.extendedEvalCtx.NodeID = execCfg.NodeInfo.NodeID diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go index 56f79395f01f..b45d1d336565 100644 --- a/pkg/sql/sem/builtins/builtins.go +++ b/pkg/sql/sem/builtins/builtins.go @@ -4100,6 +4100,29 @@ value if you rely on the HLC for accuracy.`, Volatility: volatility.Immutable, }), + "crdb_internal.job_execution_details": makeBuiltin( + tree.FunctionProperties{Category: builtinconstants.CategorySystemInfo}, + tree.Overload{ + Types: tree.ParamTypes{ + {Name: "job_id", Typ: types.Int}, + }, + ReturnType: tree.FixedReturnType(types.Jsonb), + Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) { + if args[0] == tree.DNull { + return nil, pgerror.Newf(pgcode.NullValueNotAllowed, "argument cannot be NULL") + } + jobID := tree.MustBeDInt(args[0]) + json, err := evalCtx.JobsProfiler.GenerateExecutionDetailsJSON(ctx, evalCtx, jobspb.JobID(jobID)) + if err != nil { + return nil, err + } + return tree.ParseDJSON(string(json)) + }, + Info: "Output a JSONB version of the specified job's execution details. The execution details are collected" + + "and persisted during the lifetime of the job and provide more observability into the job's execution", + Volatility: volatility.Volatile, + }), + "crdb_internal.read_file": makeBuiltin( tree.FunctionProperties{Category: builtinconstants.CategorySystemInfo}, tree.Overload{ diff --git a/pkg/sql/sem/builtins/fixed_oids.go b/pkg/sql/sem/builtins/fixed_oids.go index 1eb69d8c7863..dec79341f5c8 100644 --- a/pkg/sql/sem/builtins/fixed_oids.go +++ b/pkg/sql/sem/builtins/fixed_oids.go @@ -2378,6 +2378,7 @@ var builtinOidsArray = []string{ 2405: `ts_rank(weights: float[], vector: tsvector, query: tsquery) -> float4`, 2406: `crdb_internal.fingerprint(span: bytes[], stripped: bool) -> int`, 2407: `crdb_internal.tenant_span() -> bytes[]`, + 2408: `crdb_internal.job_execution_details(job_id: int) -> jsonb`, } var builtinOidsBySignature map[string]oid.Oid diff --git a/pkg/sql/sem/eval/context.go b/pkg/sql/sem/eval/context.go index d159df105d1c..60fca2cdd3a8 100644 --- a/pkg/sql/sem/eval/context.go +++ b/pkg/sql/sem/eval/context.go @@ -263,6 +263,18 @@ type Context struct { // database which owns a table accessed by the current SQL request. // This slice is only populated during the optbuild stage. RemoteRegions catpb.RegionNames + + // JobsProfiler is the interface for builtins to extract job specific + // execution details that may have been aggregated during a job's lifetime. + JobsProfiler JobsProfiler +} + +// JobsProfiler is the interface used to fetch job specific execution details +// that may have been aggregated during a job's lifetime. +type JobsProfiler interface { + // GenerateExecutionDetailsJSON generates a JSON blob of the job specific + // execution details. 
+ GenerateExecutionDetailsJSON(ctx context.Context, evalCtx *Context, jobID jobspb.JobID) ([]byte, error) } // DescIDGenerator generates unique descriptor IDs. diff --git a/pkg/sql/sem/tree/show.go b/pkg/sql/sem/tree/show.go index abbd5ae569d5..fcf222faf16a 100644 --- a/pkg/sql/sem/tree/show.go +++ b/pkg/sql/sem/tree/show.go @@ -495,6 +495,9 @@ type ShowJobs struct { // If non-nil, only display jobs started by the specified // schedules. Schedules *Select + + // Options contain any options that were specified in the `SHOW JOB` query. + Options *ShowJobOptions } // Format implements the NodeFormatter interface. @@ -515,8 +518,33 @@ func (node *ShowJobs) Format(ctx *FmtCtx) { ctx.WriteString(" FOR SCHEDULES ") ctx.FormatNode(node.Schedules) } + if node.Options != nil { + ctx.WriteString(" WITH") + ctx.FormatNode(node.Options) + } +} + +// ShowJobOptions describes options for the SHOW JOB execution. +type ShowJobOptions struct { + // ExecutionDetails, if true, will render job specific details about the job's + // execution. These details will provide improved observability into the + // execution of the job. + ExecutionDetails bool } +func (s *ShowJobOptions) Format(ctx *FmtCtx) { + if s.ExecutionDetails { + ctx.WriteString(" EXECUTION DETAILS") + } +} + +func (s *ShowJobOptions) CombineWith(other *ShowJobOptions) error { + s.ExecutionDetails = other.ExecutionDetails + return nil +} + +var _ NodeFormatter = &ShowJobOptions{} + // ShowChangefeedJobs represents a SHOW CHANGEFEED JOBS statement type ShowChangefeedJobs struct { // If non-nil, a select statement that provides the job ids to be shown. diff --git a/pkg/sql/show_test.go b/pkg/sql/show_test.go index 8396403325bf..242c9189cdc9 100644 --- a/pkg/sql/show_test.go +++ b/pkg/sql/show_test.go @@ -1158,6 +1158,7 @@ func TestLintClusterSettingNames(t *testing.T) { "sql.defaults.lock_timeout": `sql.defaults.lock_timeout: use ".timeout" instead of "_timeout"`, "sql.defaults.idle_in_session_timeout": `sql.defaults.idle_in_session_timeout: use ".timeout" instead of "_timeout"`, "sql.defaults.idle_in_transaction_session_timeout": `sql.defaults.idle_in_transaction_session_timeout: use ".timeout" instead of "_timeout"`, + "cloudstorage.gs.chunking.retry_timeout": `cloudstorage.gs.chunking.retry_timeout: use ".timeout" instead of "_timeout"`, } expectedErr, found := grandFathered[varName] if !found || expectedErr != nameErr.Error() {
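
(Illustrative usage of the new syntax, not part of the patch; the job ID is hypothetical. WITH EXECUTION DETAILS adds a plan_diagram column to the SHOW JOBS / SHOW JOB output, as exercised by the test above.)

    -- List all jobs along with the latest plan diagram URL, where one exists.
    SHOW JOBS WITH EXECUTION DETAILS;

    -- Inspect a single job's execution details.
    SHOW JOB 876543210123456789 WITH EXECUTION DETAILS;

    -- The statement can also be used as a query source to filter on the new column.
    SELECT job_id, plan_diagram FROM [SHOW JOBS WITH EXECUTION DETAILS] WHERE plan_diagram IS NOT NULL;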