From 9936310686e3ec3f6c03ed0a9fc940511b6ffcaf Mon Sep 17 00:00:00 2001 From: Andrew Werner Date: Wed, 16 Nov 2022 14:30:41 -0500 Subject: [PATCH] *,isql: introduce isql.Txn and isql.DB This massive refactor works to bind the `*kv.Txn` with the internal executor and other sql-layer, txn-associated state. This work follows on from an earlier project to more tightly couple internal executors to the rest of `extraTxnState`. That project resulted in sprawling changes to propagate the paired dependencies through the system. In practice, we're better off coupling them through an object. There are some refactors added in here to curry and hide some of these dependencies from interfaces. It may be possible to extract those into separate changes. Additionally, not all of the dependency sprawl has been eliminated; there are cases where we could pass an `isql.Txn` but instead keep passing the underlying `isql.Executor` and `*kv.Txn`. We can do more cleanup along the way. Lastly, I couldn't help myself from lifting some `sql.ExecCfg` arguments up and being more specific in some places. 
Epic: none Release note: None --- pkg/BUILD.bazel | 4 +- pkg/ccl/backupccl/BUILD.bazel | 4 +- pkg/ccl/backupccl/alter_backup_planning.go | 5 +- pkg/ccl/backupccl/alter_backup_schedule.go | 25 +- pkg/ccl/backupccl/backup_job.go | 89 ++-- pkg/ccl/backupccl/backup_metadata_test.go | 8 +- pkg/ccl/backupccl/backup_planning.go | 67 +-- pkg/ccl/backupccl/backup_planning_tenant.go | 15 +- pkg/ccl/backupccl/backup_processor.go | 2 +- pkg/ccl/backupccl/backup_test.go | 80 ++-- .../backupccl/backupencryption/BUILD.bazel | 3 +- .../backupccl/backupencryption/encryption.go | 29 +- pkg/ccl/backupccl/backupresolver/BUILD.bazel | 2 +- pkg/ccl/backupccl/backupresolver/targets.go | 8 +- pkg/ccl/backupccl/create_scheduled_backup.go | 27 +- .../backupccl/create_scheduled_backup_test.go | 37 +- pkg/ccl/backupccl/restore_data_processor.go | 2 +- .../backupccl/restore_data_processor_test.go | 7 +- pkg/ccl/backupccl/restore_job.go | 238 ++++++----- .../backupccl/restore_old_versions_test.go | 72 ++-- pkg/ccl/backupccl/restore_planning.go | 120 +++--- .../restore_schema_change_creation.go | 6 +- pkg/ccl/backupccl/schedule_exec.go | 65 ++- pkg/ccl/backupccl/schedule_pts_chaining.go | 70 ++-- .../backupccl/schedule_pts_chaining_test.go | 73 ++-- pkg/ccl/backupccl/show.go | 8 +- .../backupccl/split_and_scatter_processor.go | 2 +- .../split_and_scatter_processor_test.go | 4 +- pkg/ccl/backupccl/system_schema.go | 161 ++++---- pkg/ccl/changefeedccl/BUILD.bazel | 5 +- .../changefeedccl/alter_changefeed_stmt.go | 18 +- pkg/ccl/changefeedccl/authorization.go | 2 +- pkg/ccl/changefeedccl/cdceval/BUILD.bazel | 2 +- pkg/ccl/changefeedccl/cdceval/plan.go | 37 +- .../changefeedccl/cdcevent/rowfetcher_test.go | 2 +- pkg/ccl/changefeedccl/cdctest/BUILD.bazel | 2 +- pkg/ccl/changefeedccl/cdctest/row.go | 6 +- pkg/ccl/changefeedccl/changefeed_dist.go | 7 +- .../changefeedccl/changefeed_processors.go | 18 +- pkg/ccl/changefeedccl/changefeed_stmt.go | 44 +- pkg/ccl/changefeedccl/changefeed_test.go | 27 +- 
pkg/ccl/changefeedccl/scheduled_changefeed.go | 26 +- .../scheduled_changefeed_test.go | 6 +- .../changefeedccl/schemafeed/schema_feed.go | 35 +- pkg/ccl/changefeedccl/sink.go | 6 +- .../changefeedccl/sink_cloudstorage_test.go | 4 +- .../changefeedccl/sink_external_connection.go | 10 +- pkg/ccl/changefeedccl/testfeed_test.go | 4 +- .../jobsccl/jobsprotectedtsccl/BUILD.bazel | 2 +- .../jobs_protected_ts_test.go | 37 +- pkg/ccl/multiregionccl/BUILD.bazel | 1 + pkg/ccl/multiregionccl/datadriven_test.go | 6 +- pkg/ccl/multiregionccl/region_util_test.go | 22 +- .../tenantcostserver/BUILD.bazel | 6 +- .../tenantcostserver/configure.go | 18 +- .../multitenantccl/tenantcostserver/server.go | 11 +- .../tenantcostserver/server_test.go | 33 +- .../tenantcostserver/system_table.go | 72 ++-- .../tenantcostserver/token_bucket.go | 26 +- pkg/ccl/serverccl/role_authentication_test.go | 8 +- .../spanconfigsqltranslatorccl/BUILD.bazel | 2 - .../datadriven_test.go | 18 +- pkg/ccl/storageccl/BUILD.bazel | 3 +- .../storageccl/external_sst_reader_test.go | 7 +- .../partitioned_stream_client_test.go | 2 +- pkg/ccl/streamingccl/streamingest/BUILD.bazel | 4 +- .../streamingest/alter_replication_job.go | 25 +- .../replication_stream_e2e_test.go | 11 +- .../streamingest/stream_ingest_manager.go | 19 +- .../stream_ingestion_frontier_processor.go | 10 +- ...tream_ingestion_frontier_processor_test.go | 4 +- .../streamingest/stream_ingestion_job.go | 112 ++--- .../streamingest/stream_ingestion_job_test.go | 10 +- .../streamingest/stream_ingestion_planning.go | 17 +- .../stream_ingestion_processor.go | 4 +- .../stream_ingestion_processor_test.go | 22 +- .../streamingccl/streamproducer/BUILD.bazel | 3 + .../streamproducer/producer_job.go | 8 +- .../streamproducer/producer_job_test.go | 23 +- .../streamproducer/replication_manager.go | 6 +- .../replication_manager_test.go | 11 +- .../streamproducer/replication_stream_test.go | 7 +- .../streamproducer/stream_lifetime.go | 102 +++-- 
pkg/ccl/testccl/sqlccl/BUILD.bazel | 2 +- pkg/ccl/testccl/sqlccl/tenant_gc_test.go | 62 +-- pkg/ccl/workloadccl/storage.go | 4 +- pkg/cli/BUILD.bazel | 1 + pkg/cli/debug_job_trace_test.go | 6 +- pkg/cli/democluster/demo_cluster.go | 4 +- pkg/cloud/BUILD.bazel | 3 +- pkg/cloud/amazon/s3_storage_test.go | 91 ++--- pkg/cloud/azure/azure_storage_test.go | 11 +- pkg/cloud/cloudtestutils/BUILD.bazel | 3 +- .../cloudtestutils/cloud_test_helpers.go | 54 +-- pkg/cloud/external_storage.go | 19 +- pkg/cloud/externalconn/BUILD.bazel | 3 +- pkg/cloud/externalconn/connection_kms.go | 6 +- pkg/cloud/externalconn/connection_storage.go | 7 +- pkg/cloud/externalconn/record.go | 11 +- pkg/cloud/externalconn/utils/BUILD.bazel | 3 +- .../externalconn/utils/connection_utils.go | 12 +- pkg/cloud/gcp/gcs_storage_test.go | 76 +--- pkg/cloud/httpsink/http_storage_test.go | 24 +- pkg/cloud/impl_registry.go | 29 +- pkg/cloud/kms.go | 6 +- pkg/cloud/kms_test_utils.go | 13 +- pkg/cloud/nodelocal/nodelocal_storage_test.go | 11 +- pkg/cloud/nullsink/nullsink_storage_test.go | 4 +- pkg/cloud/userfile/BUILD.bazel | 4 +- pkg/cloud/userfile/file_table_storage.go | 14 +- pkg/cloud/userfile/file_table_storage_test.go | 29 +- pkg/cloud/userfile/filetable/BUILD.bazel | 4 +- .../filetable/file_table_read_writer.go | 110 +++-- .../filetable/filetabletest/BUILD.bazel | 3 +- .../file_table_read_writer_test.go | 27 +- pkg/jobs/BUILD.bazel | 6 +- pkg/jobs/adopt.go | 44 +- pkg/jobs/delegate_control_test.go | 16 +- pkg/jobs/executor_impl.go | 22 +- pkg/jobs/executor_impl_test.go | 11 +- pkg/jobs/helpers_test.go | 43 +- pkg/jobs/job_scheduler.go | 57 ++- pkg/jobs/job_scheduler_test.go | 142 +++---- pkg/jobs/jobs.go | 279 ++++++------- pkg/jobs/jobs_test.go | 215 +++++----- pkg/jobs/jobsprotectedts/BUILD.bazel | 3 +- pkg/jobs/jobsprotectedts/jobs_protected_ts.go | 16 +- .../jobs_protected_ts_manager.go | 21 +- pkg/jobs/progress.go | 2 +- pkg/jobs/registry.go | 317 +++++++------- pkg/jobs/registry_external_test.go 
| 17 +- pkg/jobs/registry_test.go | 136 +++--- pkg/jobs/scheduled_job.go | 192 ++++++--- pkg/jobs/scheduled_job_executor.go | 32 +- pkg/jobs/scheduled_job_executor_test.go | 36 +- pkg/jobs/scheduled_job_test.go | 20 +- pkg/jobs/test_helpers.go | 6 +- pkg/jobs/testutils_test.go | 18 +- pkg/jobs/update.go | 386 ++++++++++-------- pkg/jobs/utils.go | 10 +- pkg/jobs/wait.go | 33 +- pkg/kv/db.go | 6 +- pkg/kv/kvclient/rangefeed/db_adapter.go | 1 + pkg/kv/kvserver/BUILD.bazel | 2 +- pkg/kv/kvserver/client_protectedts_test.go | 23 +- pkg/kv/kvserver/client_replica_test.go | 10 +- pkg/kv/kvserver/client_spanconfigs_test.go | 6 +- pkg/kv/kvserver/protectedts/BUILD.bazel | 2 +- pkg/kv/kvserver/protectedts/protectedts.go | 22 +- .../kvserver/protectedts/ptcache/BUILD.bazel | 5 +- pkg/kv/kvserver/protectedts/ptcache/cache.go | 19 +- .../protectedts/ptcache/cache_test.go | 202 ++++----- .../protectedts/ptprovider/BUILD.bazel | 3 +- .../protectedts/ptprovider/provider.go | 14 +- .../protectedts/ptreconcile/BUILD.bazel | 5 +- .../protectedts/ptreconcile/reconciler.go | 18 +- .../ptreconcile/reconciler_test.go | 20 +- .../protectedts/ptstorage/BUILD.bazel | 6 +- .../kvserver/protectedts/ptstorage/storage.go | 328 +++++++-------- .../protectedts/ptstorage/storage_test.go | 298 ++++++-------- .../ptstorage/storage_with_database.go | 101 ++--- pkg/kv/kvserver/rangelog/BUILD.bazel | 2 +- .../rangelog/internal_executor_writer_test.go | 10 +- pkg/kv/kvserver/rangelog/rangelog.go | 2 +- pkg/kv/kvserver/rangelog/rangelog_test.go | 2 +- pkg/kv/kvserver/reports/BUILD.bazel | 4 +- .../reports/constraint_stats_report.go | 14 +- .../reports/constraint_stats_report_test.go | 4 +- .../reports/critical_localities_report.go | 14 +- .../critical_localities_report_test.go | 8 +- .../reports/replication_stats_report.go | 14 +- .../reports/replication_stats_report_test.go | 4 +- pkg/kv/kvserver/reports/reporter.go | 8 +- pkg/multitenant/BUILD.bazel | 3 +- pkg/multitenant/tenant_usage.go | 6 +- 
pkg/repstream/BUILD.bazel | 2 +- pkg/repstream/api.go | 10 +- pkg/scheduledjobs/BUILD.bazel | 2 +- pkg/scheduledjobs/env.go | 26 +- pkg/scheduledjobs/schedulebase/BUILD.bazel | 2 +- pkg/scheduledjobs/schedulebase/util.go | 21 +- pkg/server/BUILD.bazel | 3 +- pkg/server/admin.go | 38 +- pkg/server/api_v2_sql.go | 40 +- pkg/server/authentication.go | 2 - pkg/server/external_storage_builder.go | 26 +- pkg/server/index_usage_stats.go | 11 +- pkg/server/loss_of_quorum.go | 4 +- pkg/server/node.go | 9 +- pkg/server/server.go | 37 +- .../server_internal_executor_factory_test.go | 4 +- pkg/server/server_sql.go | 212 +++++----- pkg/server/status.go | 12 +- pkg/server/tenant.go | 27 +- pkg/server/testserver.go | 19 +- pkg/server/tracedumper/BUILD.bazel | 4 +- pkg/server/tracedumper/tracedumper.go | 6 +- pkg/server/tracedumper/tracedumper_test.go | 4 +- pkg/spanconfig/spanconfig.go | 12 +- pkg/spanconfig/spanconfigjob/BUILD.bazel | 2 +- pkg/spanconfig/spanconfigjob/job.go | 6 +- .../spanconfigkvaccessor/BUILD.bazel | 4 +- .../spanconfigkvaccessor/kvaccessor.go | 8 +- .../spanconfigkvaccessor/kvaccessor_test.go | 10 +- .../spanconfigkvsubscriber/BUILD.bazel | 2 +- .../spanconfigkvsubscriber/datadriven_test.go | 4 +- pkg/spanconfig/spanconfiglimiter/BUILD.bazel | 2 +- pkg/spanconfig/spanconfiglimiter/limiter.go | 8 +- pkg/spanconfig/spanconfigmanager/BUILD.bazel | 5 +- pkg/spanconfig/spanconfigmanager/manager.go | 16 +- .../spanconfigmanager/manager_test.go | 17 +- .../spanconfigreconciler/BUILD.bazel | 2 +- .../spanconfigreconciler/reconciler.go | 31 +- .../spanconfigsqltranslator/BUILD.bazel | 2 - .../spanconfigsqltranslator/sqltranslator.go | 134 +++--- .../spanconfigsqlwatcher/BUILD.bazel | 3 +- .../protectedtsdecoder_test.go | 20 +- .../spanconfigtestcluster/BUILD.bazel | 2 +- .../spanconfigtestcluster/tenant_state.go | 47 +-- pkg/sql/BUILD.bazel | 4 +- pkg/sql/alter_database.go | 16 +- pkg/sql/alter_index.go | 2 +- pkg/sql/alter_role.go | 13 +- pkg/sql/alter_table.go | 56 
++- pkg/sql/alter_table_locality.go | 6 +- pkg/sql/authorization.go | 40 +- pkg/sql/authorization_test.go | 29 +- pkg/sql/backfill.go | 273 +++++++------ pkg/sql/backfill/BUILD.bazel | 1 + pkg/sql/backfill/backfill.go | 9 +- pkg/sql/backfill/mvcc_index_merger.go | 121 +++--- pkg/sql/catalog/descidgen/generate_id.go | 53 ++- pkg/sql/catalog/descs/BUILD.bazel | 5 +- pkg/sql/catalog/descs/collection.go | 24 +- pkg/sql/catalog/descs/collection_test.go | 203 ++++----- pkg/sql/catalog/descs/factory.go | 38 +- pkg/sql/catalog/descs/system_table.go | 21 +- pkg/sql/catalog/descs/txn.go | 17 +- pkg/sql/catalog/descs/txn_external_test.go | 21 +- .../txn_with_executor_datadriven_test.go | 16 +- pkg/sql/catalog/lease/BUILD.bazel | 4 +- pkg/sql/catalog/lease/count.go | 4 +- pkg/sql/catalog/lease/helpers_test.go | 2 +- pkg/sql/catalog/lease/ie_writer_test.go | 6 +- pkg/sql/catalog/lease/kv_writer_test.go | 4 +- pkg/sql/catalog/lease/lease.go | 49 ++- pkg/sql/catalog/lease/lease_test.go | 78 ++-- pkg/sql/catalog/lease/storage.go | 21 +- pkg/sql/catalog/resolver/BUILD.bazel | 2 +- pkg/sql/catalog/resolver/resolver_test.go | 20 +- pkg/sql/catalog/schematelemetry/BUILD.bazel | 3 +- .../schematelemetry/scheduled_job_executor.go | 19 +- .../schematelemetry/schema_telemetry_event.go | 8 +- .../schematelemetrycontroller/BUILD.bazel | 3 +- .../schematelemetrycontroller/controller.go | 46 +-- pkg/sql/check.go | 56 +-- pkg/sql/check_test.go | 19 +- pkg/sql/compact_sql_stats.go | 75 ++-- pkg/sql/conn_executor.go | 48 ++- pkg/sql/conn_executor_exec.go | 58 ++- pkg/sql/conn_executor_internal_test.go | 2 +- pkg/sql/conn_executor_prepare.go | 2 +- pkg/sql/control_jobs.go | 10 +- pkg/sql/control_schedules.go | 50 +-- pkg/sql/crdb_internal.go | 32 +- pkg/sql/crdb_internal_test.go | 10 +- pkg/sql/create_external_connection.go | 37 +- pkg/sql/create_function_test.go | 29 +- pkg/sql/create_index.go | 2 +- pkg/sql/create_role.go | 15 +- pkg/sql/create_schema.go | 17 +- pkg/sql/create_stats.go | 42 +- 
pkg/sql/create_table.go | 15 +- pkg/sql/create_view.go | 2 +- pkg/sql/database_region_change_finalizer.go | 37 +- pkg/sql/database_test.go | 6 +- pkg/sql/delete_preserving_index_test.go | 15 +- pkg/sql/descmetadata/BUILD.bazel | 3 +- pkg/sql/descmetadata/metadata_updater.go | 24 +- pkg/sql/discard.go | 40 +- pkg/sql/distsql/server.go | 2 +- pkg/sql/drop_database.go | 3 +- pkg/sql/drop_external_connection.go | 4 +- pkg/sql/drop_function_test.go | 43 +- pkg/sql/drop_role.go | 15 +- pkg/sql/drop_schema.go | 2 +- pkg/sql/drop_table.go | 39 +- pkg/sql/drop_test.go | 21 +- pkg/sql/event_log.go | 50 +-- pkg/sql/exec_util.go | 23 +- pkg/sql/execinfra/BUILD.bazel | 1 - pkg/sql/execinfra/server_config.go | 13 +- pkg/sql/execstats/traceanalyzer_test.go | 2 +- pkg/sql/function_resolver_test.go | 14 +- pkg/sql/gcjob/BUILD.bazel | 1 + pkg/sql/gcjob/gc_job.go | 5 +- pkg/sql/gcjob/gc_job_utils.go | 14 +- pkg/sql/gcjob/index_garbage_collection.go | 9 +- pkg/sql/gcjob/refresh_statuses.go | 10 +- pkg/sql/gcjob/table_garbage_collection.go | 9 +- pkg/sql/gcjob/tenant_garbage_collection.go | 9 +- pkg/sql/gcjob_test/BUILD.bazel | 2 +- pkg/sql/gcjob_test/gc_job_test.go | 78 ++-- pkg/sql/grant_revoke_system.go | 19 +- pkg/sql/grant_role.go | 105 +++-- pkg/sql/importer/BUILD.bazel | 3 +- pkg/sql/importer/exportparquet_test.go | 11 +- pkg/sql/importer/import_job.go | 137 ++++--- pkg/sql/importer/import_planning.go | 35 +- pkg/sql/importer/import_processor.go | 4 +- pkg/sql/importer/import_processor_planning.go | 62 +-- pkg/sql/importer/import_processor_test.go | 31 +- pkg/sql/importer/import_stmt_test.go | 34 +- .../importer/read_import_avro_logical_test.go | 2 +- pkg/sql/importer/read_import_base.go | 2 +- pkg/sql/importer/read_import_pgdump.go | 19 +- pkg/sql/index_backfiller.go | 5 +- pkg/sql/indexbackfiller_test.go | 15 +- pkg/sql/information_schema.go | 55 +-- pkg/sql/instrumentation.go | 5 +- pkg/sql/internal.go | 261 +++++++----- pkg/sql/internal_test.go | 4 +- pkg/sql/{sqlutil => 
isql}/BUILD.bazel | 11 +- pkg/sql/isql/doc.go | 13 + .../internal_executor.go => isql/isql_db.go} | 117 +++--- pkg/sql/isql/options.go | 113 +++++ pkg/sql/join_token.go | 17 +- .../testdata/logic_test/drop_database | 5 +- .../logictest/testdata/logic_test/drop_table | 2 +- pkg/sql/logictest/testdata/logic_test/jobs | 52 +-- pkg/sql/mvcc_backfiller.go | 15 +- pkg/sql/opt_catalog.go | 2 +- pkg/sql/opt_exec_factory.go | 2 +- pkg/sql/pg_catalog.go | 13 +- pkg/sql/pgwire/BUILD.bazel | 2 +- pkg/sql/pgwire/auth.go | 4 - pkg/sql/pgwire/conn_test.go | 4 +- pkg/sql/pgwire/server.go | 1 - pkg/sql/planhook.go | 2 + pkg/sql/planner.go | 97 +++-- pkg/sql/privileged_accessor.go | 2 +- pkg/sql/reassign_owned_by.go | 4 +- pkg/sql/region_util.go | 19 +- pkg/sql/resolve_oid.go | 12 +- pkg/sql/revoke_role.go | 2 +- pkg/sql/row/BUILD.bazel | 2 + pkg/sql/row/expr_walker.go | 15 +- pkg/sql/row/expr_walker_test.go | 7 +- pkg/sql/rowexec/BUILD.bazel | 3 +- pkg/sql/rowexec/backfiller.go | 10 +- pkg/sql/rowexec/backfiller_test.go | 21 +- pkg/sql/rowexec/bulk_row_writer.go | 4 +- pkg/sql/rowexec/columnbackfiller.go | 13 +- pkg/sql/rowexec/indexbackfiller.go | 10 +- pkg/sql/rowexec/sample_aggregator.go | 13 +- pkg/sql/rowexec/sample_aggregator_test.go | 11 +- pkg/sql/rowexec/tablereader.go | 2 +- pkg/sql/save_table.go | 6 +- pkg/sql/scheduledlogging/BUILD.bazel | 3 +- .../captured_index_usage_stats.go | 21 +- pkg/sql/schema_change_plan_node.go | 34 +- pkg/sql/schema_changer.go | 228 +++++------ pkg/sql/schema_changer_test.go | 16 +- pkg/sql/schemachanger/scbackup/BUILD.bazel | 3 +- pkg/sql/schemachanger/scbackup/job.go | 11 +- .../alter_table_alter_primary_key.go | 6 +- pkg/sql/schemachanger/scdeps/BUILD.bazel | 2 +- pkg/sql/schemachanger/scdeps/build_deps.go | 29 +- pkg/sql/schemachanger/scdeps/exec_deps.go | 33 +- pkg/sql/schemachanger/scdeps/run_deps.go | 77 ++-- .../scdeps/sctestutils/sctestutils.go | 6 +- pkg/sql/schemachanger/scdeps/validator.go | 14 +- 
pkg/sql/schemachanger/scexec/BUILD.bazel | 2 +- .../scexec/backfiller/BUILD.bazel | 2 +- .../scexec/backfiller/tracker.go | 10 +- .../scexec/executor_external_test.go | 67 ++- pkg/sql/schemachanger/scjob/BUILD.bazel | 1 + pkg/sql/schemachanger/scjob/job.go | 14 +- pkg/sql/scrub_constraint.go | 2 +- pkg/sql/scrub_fk.go | 4 +- pkg/sql/scrub_index.go | 2 +- pkg/sql/scrub_unique_constraint.go | 2 +- pkg/sql/sem/eval/deps.go | 2 +- pkg/sql/sessiondata/internal.go | 2 +- pkg/sql/sessioninit/BUILD.bazel | 1 - pkg/sql/sessioninit/cache.go | 35 +- pkg/sql/sessioninit/cache_test.go | 87 ++-- pkg/sql/set_cluster_setting.go | 73 ++-- pkg/sql/set_zone_config.go | 9 +- pkg/sql/show_cluster_setting.go | 8 +- pkg/sql/show_create_clauses.go | 4 +- pkg/sql/show_create_external_connection.go | 7 +- pkg/sql/show_create_schedule.go | 15 +- pkg/sql/show_create_table_test.go | 22 +- pkg/sql/show_fingerprints.go | 2 +- pkg/sql/show_histogram.go | 2 +- pkg/sql/show_stats.go | 2 +- pkg/sql/show_tenant.go | 10 +- pkg/sql/sql_cursor.go | 24 +- .../instancestorage/instancestorage.go | 10 +- .../instancestorage/instancestorage_test.go | 4 +- .../sqlstats/persistedsqlstats/BUILD.bazel | 5 +- .../persistedsqlstats/combined_iterator.go | 10 +- .../persistedsqlstats/compaction_exec.go | 14 +- .../compaction_scheduling.go | 17 +- .../persistedsqlstats/compaction_test.go | 13 +- .../sqlstats/persistedsqlstats/controller.go | 28 +- pkg/sql/sqlstats/persistedsqlstats/flush.go | 62 +-- .../sqlstats/persistedsqlstats/provider.go | 11 +- .../scheduled_job_monitor.go | 25 +- .../scheduled_sql_stats_compaction_test.go | 12 +- .../sqlstats/persistedsqlstats/stmt_reader.go | 8 +- .../sqlstats/persistedsqlstats/txn_reader.go | 16 +- pkg/sql/sqlstats/sslocal/BUILD.bazel | 2 - pkg/sql/sqlstats/sslocal/sslocal_provider.go | 6 +- pkg/sql/stats/BUILD.bazel | 5 +- pkg/sql/stats/automatic_stats.go | 13 +- pkg/sql/stats/automatic_stats_test.go | 58 +-- pkg/sql/stats/delete_stats.go | 20 +- 
pkg/sql/stats/delete_stats_test.go | 31 +- pkg/sql/stats/new_stat.go | 21 +- pkg/sql/stats/stats_cache.go | 37 +- pkg/sql/stats/stats_cache_test.go | 58 +-- pkg/sql/stmtdiagnostics/BUILD.bazel | 3 +- .../stmtdiagnostics/statement_diagnostics.go | 37 +- pkg/sql/syntheticprivilegecache/BUILD.bazel | 2 +- pkg/sql/syntheticprivilegecache/cache.go | 28 +- pkg/sql/table.go | 4 +- pkg/sql/table_test.go | 6 +- pkg/sql/temporary_schema.go | 157 ++++--- pkg/sql/temporary_schema_test.go | 20 +- pkg/sql/tenant_accessors.go | 27 +- pkg/sql/tenant_creation.go | 50 ++- pkg/sql/tenant_deletion.go | 43 +- pkg/sql/tenant_gc.go | 31 +- pkg/sql/tenant_settings.go | 6 +- pkg/sql/tenant_spec.go | 4 +- pkg/sql/tenant_test.go | 16 +- pkg/sql/tenant_update.go | 42 +- pkg/sql/truncate.go | 5 +- pkg/sql/ttl/ttljob/BUILD.bazel | 3 +- pkg/sql/ttl/ttljob/ttljob.go | 3 +- pkg/sql/ttl/ttljob/ttljob_metrics.go | 2 +- pkg/sql/ttl/ttljob/ttljob_processor.go | 25 +- pkg/sql/ttl/ttljob/ttljob_query_builder.go | 13 +- pkg/sql/ttl/ttlschedule/BUILD.bazel | 2 +- pkg/sql/ttl/ttlschedule/ttlschedule.go | 38 +- pkg/sql/type_change.go | 89 ++-- pkg/sql/unsplit.go | 2 +- pkg/sql/user.go | 58 +-- pkg/sql/zone_config.go | 7 +- pkg/sql/zone_config_test.go | 9 +- pkg/testutils/serverutils/test_server_shim.go | 6 +- pkg/upgrade/BUILD.bazel | 2 +- pkg/upgrade/migrationstable/BUILD.bazel | 2 +- .../migrationstable/migrations_table.go | 14 +- pkg/upgrade/system_upgrade.go | 12 +- pkg/upgrade/tenant_upgrade.go | 20 +- pkg/upgrade/upgradejob/upgrade_job.go | 29 +- pkg/upgrade/upgrademanager/BUILD.bazel | 5 +- pkg/upgrade/upgrademanager/manager.go | 52 ++- .../upgrademanager/manager_external_test.go | 12 +- pkg/upgrade/upgrades/BUILD.bazel | 3 +- .../upgrades/alter_jobs_add_job_type.go | 2 +- .../desc_id_sequence_for_system_tenant.go | 2 +- .../ensure_sql_schema_telemetry_schedule.go | 6 +- .../fix_userfile_descriptor_corruption.go | 66 +-- pkg/upgrade/upgrades/helpers_test.go | 15 +- 
pkg/upgrade/upgrades/permanent_upgrades.go | 7 +- ...precondition_before_starting_an_upgrade.go | 59 ++- .../upgrades/role_id_sequence_migration.go | 2 +- .../upgrades/role_members_ids_migration.go | 2 +- pkg/upgrade/upgrades/schema_changes.go | 7 +- .../upgrades/schema_changes_external_test.go | 13 +- .../upgrades/system_external_connections.go | 2 +- pkg/upgrade/upgrades/system_job_info.go | 2 +- .../system_users_role_id_migration.go | 2 +- ..._column_ids_in_sequence_back_references.go | 11 +- ...upgrade_sequence_to_be_referenced_by_ID.go | 60 ++- .../upgrades/wait_for_del_range_in_gc_job.go | 4 +- .../upgrades/wait_for_schema_changes.go | 2 +- pkg/util/stop/stopper.go | 2 +- pkg/util/tracing/zipper/BUILD.bazel | 2 +- pkg/util/tracing/zipper/zipper.go | 8 +- 484 files changed, 6523 insertions(+), 6657 deletions(-) rename pkg/sql/{sqlutil => isql}/BUILD.bazel (64%) create mode 100644 pkg/sql/isql/doc.go rename pkg/sql/{sqlutil/internal_executor.go => isql/isql_db.go} (76%) create mode 100644 pkg/sql/isql/options.go diff --git a/pkg/BUILD.bazel b/pkg/BUILD.bazel index 5ea9e893247a..7ceecf5e690d 100644 --- a/pkg/BUILD.bazel +++ b/pkg/BUILD.bazel @@ -1581,6 +1581,7 @@ GO_TARGETS = [ "//pkg/sql/importer:importer_test", "//pkg/sql/inverted:inverted", "//pkg/sql/inverted:inverted_test", + "//pkg/sql/isql:isql", "//pkg/sql/lex:lex", "//pkg/sql/lex:lex_test", "//pkg/sql/lexbase/allkeywords:allkeywords", @@ -1856,7 +1857,6 @@ GO_TARGETS = [ "//pkg/sql/sqltelemetry:sqltelemetry", "//pkg/sql/sqltestutils:sqltestutils", "//pkg/sql/sqltestutils:sqltestutils_test", - "//pkg/sql/sqlutil:sqlutil", "//pkg/sql/stats/bounds:bounds", "//pkg/sql/stats:stats", "//pkg/sql/stats:stats_test", @@ -2798,6 +2798,7 @@ GET_X_DATA_TARGETS = [ "//pkg/sql/idxusage:get_x_data", "//pkg/sql/importer:get_x_data", "//pkg/sql/inverted:get_x_data", + "//pkg/sql/isql:get_x_data", "//pkg/sql/lex:get_x_data", "//pkg/sql/lexbase:get_x_data", "//pkg/sql/lexbase/allkeywords:get_x_data", @@ -2978,7 +2979,6 @@ 
GET_X_DATA_TARGETS = [ "//pkg/sql/sqlstats/ssmemstorage:get_x_data", "//pkg/sql/sqltelemetry:get_x_data", "//pkg/sql/sqltestutils:get_x_data", - "//pkg/sql/sqlutil:get_x_data", "//pkg/sql/stats:get_x_data", "//pkg/sql/stats/bounds:get_x_data", "//pkg/sql/stmtdiagnostics:get_x_data", diff --git a/pkg/ccl/backupccl/BUILD.bazel b/pkg/ccl/backupccl/BUILD.bazel index 8c018e552a7a..f4c994b92efd 100644 --- a/pkg/ccl/backupccl/BUILD.bazel +++ b/pkg/ccl/backupccl/BUILD.bazel @@ -97,6 +97,7 @@ go_library( "//pkg/sql/execinfra", "//pkg/sql/execinfrapb", "//pkg/sql/exprutil", + "//pkg/sql/isql", "//pkg/sql/parser", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", @@ -115,7 +116,6 @@ go_library( "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", "//pkg/sql/sqlerrors", - "//pkg/sql/sqlutil", "//pkg/sql/stats", "//pkg/sql/syntheticprivilege", "//pkg/sql/types", @@ -251,6 +251,7 @@ go_test( "//pkg/sql/execinfra", "//pkg/sql/execinfrapb", "//pkg/sql/importer", + "//pkg/sql/isql", "//pkg/sql/parser", "//pkg/sql/pgwire/pgerror", "//pkg/sql/randgen", @@ -259,7 +260,6 @@ go_test( "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/sql/stats", "//pkg/storage", "//pkg/testutils", diff --git a/pkg/ccl/backupccl/alter_backup_planning.go b/pkg/ccl/backupccl/alter_backup_planning.go index 7b9f92df40fa..44b44e0a0e04 100644 --- a/pkg/ccl/backupccl/alter_backup_planning.go +++ b/pkg/ccl/backupccl/alter_backup_planning.go @@ -149,8 +149,9 @@ func doAlterBackupPlan( } ioConf := baseStore.ExternalIOConf() - kmsEnv := backupencryption.MakeBackupKMSEnv(baseStore.Settings(), &ioConf, p.ExecCfg().DB, - p.User(), p.ExecCfg().InternalExecutor) + kmsEnv := backupencryption.MakeBackupKMSEnv( + baseStore.Settings(), &ioConf, p.ExecCfg().InternalDB, p.User(), + ) // Check that at least one of the old keys has been used to encrypt the backup in the past. // Use the first one that works to decrypt the ENCRYPTION-INFO file(s). 
diff --git a/pkg/ccl/backupccl/alter_backup_schedule.go b/pkg/ccl/backupccl/alter_backup_schedule.go index 05aedb3d2bdc..4207e0e9afca 100644 --- a/pkg/ccl/backupccl/alter_backup_schedule.go +++ b/pkg/ccl/backupccl/alter_backup_schedule.go @@ -50,8 +50,9 @@ func loadSchedules( } execCfg := p.ExecCfg() - env := sql.JobSchedulerEnv(execCfg) - schedule, err := jobs.LoadScheduledJob(ctx, env, int64(scheduleID), execCfg.InternalExecutor, p.Txn()) + env := sql.JobSchedulerEnv(execCfg.JobsKnobs()) + schedules := jobs.ScheduledJobTxn(p.InternalSQLTxn()) + schedule, err := schedules.Load(ctx, env, int64(scheduleID)) if err != nil { return s, err } @@ -74,7 +75,7 @@ func loadSchedules( var dependentStmt *tree.Backup if args.DependentScheduleID != 0 { - dependentSchedule, err = jobs.LoadScheduledJob(ctx, env, args.DependentScheduleID, execCfg.InternalExecutor, p.Txn()) + dependentSchedule, err = schedules.Load(ctx, env, args.DependentScheduleID) if err != nil { return scheduleDetails{}, err } @@ -188,10 +189,11 @@ func doAlterBackupSchedules( if err != nil { return err } + scheduledJobs := jobs.ScheduledJobTxn(p.InternalSQLTxn()) s.fullJob.SetExecutionDetails( tree.ScheduledBackupExecutor.InternalName(), jobspb.ExecutionArguments{Args: fullAny}) - if err := s.fullJob.Update(ctx, p.ExecCfg().InternalExecutor, p.Txn()); err != nil { + if err := scheduledJobs.Update(ctx, s.fullJob); err != nil { return err } @@ -204,7 +206,8 @@ func doAlterBackupSchedules( s.incJob.SetExecutionDetails( tree.ScheduledBackupExecutor.InternalName(), jobspb.ExecutionArguments{Args: incAny}) - if err := s.incJob.Update(ctx, p.ExecCfg().InternalExecutor, p.Txn()); err != nil { + + if err := scheduledJobs.Update(ctx, s.incJob); err != nil { return err } @@ -382,8 +385,8 @@ func processFullBackupRecurrence( return s, nil } - env := sql.JobSchedulerEnv(p.ExecCfg()) - ex := p.ExecCfg().InternalExecutor + env := sql.JobSchedulerEnv(p.ExecCfg().JobsKnobs()) + scheduledJobs := 
jobs.ScheduledJobTxn(p.InternalSQLTxn()) if fullBackupAlways { if s.incJob == nil { // Nothing to do. @@ -396,7 +399,7 @@ func processFullBackupRecurrence( } s.fullArgs.DependentScheduleID = 0 s.fullArgs.UnpauseOnSuccess = 0 - if err := s.incJob.Delete(ctx, ex, p.Txn()); err != nil { + if err := scheduledJobs.Delete(ctx, s.incJob); err != nil { return scheduleDetails{}, err } s.incJob = nil @@ -453,7 +456,7 @@ func processFullBackupRecurrence( tree.ScheduledBackupExecutor.InternalName(), jobspb.ExecutionArguments{Args: incAny}) - if err := s.incJob.Create(ctx, ex, p.Txn()); err != nil { + if err := scheduledJobs.Create(ctx, s.incJob); err != nil { return scheduleDetails{}, err } s.fullArgs.UnpauseOnSuccess = s.incJob.ScheduleID() @@ -480,7 +483,7 @@ func validateFullIncrementalFrequencies(p sql.PlanHookState, s scheduleDetails) if s.incJob == nil { return nil } - env := sql.JobSchedulerEnv(p.ExecCfg()) + env := sql.JobSchedulerEnv(p.ExecCfg().JobsKnobs()) now := env.Now() fullFreq, err := frequencyFromCron(now, s.fullJob.ScheduleExpr()) @@ -536,7 +539,7 @@ func processInto(p sql.PlanHookState, spec *alterBackupScheduleSpec, s scheduleD // Kick off a full backup immediately so we can unpause incrementals. // This mirrors the behavior of CREATE SCHEDULE FOR BACKUP. 
- env := sql.JobSchedulerEnv(p.ExecCfg()) + env := sql.JobSchedulerEnv(p.ExecCfg().JobsKnobs()) s.fullJob.SetNextRun(env.Now()) return nil diff --git a/pkg/ccl/backupccl/backup_job.go b/pkg/ccl/backupccl/backup_job.go index a99b5bfbbc9b..5acebaaeb35a 100644 --- a/pkg/ccl/backupccl/backup_job.go +++ b/pkg/ccl/backupccl/backup_job.go @@ -32,7 +32,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/joberror" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" @@ -44,6 +43,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/stats" @@ -140,9 +140,12 @@ func backup( var lastCheckpoint time.Time var completedSpans, completedIntroducedSpans []roachpb.Span - kmsEnv := backupencryption.MakeBackupKMSEnv(execCtx.ExecCfg().Settings, - &execCtx.ExecCfg().ExternalIODirConfig, execCtx.ExecCfg().DB, execCtx.User(), - execCtx.ExecCfg().InternalExecutor) + kmsEnv := backupencryption.MakeBackupKMSEnv( + execCtx.ExecCfg().Settings, + &execCtx.ExecCfg().ExternalIODirConfig, + execCtx.ExecCfg().InternalDB, + execCtx.User(), + ) // TODO(benesch): verify these files, rather than accepting them as truth // blindly. // No concurrency yet, so these assignments are safe. 
@@ -369,13 +372,13 @@ func backup( } func releaseProtectedTimestamp( - ctx context.Context, txn *kv.Txn, pts protectedts.Storage, ptsID *uuid.UUID, + ctx context.Context, pts protectedts.Storage, ptsID *uuid.UUID, ) error { // If the job doesn't have a protected timestamp then there's nothing to do. if ptsID == nil { return nil } - err := pts.Release(ctx, txn, *ptsID) + err := pts.Release(ctx, *ptsID) if errors.Is(err, protectedts.ErrNotExists) { // No reason to return an error which might cause problems if it doesn't // seem to exist. @@ -447,8 +450,12 @@ func (b *backupResumer) Resume(ctx context.Context, execCtx interface{}) error { // The span is finished by the registry executing the job. details := b.job.Details().(jobspb.BackupDetails) p := execCtx.(sql.JobExecContext) - kmsEnv := backupencryption.MakeBackupKMSEnv(p.ExecCfg().Settings, - &p.ExecCfg().ExternalIODirConfig, p.ExecCfg().DB, p.User(), p.ExecCfg().InternalExecutor) + kmsEnv := backupencryption.MakeBackupKMSEnv( + p.ExecCfg().Settings, + &p.ExecCfg().ExternalIODirConfig, + p.ExecCfg().InternalDB, + p.User(), + ) // Resolve the backup destination. We can skip this step if we // have already resolved and persisted the destination either @@ -511,9 +518,10 @@ func (b *backupResumer) Resume(ctx context.Context, execCtx interface{}) error { // details and manifest in a prior resumption. // // TODO(adityamaru: Break this code block into helper methods. 
+ insqlDB := p.ExecCfg().InternalDB if details.URI == "" { initialDetails := details - if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { backupDetails, m, err := getBackupDetailAndManifest( ctx, p.ExecCfg(), txn, details, p.User(), backupDest, ) @@ -538,9 +546,12 @@ func (b *backupResumer) Resume(ctx context.Context, execCtx interface{}) error { details.ProtectedTimestampRecord = &protectedtsID if details.ProtectedTimestampRecord != nil { - if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := insqlDB.Txn(ctx, func( + ctx context.Context, txn isql.Txn, + ) error { + ptp := p.ExecCfg().ProtectedTimestampProvider.WithTxn(txn) return protectTimestampForBackup( - ctx, p.ExecCfg(), txn, b.job.ID(), backupManifest, details, + ctx, b.job.ID(), ptp, backupManifest, details, ) }); err != nil { return err @@ -562,8 +573,8 @@ func (b *backupResumer) Resume(ctx context.Context, execCtx interface{}) error { return err } - if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return planSchedulePTSChaining(ctx, p.ExecCfg(), txn, &details, b.job.CreatedBy()) + if err := insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return planSchedulePTSChaining(ctx, p.ExecCfg().JobsKnobs(), txn, &details, b.job.CreatedBy()) }); err != nil { return err } @@ -586,7 +597,7 @@ func (b *backupResumer) Resume(ctx context.Context, execCtx interface{}) error { // Update the job payload (non-volatile job definition) once, with the now // resolved destination, updated description, etc. If we resume again we'll // skip this whole block so this isn't an excessive update of payload. 
- if err := b.job.Update(ctx, nil, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + if err := b.job.NoTxn().Update(ctx, func(txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { if err := md.CheckRunningOrReverting(); err != nil { return err } @@ -732,10 +743,12 @@ func (b *backupResumer) Resume(ctx context.Context, execCtx interface{}) error { } if details.ProtectedTimestampRecord != nil && !b.testingKnobs.ignoreProtectedTimestamps { - if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := p.ExecCfg().InternalDB.Txn(ctx, func( + ctx context.Context, txn isql.Txn, + ) error { details := b.job.Details().(jobspb.BackupDetails) - return releaseProtectedTimestamp(ctx, txn, p.ExecCfg().ProtectedTimestampProvider, - details.ProtectedTimestampRecord) + pts := p.ExecCfg().ProtectedTimestampProvider.WithTxn(txn) + return releaseProtectedTimestamp(ctx, pts, details.ProtectedTimestampRecord) }); err != nil { log.Errorf(ctx, "failed to release protected timestamp: %v", err) } @@ -807,7 +820,9 @@ func (b *backupResumer) Resume(ctx context.Context, execCtx interface{}) error { logJobCompletion(ctx, b.getTelemetryEventType(), b.job.ID(), true, nil) } - return b.maybeNotifyScheduledJobCompletion(ctx, jobs.StatusSucceeded, p.ExecCfg()) + return b.maybeNotifyScheduledJobCompletion( + ctx, jobs.StatusSucceeded, p.ExecCfg().JobsKnobs(), p.ExecCfg().InternalDB, + ) } // ReportResults implements JobResultsReporter interface. 
@@ -830,15 +845,19 @@ func (b *backupResumer) ReportResults(ctx context.Context, resultsCh chan<- tree func getBackupDetailAndManifest( ctx context.Context, execCfg *sql.ExecutorConfig, - txn *kv.Txn, + txn isql.Txn, initialDetails jobspb.BackupDetails, user username.SQLUsername, backupDestination backupdest.ResolvedDestination, ) (jobspb.BackupDetails, backuppb.BackupManifest, error) { makeCloudStorage := execCfg.DistSQLSrv.ExternalStorageFromURI - kmsEnv := backupencryption.MakeBackupKMSEnv(execCfg.Settings, &execCfg.ExternalIODirConfig, - execCfg.DB, user, execCfg.InternalExecutor) + kmsEnv := backupencryption.MakeBackupKMSEnv( + execCfg.Settings, + &execCfg.ExternalIODirConfig, + execCfg.InternalDB, + user, + ) mem := execCfg.RootMemoryMonitor.MakeBoundAccount() defer mem.Close(ctx) @@ -1004,22 +1023,20 @@ func (b *backupResumer) readManifestOnResume( } func (b *backupResumer) maybeNotifyScheduledJobCompletion( - ctx context.Context, jobStatus jobs.Status, exec *sql.ExecutorConfig, + ctx context.Context, jobStatus jobs.Status, knobs *jobs.TestingKnobs, db isql.DB, ) error { env := scheduledjobs.ProdJobSchedulerEnv - if knobs, ok := exec.DistSQLSrv.TestingKnobs.JobsTestingKnobs.(*jobs.TestingKnobs); ok { - if knobs.JobSchedulerEnv != nil { - env = knobs.JobSchedulerEnv - } + if knobs != nil && knobs.JobSchedulerEnv != nil { + env = knobs.JobSchedulerEnv } - err := exec.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + err := db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { // We cannot rely on b.job containing created_by_id because on job // resumption the registry does not populate the resumer's CreatedByInfo. 
- datums, err := exec.InternalExecutor.QueryRowEx( + datums, err := txn.QueryRowEx( ctx, "lookup-schedule-info", - txn, + txn.KV(), sessiondata.NodeUserSessionDataOverride, fmt.Sprintf( "SELECT created_by_id FROM %s WHERE id=$1 AND created_by_type=$2", @@ -1035,7 +1052,8 @@ func (b *backupResumer) maybeNotifyScheduledJobCompletion( scheduleID := int64(tree.MustBeDInt(datums[0])) if err := jobs.NotifyJobTermination( - ctx, env, b.job.ID(), jobStatus, b.job.Details(), scheduleID, exec.InternalExecutor, txn); err != nil { + ctx, txn, env, b.job.ID(), jobStatus, b.job.Details(), scheduleID, + ); err != nil { return errors.Wrapf(err, "failed to notify schedule %d of completion of job %d", scheduleID, b.job.ID()) } @@ -1056,10 +1074,10 @@ func (b *backupResumer) OnFailOrCancel( p := execCtx.(sql.JobExecContext) cfg := p.ExecCfg() b.deleteCheckpoint(ctx, cfg, p.User()) - if err := cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := cfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { details := b.job.Details().(jobspb.BackupDetails) - return releaseProtectedTimestamp(ctx, txn, cfg.ProtectedTimestampProvider, - details.ProtectedTimestampRecord) + pts := cfg.ProtectedTimestampProvider.WithTxn(txn) + return releaseProtectedTimestamp(ctx, pts, details.ProtectedTimestampRecord) }); err != nil { return err } @@ -1067,8 +1085,9 @@ func (b *backupResumer) OnFailOrCancel( // This should never return an error unless resolving the schedule that the // job is being run under fails. This could happen if the schedule is dropped // while the job is executing. 
- if err := b.maybeNotifyScheduledJobCompletion(ctx, jobs.StatusFailed, - execCtx.(sql.JobExecContext).ExecCfg()); err != nil { + if err := b.maybeNotifyScheduledJobCompletion( + ctx, jobs.StatusFailed, cfg.JobsKnobs(), cfg.InternalDB, + ); err != nil { log.Errorf(ctx, "failed to notify job %d on completion of OnFailOrCancel: %+v", b.job.ID(), err) } diff --git a/pkg/ccl/backupccl/backup_metadata_test.go b/pkg/ccl/backupccl/backup_metadata_test.go index 120de51b10da..c9338be5365c 100644 --- a/pkg/ccl/backupccl/backup_metadata_test.go +++ b/pkg/ccl/backupccl/backup_metadata_test.go @@ -25,7 +25,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -87,9 +87,7 @@ func checkMetadata( tc.Servers[0].ClusterSettings(), blobs.TestEmptyBlobClientFactory, username.RootUserName(), - tc.Servers[0].InternalExecutor().(*sql.InternalExecutor), - tc.Servers[0].InternalExecutorFactory().(sqlutil.InternalExecutorFactory), - tc.Servers[0].DB(), + tc.Servers[0].InternalDB().(isql.DB), nil, /* limiters */ cloud.NilMetrics, ) @@ -104,7 +102,7 @@ func checkMetadata( srv := tc.Servers[0] execCfg := srv.ExecutorConfig().(sql.ExecutorConfig) kmsEnv := backupencryption.MakeBackupKMSEnv(srv.ClusterSettings(), &base.ExternalIODirConfig{}, - srv.DB(), username.RootUserName(), execCfg.InternalExecutor) + execCfg.InternalDB, username.RootUserName()) bm, err := backupinfo.NewBackupMetadata(ctx, store, backupinfo.MetadataSSTName, nil /* encryption */, &kmsEnv) if err != nil { diff --git a/pkg/ccl/backupccl/backup_planning.go b/pkg/ccl/backupccl/backup_planning.go index 6daff2f287ff..937503942e1c 100644 --- 
a/pkg/ccl/backupccl/backup_planning.go +++ b/pkg/ccl/backupccl/backup_planning.go @@ -30,6 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" @@ -41,6 +42,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/exprutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice" @@ -795,7 +797,7 @@ func backupPlanHook( // When running inside an explicit transaction, we simply create the job // record. We do not wait for the job to finish. _, err := p.ExecCfg().JobRegistry.CreateAdoptableJobWithTxn( - ctx, jr, jobID, plannerTxn) + ctx, jr, jobID, p.InternalSQLTxn()) if err != nil { return err } @@ -812,7 +814,9 @@ func backupPlanHook( log.Errorf(ctx, "failed to cleanup job: %v", cleanupErr) } }() - if err := p.ExecCfg().JobRegistry.CreateStartableJobWithTxn(ctx, &sj, jobID, plannerTxn, jr); err != nil { + if err := p.ExecCfg().JobRegistry.CreateStartableJobWithTxn( + ctx, &sj, jobID, p.InternalSQLTxn(), jr, + ); err != nil { return err } // We commit the transaction here so that the job can be started. This @@ -823,6 +827,16 @@ func backupPlanHook( }(); err != nil { return err } + // Release all descriptor leases here. 
Note that we committed the + // underlying transaction in the above closure -- so we're not using any + // leases anymore, but we might be holding some because some sql queries + // might have been executed by this transaction (indeed some certainly + // were when we created the job we're going to run). + // + // This call is not strictly necessary, but it's parallel to the other + // locations where we commit a transaction and wait for a job; it seems + // best to release leases we don't need. + p.InternalSQLTxn().Descriptors().ReleaseAll(ctx) if err := sj.Start(ctx); err != nil { return err } @@ -950,12 +964,11 @@ func collectTelemetry( func getScheduledBackupExecutionArgsFromSchedule( ctx context.Context, env scheduledjobs.JobSchedulerEnv, - txn *kv.Txn, - ie *sql.InternalExecutor, + storage jobs.ScheduledJobStorage, scheduleID int64, ) (*jobs.ScheduledJob, *backuppb.ScheduledBackupExecutionArgs, error) { // Load the schedule that has spawned this job. - sj, err := jobs.LoadScheduledJob(ctx, env, scheduleID, ie, txn) + sj, err := storage.Load(ctx, env, scheduleID) if err != nil { return nil, nil, errors.Wrapf(err, "failed to load scheduled job %d", scheduleID) } @@ -975,16 +988,14 @@ func getScheduledBackupExecutionArgsFromSchedule( // completion of the backup job. func planSchedulePTSChaining( ctx context.Context, - execCfg *sql.ExecutorConfig, - txn *kv.Txn, + knobs *jobs.TestingKnobs, + txn isql.Txn, backupDetails *jobspb.BackupDetails, createdBy *jobs.CreatedByInfo, ) error { env := scheduledjobs.ProdJobSchedulerEnv - if knobs, ok := execCfg.DistSQLSrv.TestingKnobs.JobsTestingKnobs.(*jobs.TestingKnobs); ok { - if knobs.JobSchedulerEnv != nil { - env = knobs.JobSchedulerEnv - } + if knobs != nil && knobs.JobSchedulerEnv != nil { + env = knobs.JobSchedulerEnv } // If this is not a scheduled backup, we do not chain pts records. 
if createdBy == nil || createdBy.Name != jobs.CreatedByScheduledJobs { @@ -992,7 +1003,8 @@ func planSchedulePTSChaining( } _, args, err := getScheduledBackupExecutionArgsFromSchedule( - ctx, env, txn, execCfg.InternalExecutor, createdBy.ID) + ctx, env, jobs.ScheduledJobTxn(txn), createdBy.ID, + ) if err != nil { return err } @@ -1014,7 +1026,8 @@ func planSchedulePTSChaining( } _, incArgs, err := getScheduledBackupExecutionArgsFromSchedule( - ctx, env, txn, execCfg.InternalExecutor, args.DependentScheduleID) + ctx, env, jobs.ScheduledJobTxn(txn), args.DependentScheduleID, + ) if err != nil { // We should always be able to resolve the dependent schedule ID. If the // incremental schedule was dropped then it would have unlinked itself @@ -1193,9 +1206,8 @@ func getProtectedTimestampTargetForBackup(backupManifest *backuppb.BackupManifes func protectTimestampForBackup( ctx context.Context, - execCfg *sql.ExecutorConfig, - txn *kv.Txn, jobID jobspb.JobID, + pts protectedts.Storage, backupManifest *backuppb.BackupManifest, backupDetails jobspb.BackupDetails, ) error { @@ -1213,9 +1225,14 @@ func protectTimestampForBackup( // `exclude_data_from_backup`. This ensures that the backup job does not // holdup GC on that table span for the duration of execution. 
target.IgnoreIfExcludedFromBackup = true - rec := jobsprotectedts.MakeRecord(*backupDetails.ProtectedTimestampRecord, int64(jobID), - tsToProtect, backupManifest.Spans, jobsprotectedts.Jobs, target) - return execCfg.ProtectedTimestampProvider.Protect(ctx, txn, rec) + return pts.Protect(ctx, jobsprotectedts.MakeRecord( + *backupDetails.ProtectedTimestampRecord, + int64(jobID), + tsToProtect, + backupManifest.Spans, + jobsprotectedts.Jobs, + target, + )) } // checkForNewDatabases returns an error if any new complete databases were @@ -1297,24 +1314,22 @@ func checkForNewTables( } func getTenantInfo( - ctx context.Context, execCfg *sql.ExecutorConfig, txn *kv.Txn, jobDetails jobspb.BackupDetails, + ctx context.Context, codec keys.SQLCodec, txn isql.Txn, jobDetails jobspb.BackupDetails, ) ([]roachpb.Span, []descpb.TenantInfoWithUsage, error) { var spans []roachpb.Span var tenants []descpb.TenantInfoWithUsage var err error - if jobDetails.FullCluster && execCfg.Codec.ForSystemTenant() { + if jobDetails.FullCluster && codec.ForSystemTenant() { // Include all tenants. 
tenants, err = retrieveAllTenantsMetadata( - ctx, execCfg.InternalExecutor, txn, + ctx, txn, ) if err != nil { return nil, nil, err } } else if len(jobDetails.SpecificTenantIds) > 0 { for _, id := range jobDetails.SpecificTenantIds { - tenantInfo, err := retrieveSingleTenantMetadata( - ctx, execCfg.InternalExecutor, txn, id, - ) + tenantInfo, err := retrieveSingleTenantMetadata(ctx, txn, id) if err != nil { return nil, nil, err } @@ -1340,7 +1355,7 @@ func getTenantInfo( func createBackupManifest( ctx context.Context, execCfg *sql.ExecutorConfig, - txn *kv.Txn, + txn isql.Txn, jobDetails jobspb.BackupDetails, prevBackups []backuppb.BackupManifest, ) (backuppb.BackupManifest, error) { @@ -1399,7 +1414,7 @@ func createBackupManifest( var spans []roachpb.Span var tenants []descpb.TenantInfoWithUsage tenantSpans, tenantInfos, err := getTenantInfo( - ctx, execCfg, txn, jobDetails, + ctx, execCfg.Codec, txn, jobDetails, ) if err != nil { return backuppb.BackupManifest{}, err diff --git a/pkg/ccl/backupccl/backup_planning_tenant.go b/pkg/ccl/backupccl/backup_planning_tenant.go index 55673b23faef..68a1110314cd 100644 --- a/pkg/ccl/backupccl/backup_planning_tenant.go +++ b/pkg/ccl/backupccl/backup_planning_tenant.go @@ -11,10 +11,9 @@ package backupccl import ( "context" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" @@ -77,10 +76,10 @@ func tenantMetadataFromRow(row tree.Datums) (descpb.TenantInfoWithUsage, error) } func retrieveSingleTenantMetadata( - ctx context.Context, ie *sql.InternalExecutor, txn *kv.Txn, tenantID roachpb.TenantID, + ctx context.Context, txn isql.Txn, tenantID roachpb.TenantID, ) (descpb.TenantInfoWithUsage, error) { - 
row, err := ie.QueryRow( - ctx, "backupccl.retrieveSingleTenantMetadata", txn, + row, err := txn.QueryRow( + ctx, "backupccl.retrieveSingleTenantMetadata", txn.KV(), tenantMetadataQuery+` WHERE id = $1`, tenantID.ToUint64(), ) if err != nil { @@ -97,10 +96,10 @@ func retrieveSingleTenantMetadata( } func retrieveAllTenantsMetadata( - ctx context.Context, ie *sql.InternalExecutor, txn *kv.Txn, + ctx context.Context, txn isql.Txn, ) ([]descpb.TenantInfoWithUsage, error) { - rows, err := ie.QueryBuffered( - ctx, "backupccl.retrieveAllTenantsMetadata", txn, + rows, err := txn.QueryBuffered( + ctx, "backupccl.retrieveAllTenantsMetadata", txn.KV(), // TODO(?): Should we add a `WHERE active`? We require the tenant to be active // when it is specified. // See: https://github.com/cockroachdb/cockroach/issues/89997 diff --git a/pkg/ccl/backupccl/backup_processor.go b/pkg/ccl/backupccl/backup_processor.go index 2d56b56185a8..60fbf49bd00b 100644 --- a/pkg/ccl/backupccl/backup_processor.go +++ b/pkg/ccl/backupccl/backup_processor.go @@ -434,7 +434,7 @@ func runBackupProcessor( fmt.Sprintf("ExportRequest for span %s", span.span), timeoutPerAttempt.Get(&clusterSettings.SV), func(ctx context.Context) error { rawResp, pErr = kv.SendWrappedWithAdmission( - ctx, flowCtx.Cfg.DB.NonTransactionalSender(), header, admissionHeader, req) + ctx, flowCtx.Cfg.DB.KV().NonTransactionalSender(), header, admissionHeader, req) if pErr != nil { return pErr.GoError() } diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index 0d9aee88dbfd..4d0a3ab567f8 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -80,11 +80,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" 
"github.com/cockroachdb/cockroach/pkg/sql/randgen" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/jobutils" @@ -643,9 +643,7 @@ func TestBackupRestoreAppend(t *testing.T) { tc.Servers[0].ClusterSettings(), blobs.TestEmptyBlobClientFactory, username.RootUserName(), - tc.Servers[0].InternalExecutor().(*sql.InternalExecutor), - tc.Servers[0].InternalExecutorFactory().(sqlutil.InternalExecutorFactory), - tc.Servers[0].DB(), + tc.Servers[0].InternalDB().(isql.DB), nil, /* limiters */ cloud.NilMetrics, ) @@ -4361,9 +4359,8 @@ func TestRegionalKMSEncryptedBackup(t *testing.T) { type testKMSEnv struct { settings *cluster.Settings externalIOConfig *base.ExternalIODirConfig - db *kv.DB + db isql.DB user username.SQLUsername - ie sqlutil.InternalExecutor } var _ cloud.KMSEnv = &testKMSEnv{} @@ -4376,7 +4373,7 @@ func (e *testKMSEnv) KMSConfig() *base.ExternalIODirConfig { return e.externalIOConfig } -func (e *testKMSEnv) DBHandle() *kv.DB { +func (e *testKMSEnv) DBHandle() isql.DB { return e.db } @@ -4384,10 +4381,6 @@ func (e *testKMSEnv) User() username.SQLUsername { return e.user } -func (e *testKMSEnv) InternalExecutor() sqlutil.InternalExecutor { - return e.ie -} - type testKMS struct { uri string } @@ -4502,7 +4495,6 @@ func TestValidateKMSURIsAgainstFullBackup(t *testing.T) { externalIOConfig: &base.ExternalIODirConfig{}, db: nil, user: username.RootUserName(), - ie: nil, } kmsInfo, err := backupencryption.ValidateKMSURIsAgainstFullBackup( ctx, tc.incrementalBackupURIs, masterKeyIDToDataKey, kmsEnv) @@ -5841,9 +5833,8 @@ func TestBackupRestoreCorruptedStatsIgnored(t *testing.T) { kmsEnv := &testKMSEnv{ settings: execCfg.Settings, externalIOConfig: &execCfg.ExternalIODirConfig, - db: execCfg.DB, + db: 
execCfg.InternalDB, user: username.RootUserName(), - ie: execCfg.InternalExecutor, } require.NoError(t, backupinfo.WriteTableStatistics(ctx, store, nil, /* encryption */ kmsEnv, &statsTable)) @@ -8205,9 +8196,7 @@ func TestReadBackupManifestMemoryMonitoring(t *testing.T) { st, blobs.TestBlobServiceClient(dir), username.RootUserName(), - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, ) @@ -8230,7 +8219,6 @@ func TestReadBackupManifestMemoryMonitoring(t *testing.T) { externalIOConfig: &base.ExternalIODirConfig{}, db: nil, user: username.RootUserName(), - ie: nil, } require.NoError(t, backupinfo.WriteBackupManifest(ctx, storage, "testmanifest", encOpts, &kmsEnv, desc)) @@ -10499,31 +10487,31 @@ $$; require.Equal(t, 1, len(rows[0])) udfID, err := strconv.Atoi(rows[0][0]) require.NoError(t, err) - err = sql.TestingDescsTxn(ctx, srcServer, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - dbDesc, err := col.ByNameWithLeased(txn).Get().Database(ctx, "db1") + err = sql.TestingDescsTxn(ctx, srcServer, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + dbDesc, err := col.ByNameWithLeased(txn.KV()).Get().Database(ctx, "db1") require.NoError(t, err) require.Equal(t, 104, int(dbDesc.GetID())) - scDesc, err := col.ByNameWithLeased(txn).Get().Schema(ctx, dbDesc, "sc1") + scDesc, err := col.ByNameWithLeased(txn.KV()).Get().Schema(ctx, dbDesc, "sc1") require.NoError(t, err) require.Equal(t, 106, int(scDesc.GetID())) tbName := tree.MakeTableNameWithSchema("db1", "sc1", "tbl1") - _, tbDesc, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &tbName) + _, tbDesc, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &tbName) require.NoError(t, err) require.Equal(t, 107, int(tbDesc.GetID())) typName := tree.MakeQualifiedTypeName("db1", "sc1", "enum1") - _, typDesc, err := descs.PrefixAndType(ctx, col.ByNameWithLeased(txn).Get(), &typName) + _, typDesc, err 
:= descs.PrefixAndType(ctx, col.ByNameWithLeased(txn.KV()).Get(), &typName) require.NoError(t, err) require.Equal(t, 108, int(typDesc.GetID())) tbName = tree.MakeTableNameWithSchema("db1", "sc1", "sq1") - _, tbDesc, err = descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &tbName) + _, tbDesc, err = descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &tbName) require.NoError(t, err) require.Equal(t, 110, int(tbDesc.GetID())) - fnDesc, err := col.ByIDWithLeased(txn).WithoutNonPublic().Get().Function(ctx, descpb.ID(udfID)) + fnDesc, err := col.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Function(ctx, descpb.ID(udfID)) require.NoError(t, err) require.Equal(t, 111, int(fnDesc.GetID())) require.Equal(t, 104, int(fnDesc.GetParentID())) @@ -10551,31 +10539,31 @@ $$; require.Equal(t, 1, len(rows[0])) udfID, err = strconv.Atoi(rows[0][0]) require.NoError(t, err) - err = sql.TestingDescsTxn(ctx, srcServer, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - dbDesc, err := col.ByNameWithLeased(txn).Get().Database(ctx, "db1_new") + err = sql.TestingDescsTxn(ctx, srcServer, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + dbDesc, err := col.ByNameWithLeased(txn.KV()).Get().Database(ctx, "db1_new") require.NoError(t, err) require.Equal(t, 112, int(dbDesc.GetID())) - scDesc, err := col.ByNameWithLeased(txn).Get().Schema(ctx, dbDesc, "sc1") + scDesc, err := col.ByNameWithLeased(txn.KV()).Get().Schema(ctx, dbDesc, "sc1") require.NoError(t, err) require.Equal(t, 114, int(scDesc.GetID())) tbName := tree.MakeTableNameWithSchema("db1_new", "sc1", "tbl1") - _, tbDesc, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &tbName) + _, tbDesc, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &tbName) require.NoError(t, err) require.Equal(t, 115, int(tbDesc.GetID())) typName := tree.MakeQualifiedTypeName("db1_new", "sc1", "enum1") - _, typDesc, err := descs.PrefixAndType(ctx, 
col.ByNameWithLeased(txn).Get(), &typName) + _, typDesc, err := descs.PrefixAndType(ctx, col.ByNameWithLeased(txn.KV()).Get(), &typName) require.NoError(t, err) require.Equal(t, 116, int(typDesc.GetID())) tbName = tree.MakeTableNameWithSchema("db1_new", "sc1", "sq1") - _, tbDesc, err = descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &tbName) + _, tbDesc, err = descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &tbName) require.NoError(t, err) require.Equal(t, 118, int(tbDesc.GetID())) - fnDesc, err := col.ByIDWithLeased(txn).WithoutNonPublic().Get().Function(ctx, descpb.ID(udfID)) + fnDesc, err := col.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Function(ctx, descpb.ID(udfID)) require.NoError(t, err) require.Equal(t, 119, int(fnDesc.GetID())) require.Equal(t, 112, int(fnDesc.GetParentID())) @@ -10638,31 +10626,31 @@ $$; require.Equal(t, 1, len(rows[0])) udfID, err := strconv.Atoi(rows[0][0]) require.NoError(t, err) - err = sql.TestingDescsTxn(ctx, srcServer, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - dbDesc, err := col.ByNameWithLeased(txn).Get().Database(ctx, "db1") + err = sql.TestingDescsTxn(ctx, srcServer, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + dbDesc, err := col.ByNameWithLeased(txn.KV()).Get().Database(ctx, "db1") require.NoError(t, err) require.Equal(t, 104, int(dbDesc.GetID())) - scDesc, err := col.ByNameWithLeased(txn).Get().Schema(ctx, dbDesc, "sc1") + scDesc, err := col.ByNameWithLeased(txn.KV()).Get().Schema(ctx, dbDesc, "sc1") require.NoError(t, err) require.Equal(t, 106, int(scDesc.GetID())) tbName := tree.MakeTableNameWithSchema("db1", "sc1", "tbl1") - _, tbDesc, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &tbName) + _, tbDesc, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &tbName) require.NoError(t, err) require.Equal(t, 107, int(tbDesc.GetID())) typName := tree.MakeQualifiedTypeName("db1", "sc1", "enum1") - _, 
typDesc, err := descs.PrefixAndType(ctx, col.ByNameWithLeased(txn).Get(), &typName) + _, typDesc, err := descs.PrefixAndType(ctx, col.ByNameWithLeased(txn.KV()).Get(), &typName) require.NoError(t, err) require.Equal(t, 108, int(typDesc.GetID())) tbName = tree.MakeTableNameWithSchema("db1", "sc1", "sq1") - _, tbDesc, err = descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &tbName) + _, tbDesc, err = descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &tbName) require.NoError(t, err) require.Equal(t, 110, int(tbDesc.GetID())) - fnDesc, err := col.ByIDWithLeased(txn).WithoutNonPublic().Get().Function(ctx, descpb.ID(udfID)) + fnDesc, err := col.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Function(ctx, descpb.ID(udfID)) require.NoError(t, err) require.Equal(t, 111, int(fnDesc.GetID())) require.Equal(t, 104, int(fnDesc.GetParentID())) @@ -10692,31 +10680,31 @@ $$; require.Equal(t, 1, len(rows[0])) udfID, err = strconv.Atoi(rows[0][0]) require.NoError(t, err) - err = sql.TestingDescsTxn(ctx, tgtServer, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - dbDesc, err := col.ByNameWithLeased(txn).Get().Database(ctx, "db1") + err = sql.TestingDescsTxn(ctx, tgtServer, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + dbDesc, err := col.ByNameWithLeased(txn.KV()).Get().Database(ctx, "db1") require.NoError(t, err) require.Equal(t, 107, int(dbDesc.GetID())) - scDesc, err := col.ByNameWithLeased(txn).Get().Schema(ctx, dbDesc, "sc1") + scDesc, err := col.ByNameWithLeased(txn.KV()).Get().Schema(ctx, dbDesc, "sc1") require.NoError(t, err) require.Equal(t, 125, int(scDesc.GetID())) tbName := tree.MakeTableNameWithSchema("db1", "sc1", "tbl1") - _, tbDesc, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &tbName) + _, tbDesc, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &tbName) require.NoError(t, err) require.Equal(t, 126, int(tbDesc.GetID())) typName := 
tree.MakeQualifiedTypeName("db1", "sc1", "enum1") - _, typDesc, err := descs.PrefixAndType(ctx, col.ByNameWithLeased(txn).Get(), &typName) + _, typDesc, err := descs.PrefixAndType(ctx, col.ByNameWithLeased(txn.KV()).Get(), &typName) require.NoError(t, err) require.Equal(t, 127, int(typDesc.GetID())) tbName = tree.MakeTableNameWithSchema("db1", "sc1", "sq1") - _, tbDesc, err = descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &tbName) + _, tbDesc, err = descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &tbName) require.NoError(t, err) require.Equal(t, 129, int(tbDesc.GetID())) - fnDesc, err := col.ByIDWithLeased(txn).WithoutNonPublic().Get().Function(ctx, descpb.ID(udfID)) + fnDesc, err := col.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Function(ctx, descpb.ID(udfID)) require.NoError(t, err) require.Equal(t, 130, int(fnDesc.GetID())) require.Equal(t, 107, int(fnDesc.GetParentID())) diff --git a/pkg/ccl/backupccl/backupencryption/BUILD.bazel b/pkg/ccl/backupccl/backupencryption/BUILD.bazel index 71b26b359485..560da672cd1d 100644 --- a/pkg/ccl/backupccl/backupencryption/BUILD.bazel +++ b/pkg/ccl/backupccl/backupencryption/BUILD.bazel @@ -12,10 +12,9 @@ go_library( "//pkg/ccl/storageccl", "//pkg/cloud", "//pkg/jobs/jobspb", - "//pkg/kv", "//pkg/security/username", "//pkg/settings/cluster", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/util/ioctx", "//pkg/util/protoutil", "@com_github_cockroachdb_errors//:errors", diff --git a/pkg/ccl/backupccl/backupencryption/encryption.go b/pkg/ccl/backupccl/backupencryption/encryption.go index e309bbeb120a..19e162fc2647 100644 --- a/pkg/ccl/backupccl/backupencryption/encryption.go +++ b/pkg/ccl/backupccl/backupencryption/encryption.go @@ -22,10 +22,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/storageccl" "github.com/cockroachdb/cockroach/pkg/cloud" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" 
"github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/ioctx" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" @@ -56,28 +55,21 @@ type BackupKMSEnv struct { // Conf represents the ExternalIODirConfig that applies to the BackupKMSEnv. Conf *base.ExternalIODirConfig // DB is the database handle that applies to the BackupKMSEnv. - DB *kv.DB + DB isql.DB // Username is the use that applies to the BackupKMSEnv. Username username.SQLUsername - // InternalEx is the InternalExecutor that applies to the BackupKMSEnv. - InternalEx sqlutil.InternalExecutor } // MakeBackupKMSEnv returns an instance of `BackupKMSEnv` that defines the // environment in which KMS is configured and used. func MakeBackupKMSEnv( - settings *cluster.Settings, - conf *base.ExternalIODirConfig, - db *kv.DB, - user username.SQLUsername, - ie sqlutil.InternalExecutor, + settings *cluster.Settings, conf *base.ExternalIODirConfig, db isql.DB, user username.SQLUsername, ) BackupKMSEnv { return BackupKMSEnv{ - Settings: settings, - Conf: conf, - DB: db, - Username: user, - InternalEx: ie, + Settings: settings, + Conf: conf, + DB: db, + Username: user, } } @@ -94,7 +86,7 @@ func (p *BackupKMSEnv) KMSConfig() *base.ExternalIODirConfig { } // DBHandle implements the cloud.KMSEnv interface. -func (p *BackupKMSEnv) DBHandle() *kv.DB { +func (p *BackupKMSEnv) DBHandle() isql.DB { return p.DB } @@ -103,11 +95,6 @@ func (p *BackupKMSEnv) User() username.SQLUsername { return p.Username } -// InternalExecutor returns the internal executor associated with the KMSEnv. -func (p *BackupKMSEnv) InternalExecutor() sqlutil.InternalExecutor { - return p.InternalEx -} - type ( // PlaintextMasterKeyID is the plain text version of the master key ID. 
PlaintextMasterKeyID string diff --git a/pkg/ccl/backupccl/backupresolver/BUILD.bazel b/pkg/ccl/backupccl/backupresolver/BUILD.bazel index 2dc75e709127..40c7aa8f9c3d 100644 --- a/pkg/ccl/backupccl/backupresolver/BUILD.bazel +++ b/pkg/ccl/backupccl/backupresolver/BUILD.bazel @@ -8,13 +8,13 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/keys", - "//pkg/kv", "//pkg/sql", "//pkg/sql/catalog", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/descs", "//pkg/sql/catalog/resolver", "//pkg/sql/catalog/schemadesc", + "//pkg/sql/isql", "//pkg/sql/sem/catconstants", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", diff --git a/pkg/ccl/backupccl/backupresolver/targets.go b/pkg/ccl/backupccl/backupresolver/targets.go index a94d9a22d395..58d673bb8b95 100644 --- a/pkg/ccl/backupccl/backupresolver/targets.go +++ b/pkg/ccl/backupccl/backupresolver/targets.go @@ -14,13 +14,13 @@ import ( "sort" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" @@ -661,12 +661,12 @@ func DescriptorsMatchingTargets( func LoadAllDescs( ctx context.Context, execCfg *sql.ExecutorConfig, asOf hlc.Timestamp, ) (allDescs []catalog.Descriptor, _ error) { - if err := sql.DescsTxn(ctx, execCfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - err := txn.SetFixedTimestamp(ctx, asOf) + if err := sql.DescsTxn(ctx, execCfg, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + err 
:= txn.KV().SetFixedTimestamp(ctx, asOf) if err != nil { return err } - all, err := col.GetAllDescriptors(ctx, txn) + all, err := col.GetAllDescriptors(ctx, txn.KV()) allDescs = all.OrderedDescriptors() return err }); err != nil { diff --git a/pkg/ccl/backupccl/create_scheduled_backup.go b/pkg/ccl/backupccl/create_scheduled_backup.go index c927f28da50c..2daafe8262f9 100644 --- a/pkg/ccl/backupccl/create_scheduled_backup.go +++ b/pkg/ccl/backupccl/create_scheduled_backup.go @@ -19,7 +19,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/cloud" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/scheduledjobs/schedulebase" "github.com/cockroachdb/cockroach/pkg/security/username" @@ -172,7 +171,7 @@ func doCreateBackupSchedules( } } - env := sql.JobSchedulerEnv(p.ExecCfg()) + env := sql.JobSchedulerEnv(p.ExecCfg().JobsKnobs()) // Evaluate incremental and full recurrence. incRecurrence, err := schedulebase.ComputeScheduleRecurrence(env.Now(), eval.recurrence) @@ -285,13 +284,12 @@ func doCreateBackupSchedules( return err } - ex := p.ExecCfg().InternalExecutor - unpauseOnSuccessID := jobs.InvalidScheduleID var chainProtectedTimestampRecords bool // If needed, create incremental. 
var inc *jobs.ScheduledJob + scheduledJobs := jobs.ScheduledJobTxn(p.InternalSQLTxn()) var incScheduledBackupArgs *backuppb.ScheduledBackupExecutionArgs if incRecurrence != nil { chainProtectedTimestampRecords = scheduledBackupGCProtectionEnabled.Get(&p.ExecCfg().Settings.SV) @@ -314,7 +312,7 @@ func doCreateBackupSchedules( inc.Pause() inc.SetScheduleStatus("Waiting for initial backup to complete") - if err := inc.Create(ctx, ex, p.Txn()); err != nil { + if err := scheduledJobs.Create(ctx, inc); err != nil { return err } if err := emitSchedule(inc, backupNode, destinations, nil, /* incrementalFrom */ @@ -347,20 +345,22 @@ func doCreateBackupSchedules( } // Create the schedule (we need its ID to link dependent schedules below). - if err := full.Create(ctx, ex, p.Txn()); err != nil { + if err := scheduledJobs.Create(ctx, full); err != nil { return err } // If schedule creation has resulted in a full and incremental schedule then // we update both the schedules with the ID of the other "dependent" schedule. 
if incRecurrence != nil { - if err := setDependentSchedule(ctx, ex, fullScheduledBackupArgs, full, inc.ScheduleID(), - p.Txn()); err != nil { + if err := setDependentSchedule( + ctx, scheduledJobs, fullScheduledBackupArgs, full, inc.ScheduleID(), + ); err != nil { return errors.Wrap(err, "failed to update full schedule with dependent incremental schedule id") } - if err := setDependentSchedule(ctx, ex, incScheduledBackupArgs, inc, full.ScheduleID(), - p.Txn()); err != nil { + if err := setDependentSchedule( + ctx, scheduledJobs, incScheduledBackupArgs, inc, full.ScheduleID(), + ); err != nil { return errors.Wrap(err, "failed to update incremental schedule with dependent full schedule id") } @@ -373,11 +373,10 @@ func doCreateBackupSchedules( func setDependentSchedule( ctx context.Context, - ex *sql.InternalExecutor, + storage jobs.ScheduledJobStorage, scheduleExecutionArgs *backuppb.ScheduledBackupExecutionArgs, schedule *jobs.ScheduledJob, dependentID int64, - txn *kv.Txn, ) error { scheduleExecutionArgs.DependentScheduleID = dependentID any, err := pbtypes.MarshalAny(scheduleExecutionArgs) @@ -387,7 +386,7 @@ func setDependentSchedule( schedule.SetExecutionDetails( schedule.ExecutorType(), jobspb.ExecutionArguments{Args: any}, ) - return schedule.Update(ctx, ex, txn) + return storage.Update(ctx, schedule) } // checkForExistingBackupsInCollection checks that there are no existing backups @@ -536,7 +535,7 @@ func dryRunInvokeBackup( if err != nil { return eventpb.RecoveryEvent{}, err } - return invokeBackup(ctx, backupFn, p.ExecCfg().JobRegistry, p.Txn()) + return invokeBackup(ctx, backupFn, p.ExecCfg().JobRegistry, p.InternalSQLTxn()) } // makeScheduleBackupSpec prepares helper scheduledBackupSpec struct to assist in evaluation diff --git a/pkg/ccl/backupccl/create_scheduled_backup_test.go b/pkg/ccl/backupccl/create_scheduled_backup_test.go index 4bd41fccb20b..494c55524ad2 100644 --- a/pkg/ccl/backupccl/create_scheduled_backup_test.go +++ 
b/pkg/ccl/backupccl/create_scheduled_backup_test.go @@ -26,10 +26,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/jobs/jobstest" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/scheduledjobs/schedulebase" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" @@ -64,6 +66,10 @@ type testHelper struct { executeSchedules func() error } +func (th *testHelper) protectedTimestamps() protectedts.Manager { + return th.server.ExecutorConfig().(sql.ExecutorConfig).ProtectedTimestampProvider +} + // newTestHelper creates and initializes appropriate state for a test, // returning testHelper as well as a cleanup function. func newTestHelper(t *testing.T) (*testHelper, func()) { @@ -114,8 +120,8 @@ func newTestHelper(t *testing.T) (*testHelper, func()) { func (h *testHelper) loadSchedule(t *testing.T, scheduleID int64) *jobs.ScheduledJob { t.Helper() - loaded, err := jobs.LoadScheduledJob( - context.Background(), h.env, scheduleID, h.cfg.InternalExecutor, nil) + loaded, err := jobs.ScheduledJobDB(h.internalDB()). + Load(context.Background(), h.env, scheduleID) require.NoError(t, err) return loaded } @@ -157,7 +163,7 @@ func (h *testHelper) createBackupSchedule( var id int64 require.NoError(t, rows.Scan(&id, &unusedStr, &unusedStr, &unusedTS, &unusedStr, &unusedStr)) // Query system.scheduled_job table and load those schedules. 
- datums, cols, err := h.cfg.InternalExecutor.QueryRowExWithCols( + datums, cols, err := h.cfg.DB.Executor().QueryRowExWithCols( context.Background(), "sched-load", nil, sessiondata.RootUserSessionDataOverride, "SELECT * FROM system.scheduled_jobs WHERE schedule_id = $1", @@ -177,6 +183,10 @@ func (h *testHelper) createBackupSchedule( return schedules, nil } +func (th *testHelper) internalDB() descs.DB { + return th.server.InternalDB().(descs.DB) +} + func getScheduledBackupStatement(t *testing.T, arg *jobspb.ExecutionArguments) string { var backup backuppb.ScheduledBackupExecutionArgs require.NoError(t, pbtypes.UnmarshalAny(arg.Args, &backup)) @@ -709,7 +719,7 @@ INSERT INTO t1 values (-1), (10), (-100); // We'll be manipulating schedule time via th.env, but we can't fool actual backup // when it comes to AsOf time. So, override AsOf backup clause to be the current time. th.cfg.TestingKnobs.(*jobs.TestingKnobs).OverrideAsOfClause = func(clause *tree.AsOfClause, _ time.Time) { - expr, err := tree.MakeDTimestampTZ(th.cfg.DB.Clock().PhysicalTime(), time.Microsecond) + expr, err := tree.MakeDTimestampTZ(th.cfg.DB.KV().Clock().PhysicalTime(), time.Microsecond) require.NoError(t, err) clause.Expr = expr } @@ -810,9 +820,10 @@ INSERT INTO t1 values (-1), (10), (-100); if inc != nil { // Once the full backup completes, the incremental one should no longer be paused. - loadedInc, err := jobs.LoadScheduledJob( - context.Background(), th.env, inc.ScheduleID(), th.cfg.InternalExecutor, nil) + loadedInc, err := jobs.ScheduledJobDB(th.internalDB()). 
+ Load(context.Background(), th.env, inc.ScheduleID()) require.NoError(t, err) + require.False(t, loadedInc.IsPaused()) } @@ -899,7 +910,7 @@ func TestCreateBackupScheduleIfNotExists(t *testing.T) { const selectQuery = "SELECT label FROM [SHOW SCHEDULES] WHERE command->>'backup_statement' LIKE 'BACKUP%';" - rows, err := th.cfg.InternalExecutor.QueryBufferedEx( + rows, err := th.cfg.DB.Executor().QueryBufferedEx( context.Background(), "check-sched", nil, sessiondata.RootUserSessionDataOverride, selectQuery) @@ -912,7 +923,7 @@ func TestCreateBackupScheduleIfNotExists(t *testing.T) { th.sqlDB.Exec(t, fmt.Sprintf(createQuery, newScheduleLabel, collectionLocation)) - rows, err = th.cfg.InternalExecutor.QueryBufferedEx( + rows, err = th.cfg.DB.Executor().QueryBufferedEx( context.Background(), "check-sched2", nil, sessiondata.RootUserSessionDataOverride, selectQuery) @@ -963,7 +974,8 @@ INSERT INTO t values (1), (10), (100); // Adjust next run by the specified delta (which maybe negative). s := th.loadSchedule(t, id) s.SetNextRun(th.env.Now().Add(delta)) - require.NoError(t, s.Update(context.Background(), th.cfg.InternalExecutor, nil)) + schedules := jobs.ScheduledJobDB(th.internalDB()) + require.NoError(t, schedules.Update(context.Background(), s)) } // We'll be manipulating schedule time via th.env, but we can't fool actual backup @@ -971,7 +983,7 @@ INSERT INTO t values (1), (10), (100); useRealTimeAOST := func() func() { knobs := th.cfg.TestingKnobs.(*jobs.TestingKnobs) knobs.OverrideAsOfClause = func(clause *tree.AsOfClause, _ time.Time) { - expr, err := tree.MakeDTimestampTZ(th.cfg.DB.Clock().PhysicalTime(), time.Microsecond) + expr, err := tree.MakeDTimestampTZ(th.cfg.DB.KV().Clock().PhysicalTime(), time.Microsecond) require.NoError(t, err) clause.Expr = expr } @@ -1031,7 +1043,8 @@ INSERT INTO t values (1), (10), (100); s.SetScheduleDetails(jobspb.ScheduleDetails{ OnError: onError, }) - require.NoError(t, s.Update(context.Background(), th.cfg.InternalExecutor, 
nil)) + schedules := jobs.ScheduledJobDB(th.internalDB()) + require.NoError(t, schedules.Update(context.Background(), s)) } } @@ -1293,7 +1306,7 @@ func TestCreateScheduledBackupTelemetry(t *testing.T) { useRealTimeAOST := func() func() { knobs := th.cfg.TestingKnobs.(*jobs.TestingKnobs) knobs.OverrideAsOfClause = func(clause *tree.AsOfClause, stmtTimestamp time.Time) { - expr, err := tree.MakeDTimestampTZ(th.cfg.DB.Clock().PhysicalTime(), time.Microsecond) + expr, err := tree.MakeDTimestampTZ(th.cfg.DB.KV().Clock().PhysicalTime(), time.Microsecond) asOfInterval = expr.Time.UnixNano() - stmtTimestamp.UnixNano() require.NoError(t, err) clause.Expr = expr diff --git a/pkg/ccl/backupccl/restore_data_processor.go b/pkg/ccl/backupccl/restore_data_processor.go index 3d840f2abf34..a099d68c7a07 100644 --- a/pkg/ccl/backupccl/restore_data_processor.go +++ b/pkg/ccl/backupccl/restore_data_processor.go @@ -444,7 +444,7 @@ func (rd *restoreDataProcessor) processRestoreSpanEntry( var err error batcher, err = bulk.MakeSSTBatcher(ctx, "restore", - db, + db.KV(), evalCtx.Settings, disallowShadowingBelow, writeAtBatchTS, diff --git a/pkg/ccl/backupccl/restore_data_processor_test.go b/pkg/ccl/backupccl/restore_data_processor_test.go index ceb105736987..b597bd5c708d 100644 --- a/pkg/ccl/backupccl/restore_data_processor_test.go +++ b/pkg/ccl/backupccl/restore_data_processor_test.go @@ -34,6 +34,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" @@ -250,13 +251,11 @@ func runTestIngest(t *testing.T, init func(*cluster.Settings)) { evalCtx := eval.Context{Settings: s.ClusterSettings(), Tracer: s.AmbientCtx().Tracer} flowCtx := 
execinfra.FlowCtx{ Cfg: &execinfra.ServerConfig{ - DB: kvDB, + DB: s.InternalDB().(descs.DB), ExternalStorage: func(ctx context.Context, dest cloudpb.ExternalStorage, opts ...cloud.ExternalStorageOption) (cloud.ExternalStorage, error) { return cloud.MakeExternalStorage(ctx, dest, base.ExternalIODirConfig{}, s.ClusterSettings(), blobs.TestBlobServiceClient(s.ClusterSettings().ExternalIODir), - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, opts...) diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go index 8002181d0aee..b59fbb89e543 100644 --- a/pkg/ccl/backupccl/restore_job.go +++ b/pkg/ccl/backupccl/restore_job.go @@ -52,11 +52,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scbackup" "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -988,7 +988,7 @@ func createImportingDescriptors( if !details.PrepareCompleted { err := sql.DescsTxn(ctx, p.ExecCfg(), func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { // A couple of pieces of cleanup are required for multi-region databases. 
// First, we need to find all of the MULTIREGION_ENUMs types and remap the @@ -1049,7 +1049,7 @@ func createImportingDescriptors( ctx, desc.GetID(), regionConfig, - txn, + txn.KV(), p.ExecCfg(), descsCol, p.ExtendedEvalContext().Tracing.KVTracingEnabled(), @@ -1071,14 +1071,14 @@ func createImportingDescriptors( // Write the new descriptors which are set in the OFFLINE state. if err := ingesting.WriteDescriptors( - ctx, p.ExecCfg().Codec, txn, p.User(), descsCol, + ctx, p.ExecCfg().Codec, txn.KV(), p.User(), descsCol, databases, writtenSchemas, tables, writtenTypes, writtenFunctions, details.DescriptorCoverage, nil /* extra */, restoreTempSystemDB, ); err != nil { return errors.Wrapf(err, "restoring %d TableDescriptors from %d databases", len(tables), len(databases)) } - b := txn.NewBatch() + b := txn.KV().NewBatch() // For new schemas with existing parent databases, the schema map on the // database descriptor needs to be updated. @@ -1092,7 +1092,7 @@ func createImportingDescriptors( // Write the updated databases. for dbID, schemas := range existingDBsWithNewSchemas { log.Infof(ctx, "writing %d schema entries to database %d", len(schemas), dbID) - desc, err := descsCol.MutableByID(txn).Desc(ctx, dbID) + desc, err := descsCol.MutableByID(txn.KV()).Desc(ctx, dbID) if err != nil { return err } @@ -1112,7 +1112,7 @@ func createImportingDescriptors( // to the new tables being restored. for _, table := range mutableTables { // Collect all types used by this table. - dbDesc, err := descsCol.ByID(txn).WithoutDropped().Get().Database(ctx, table.GetParentID()) + dbDesc, err := descsCol.ByID(txn.KV()).WithoutDropped().Get().Database(ctx, table.GetParentID()) if err != nil { return err } @@ -1134,7 +1134,7 @@ func createImportingDescriptors( continue } // Otherwise, add a backreference to this table. 
- typDesc, err := descsCol.MutableByID(txn).Type(ctx, id) + typDesc, err := descsCol.MutableByID(txn.KV()).Type(ctx, id) if err != nil { return err } @@ -1146,7 +1146,7 @@ func createImportingDescriptors( } } } - if err := txn.Run(ctx, b); err != nil { + if err := txn.KV().Run(ctx, b); err != nil { return err } @@ -1158,7 +1158,7 @@ func createImportingDescriptors( if details.DescriptorCoverage != tree.AllDescriptors { for _, table := range tableDescs { if lc := table.GetLocalityConfig(); lc != nil { - desc, err := descsCol.ByID(txn).WithoutDropped().Get().Database(ctx, table.ParentID) + desc, err := descsCol.ByID(txn.KV()).WithoutDropped().Get().Database(ctx, table.ParentID) if err != nil { return err } @@ -1168,14 +1168,14 @@ func createImportingDescriptors( table.ID, table.ParentID) } - mutTable, err := descsCol.MutableByID(txn).Table(ctx, table.GetID()) + mutTable, err := descsCol.MutableByID(txn.KV()).Table(ctx, table.GetID()) if err != nil { return err } regionConfig, err := sql.SynthesizeRegionConfig( ctx, - txn, + txn.KV(), desc.GetID(), descsCol, sql.SynthesizeRegionConfigOptionIncludeOffline, @@ -1185,7 +1185,7 @@ func createImportingDescriptors( } if err := sql.ApplyZoneConfigForMultiRegionTable( ctx, - txn, + txn.KV(), p.ExecCfg(), p.ExtendedEvalContext().Tracing.KVTracingEnabled(), descsCol, @@ -1200,7 +1200,7 @@ func createImportingDescriptors( } if len(details.Tenants) > 0 { - initialTenantZoneConfig, err := sql.GetHydratedZoneConfigForTenantsRange(ctx, txn, descsCol) + initialTenantZoneConfig, err := sql.GetHydratedZoneConfigForTenantsRange(ctx, txn.KV(), descsCol) if err != nil { return err } @@ -1217,7 +1217,16 @@ func createImportingDescriptors( default: return errors.AssertionFailedf("unknown tenant state %v", tenant) } - if _, err := sql.CreateTenantRecord(ctx, p.ExecCfg(), txn, &tenant, initialTenantZoneConfig); err != nil { + spanConfigs := p.ExecCfg().SpanConfigKVAccessor.WithTxn(ctx, txn.KV()) + if _, err := sql.CreateTenantRecord( + ctx, + 
p.ExecCfg().Codec, + p.ExecCfg().Settings, + txn, + spanConfigs, + &tenant, + initialTenantZoneConfig, + ); err != nil { return err } } @@ -1240,7 +1249,7 @@ func createImportingDescriptors( } // Update the job once all descs have been prepared for ingestion. - err := r.job.SetDetails(ctx, txn, details) + err := r.job.WithTxn(txn).SetDetails(ctx, details) // Emit to the event log now that the job has finished preparing descs. emitRestoreJobEvent(ctx, p, jobs.StatusRunning, r.job) @@ -1515,8 +1524,12 @@ func (r *restoreResumer) doResume(ctx context.Context, execCtx interface{}) erro return err } - kmsEnv := backupencryption.MakeBackupKMSEnv(p.ExecCfg().Settings, &p.ExecCfg().ExternalIODirConfig, - p.ExecCfg().DB, p.User(), p.ExecCfg().InternalExecutor) + kmsEnv := backupencryption.MakeBackupKMSEnv( + p.ExecCfg().Settings, + &p.ExecCfg().ExternalIODirConfig, + p.ExecCfg().InternalDB, + p.User(), + ) backupManifests, latestBackupManifest, sqlDescs, memSize, err := loadBackupSQLDescs( ctx, &mem, p, details, details.Encryption, &kmsEnv, ) @@ -1579,10 +1592,12 @@ func (r *restoreResumer) doResume(ctx context.Context, execCtx interface{}) erro // public. // TODO (lucy): Ideally we'd just create the database in the public state in // the first place, as a special case. 
- publishDescriptors := func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, ie sqlutil.InternalExecutor) (err error) { - return r.publishDescriptors(ctx, txn, ie, p.ExecCfg(), p.User(), descsCol, details, nil) + publishDescriptors := func(ctx context.Context, txn descs.Txn) error { + return r.publishDescriptors( + ctx, p.ExecCfg().JobRegistry, p.ExecCfg().JobsKnobs(), txn, p.User(), details, nil, + ) } - if err := r.execCfg.InternalExecutorFactory.DescsTxnWithExecutor(ctx, r.execCfg.DB, nil /* sd */, publishDescriptors); err != nil { + if err := r.execCfg.InternalDB.DescsTxn(ctx, publishDescriptors); err != nil { return err } p.ExecCfg().JobRegistry.NotifyToAdoptJobs() @@ -1649,7 +1664,9 @@ func (r *restoreResumer) doResume(ctx context.Context, execCtx interface{}) erro resTotal.Add(res) if details.DescriptorCoverage == tree.AllDescriptors { - if err := r.restoreSystemTables(ctx, p.ExecCfg().DB, preData.systemTables); err != nil { + if err := r.restoreSystemTables( + ctx, p.ExecCfg().InternalDB, preData.systemTables, + ); err != nil { return err } // Reload the details as we may have updated the job. 
@@ -1710,7 +1727,7 @@ func (r *restoreResumer) doResume(ctx context.Context, execCtx interface{}) erro var devalidateIndexes map[descpb.ID][]descpb.IndexID if toValidate := len(details.RevalidateIndexes); toValidate > 0 { - if err := r.job.RunningStatus(ctx, nil /* txn */, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) { + if err := r.job.NoTxn().RunningStatus(ctx, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) { return jobs.RunningStatus(fmt.Sprintf("re-validating %d indexes", toValidate)), nil }); err != nil { return errors.Wrapf(err, "failed to update running status of job %d", errors.Safe(r.job.ID())) @@ -1722,10 +1739,13 @@ func (r *restoreResumer) doResume(ctx context.Context, execCtx interface{}) erro devalidateIndexes = bad } - publishDescriptors := func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, ie sqlutil.InternalExecutor) (err error) { - return r.publishDescriptors(ctx, txn, ie, p.ExecCfg(), p.User(), descsCol, details, devalidateIndexes) + publishDescriptors := func(ctx context.Context, txn descs.Txn) (err error) { + return r.publishDescriptors( + ctx, p.ExecCfg().JobRegistry, p.ExecCfg().JobsKnobs(), txn, p.User(), + details, devalidateIndexes, + ) } - if err := r.execCfg.InternalExecutorFactory.DescsTxnWithExecutor(ctx, r.execCfg.DB, nil /* sd */, publishDescriptors); err != nil { + if err := r.execCfg.InternalDB.DescsTxn(ctx, publishDescriptors); err != nil { return err } @@ -1748,7 +1768,9 @@ func (r *restoreResumer) doResume(ctx context.Context, execCtx interface{}) erro // includes the jobs that are being restored. As soon as we restore these // jobs, they become accessible to the user, and may start executing. We // need this to happen after the descriptors have been marked public. 
- if err := r.restoreSystemTables(ctx, p.ExecCfg().DB, mainData.systemTables); err != nil { + if err := r.restoreSystemTables( + ctx, p.ExecCfg().InternalDB, mainData.systemTables, + ); err != nil { return err } // Reload the details as we may have updated the job. @@ -1758,7 +1780,7 @@ func (r *restoreResumer) doResume(ctx context.Context, execCtx interface{}) erro return err } } else if isSystemUserRestore(details) { - if err := r.restoreSystemUsers(ctx, p.ExecCfg().DB, mainData.systemTables); err != nil { + if err := r.restoreSystemUsers(ctx, p.ExecCfg().InternalDB, mainData.systemTables); err != nil { return err } details = r.job.Details().(jobspb.RestoreDetails) @@ -1853,9 +1875,8 @@ func revalidateIndexes( // We don't actually need the 'historical' read the way the schema change does // since our table is offline. runner := descs.NewHistoricalInternalExecTxnRunner(hlc.Timestamp{}, func(ctx context.Context, fn descs.InternalExecFn) error { - return execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - ie := job.MakeSessionBoundInternalExecutor(sql.NewFakeSessionData(execCfg.SV())).(*sql.InternalExecutor) - return fn(ctx, txn, ie, nil /* descriptors */) + return execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return fn(ctx, txn, descs.FromTxn(txn)) }) }) @@ -2002,16 +2023,17 @@ func insertStats( restoreStatsInsertBatchSize = len(latestStats) } - if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - if err := stats.InsertNewStats(ctx, execCfg.Settings, execCfg.InternalExecutor, txn, - latestStats[:restoreStatsInsertBatchSize]); err != nil { + if err := execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + if err := stats.InsertNewStats( + ctx, execCfg.Settings, txn, latestStats[:restoreStatsInsertBatchSize], + ); err != nil { return errors.Wrapf(err, "inserting stats from backup") } // If this is the last batch, mark the stats insertion complete. 
if restoreStatsInsertBatchSize == len(latestStats) { details.StatsInserted = true - if err := job.SetDetails(ctx, txn, details); err != nil { + if err := job.WithTxn(txn).SetDetails(ctx, details); err != nil { return errors.Wrapf(err, "updating job marking stats insertion complete") } } @@ -2033,11 +2055,10 @@ func insertStats( // with a new value even if this transaction does not commit. func (r *restoreResumer) publishDescriptors( ctx context.Context, - txn *kv.Txn, - ie sqlutil.InternalExecutor, - execCfg *sql.ExecutorConfig, + jobsRegistry *jobs.Registry, + jobsKnobs *jobs.TestingKnobs, + txn descs.Txn, user username.SQLUsername, - descsCol *descs.Collection, details jobspb.RestoreDetails, devalidateIndexes map[descpb.ID][]descpb.IndexID, ) (err error) { @@ -2045,7 +2066,7 @@ func (r *restoreResumer) publishDescriptors( return nil } - if err := execCfg.JobRegistry.CheckPausepoint("restore.before_publishing_descriptors"); err != nil { + if err := jobsRegistry.CheckPausepoint("restore.before_publishing_descriptors"); err != nil { return err } @@ -2062,7 +2083,7 @@ func (r *restoreResumer) publishDescriptors( // Pre-fetch all the descriptors into the collection to avoid doing // round-trips per descriptor. - all, err := prefetchDescriptors(ctx, txn, descsCol, details) + all, err := prefetchDescriptors(ctx, txn.KV(), txn.Descriptors(), details) if err != nil { return err } @@ -2077,7 +2098,7 @@ func (r *restoreResumer) publishDescriptors( // Go through the descriptors and find any declarative schema change jobs // affecting them. 
if err := scbackup.CreateDeclarativeSchemaChangeJobs( - ctx, r.execCfg.JobRegistry, txn, ie, all, + ctx, r.execCfg.JobRegistry, txn, all, ); err != nil { return err } @@ -2115,8 +2136,8 @@ func (r *restoreResumer) publishDescriptors( if mutTable.HasRowLevelTTL() { j, err := sql.CreateRowLevelTTLScheduledJob( ctx, - execCfg, - txn, + jobsKnobs, + jobs.ScheduledJobTxn(txn), user, mutTable.GetID(), mutTable.GetRowLevelTTL(), @@ -2163,17 +2184,15 @@ func (r *restoreResumer) publishDescriptors( fn := all.LookupDescriptor(details.FunctionDescs[i].GetID()).(catalog.FunctionDescriptor) newFunctions = append(newFunctions, fn.FuncDesc()) } - b := txn.NewBatch() + b := txn.KV().NewBatch() if err := all.ForEachDescriptor(func(desc catalog.Descriptor) error { d := desc.(catalog.MutableDescriptor) d.SetPublic() - return descsCol.WriteDescToBatch( - ctx, kvTrace, d, b, - ) + return txn.Descriptors().WriteDescToBatch(ctx, kvTrace, d, b) }); err != nil { return err } - if err := txn.Run(ctx, b); err != nil { + if err := txn.KV().Run(ctx, b); err != nil { return errors.Wrap(err, "publishing tables") } @@ -2183,7 +2202,9 @@ func (r *restoreResumer) publishDescriptors( // If the tenant was backed up in an `ACTIVE` state then we must activate // the tenant as the final step of the restore. The tenant has already // been created at an earlier stage in the restore in an `ADD` state. 
- if err := sql.ActivateTenant(ctx, r.execCfg, txn, tenant.ID); err != nil { + if err := sql.ActivateTenant( + ctx, r.execCfg.Settings, r.execCfg.Codec, txn, tenant.ID, + ); err != nil { return err } case descpb.TenantInfo_DROP, descpb.TenantInfo_ADD: @@ -2201,7 +2222,7 @@ func (r *restoreResumer) publishDescriptors( details.SchemaDescs = newSchemas details.DatabaseDescs = newDBs details.FunctionDescs = newFunctions - if err := r.job.SetDetails(ctx, txn, details); err != nil { + if err := r.job.WithTxn(txn).SetDetails(ctx, details); err != nil { return errors.Wrap(err, "updating job details after publishing tables") } @@ -2268,7 +2289,7 @@ func emitRestoreJobEvent( ) { // Emit to the event log now that we have completed the prepare step. var restoreEvent eventpb.Restore - if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := p.ExecCfg().InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { return sql.LogEventForJobs(ctx, p.ExecCfg(), txn, &restoreEvent, int64(job.ID()), job.Payload(), p.User(), status) }); err != nil { @@ -2306,8 +2327,8 @@ func (r *restoreResumer) OnFailOrCancel( logJobCompletion(ctx, restoreJobEventType, r.job.ID(), false, jobErr) execCfg := execCtx.(sql.JobExecContext).ExecCfg() - if err := execCfg.InternalExecutorFactory.DescsTxnWithExecutor(ctx, execCfg.DB, p.SessionData(), func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, ie sqlutil.InternalExecutor, + if err := execCfg.InternalDB.Txn(ctx, func( + ctx context.Context, txn isql.Txn, ) error { for _, tenant := range details.Tenants { tenant.State = descpb.TenantInfo_DROP @@ -2318,25 +2339,29 @@ func (r *restoreResumer) OnFailOrCancel( } } - if err := r.dropDescriptors(ctx, execCfg.JobRegistry, execCfg.Codec, txn, descsCol, ie); err != nil { + if err := r.dropDescriptors( + ctx, execCfg.JobRegistry, execCfg.Codec, txn, descs.FromTxn(txn), + ); err != nil { return err } if details.DescriptorCoverage == tree.AllDescriptors { 
// We've dropped defaultdb and postgres in the planning phase, we must // recreate them now if the full cluster restore failed. - _, err := ie.Exec(ctx, "recreate-defaultdb", txn, "CREATE DATABASE IF NOT EXISTS defaultdb") + _, err := txn.Exec(ctx, "recreate-defaultdb", txn.KV(), + "CREATE DATABASE IF NOT EXISTS defaultdb") if err != nil { return err } - _, err = ie.Exec(ctx, "recreate-postgres", txn, "CREATE DATABASE IF NOT EXISTS postgres") + _, err = txn.Exec(ctx, "recreate-postgres", txn.KV(), + "CREATE DATABASE IF NOT EXISTS postgres") if err != nil { return err } } return nil - }); err != nil { + }, isql.WithSessionData(p.SessionData())); err != nil { return err } @@ -2360,9 +2385,8 @@ func (r *restoreResumer) dropDescriptors( ctx context.Context, jr *jobs.Registry, codec keys.SQLCodec, - txn *kv.Txn, + txn isql.Txn, descsCol *descs.Collection, - ie sqlutil.InternalExecutor, ) error { details := r.job.Details().(jobspb.RestoreDetails) @@ -2372,14 +2396,13 @@ func (r *restoreResumer) dropDescriptors( return nil } - b := txn.NewBatch() + b := txn.KV().NewBatch() const kvTrace = false - // Collect the tables into mutable versions. mutableTables := make([]*tabledesc.Mutable, len(details.TableDescs)) for i := range details.TableDescs { var err error - mutableTables[i], err = descsCol.MutableByID(txn).Table(ctx, details.TableDescs[i].ID) + mutableTables[i], err = descsCol.MutableByID(txn.KV()).Table(ctx, details.TableDescs[i].ID) if err != nil { return err } @@ -2398,7 +2421,7 @@ func (r *restoreResumer) dropDescriptors( // Remove any back references installed from existing types to tables being restored. if err := r.removeExistingTypeBackReferences( - ctx, txn, descsCol, b, mutableTables, &details, + ctx, txn.KV(), descsCol, b, mutableTables, &details, ); err != nil { return err } @@ -2408,6 +2431,8 @@ func (r *restoreResumer) dropDescriptors( // Set the drop time as 1 (ns in Unix time), so that the table gets GC'd // immediately. 
dropTime := int64(1) + scheduledJobs := jobs.ScheduledJobTxn(txn) + env := sql.JobSchedulerEnv(r.execCfg.JobsKnobs()) for i := range mutableTables { tableToDrop := mutableTables[i] tablesToGC = append(tablesToGC, tableToDrop.ID) @@ -2417,9 +2442,7 @@ func (r *restoreResumer) dropDescriptors( if tableToDrop.HasRowLevelTTL() { scheduleID := tableToDrop.RowLevelTTL.ScheduleID if scheduleID != 0 { - if err := sql.DeleteSchedule( - ctx, r.execCfg, txn, scheduleID, - ); err != nil { + if err := scheduledJobs.DeleteByID(ctx, env, scheduleID); err != nil { return err } } @@ -2434,7 +2457,9 @@ func (r *restoreResumer) dropDescriptors( // // NB: We can't set GC TTLs for non-system tenants currently. if codec.ForSystemTenant() { - if err := setGCTTLForDroppingTable(ctx, txn, descsCol, tableToDrop, ie); err != nil { + if err := setGCTTLForDroppingTable( + ctx, txn, descsCol, tableToDrop, + ); err != nil { return errors.Wrapf(err, "setting low GC TTL for table %q", tableToDrop.GetName()) } } @@ -2458,7 +2483,7 @@ func (r *restoreResumer) dropDescriptors( // TypeDescriptors don't have a GC job process, so we can just write them // as dropped here. 
typDesc := details.TypeDescs[i] - mutType, err := descsCol.MutableByID(txn).Type(ctx, typDesc.ID) + mutType, err := descsCol.MutableByID(txn.KV()).Type(ctx, typDesc.ID) if err != nil { return err } @@ -2474,7 +2499,7 @@ func (r *restoreResumer) dropDescriptors( for i := range details.FunctionDescs { fnDesc := details.FunctionDescs[i] - mutFn, err := descsCol.MutableByID(txn).Function(ctx, fnDesc.ID) + mutFn, err := descsCol.MutableByID(txn.KV()).Function(ctx, fnDesc.ID) if err != nil { return err } @@ -2520,7 +2545,7 @@ func (r *restoreResumer) dropDescriptors( for _, fn := range details.FunctionDescs { ignoredChildDescIDs[fn.ID] = struct{}{} } - all, err := descsCol.GetAllDescriptors(ctx, txn) + all, err := descsCol.GetAllDescriptors(ctx, txn.KV()) if err != nil { return err } @@ -2536,7 +2561,7 @@ func (r *restoreResumer) dropDescriptors( dbsWithDeletedSchemas := make(map[descpb.ID]dbWithDeletedSchemas) for _, schemaDesc := range details.SchemaDescs { // We need to ignore descriptors we just added since we haven't committed the txn that deletes these. 
- isSchemaEmpty, err := isSchemaEmpty(ctx, txn, schemaDesc.GetID(), allDescs, ignoredChildDescIDs) + isSchemaEmpty, err := isSchemaEmpty(ctx, txn.KV(), schemaDesc.GetID(), allDescs, ignoredChildDescIDs) if err != nil { return errors.Wrapf(err, "checking if schema %s is empty during restore cleanup", schemaDesc.GetName()) } @@ -2546,13 +2571,13 @@ func (r *restoreResumer) dropDescriptors( continue } - mutSchema, err := descsCol.MutableByID(txn).Desc(ctx, schemaDesc.GetID()) + mutSchema, err := descsCol.MutableByID(txn.KV()).Desc(ctx, schemaDesc.GetID()) if err != nil { return err } entry, hasEntry := dbsWithDeletedSchemas[schemaDesc.GetParentID()] if !hasEntry { - mutParent, err := descsCol.MutableByID(txn).Desc(ctx, schemaDesc.GetParentID()) + mutParent, err := descsCol.MutableByID(txn.KV()).Desc(ctx, schemaDesc.GetParentID()) if err != nil { return err } @@ -2614,7 +2639,7 @@ func (r *restoreResumer) dropDescriptors( for _, dbDesc := range details.DatabaseDescs { // We need to ignore descriptors we just added since we haven't committed the txn that deletes these. 
- isDBEmpty, err := isDatabaseEmpty(ctx, txn, dbDesc.GetID(), allDescs, ignoredChildDescIDs) + isDBEmpty, err := isDatabaseEmpty(ctx, txn.KV(), dbDesc.GetID(), allDescs, ignoredChildDescIDs) if err != nil { return errors.Wrapf(err, "checking if database %s is empty during restore cleanup", dbDesc.GetName()) } @@ -2623,7 +2648,7 @@ func (r *restoreResumer) dropDescriptors( continue } - db, err := descsCol.MutableByID(txn).Desc(ctx, dbDesc.GetID()) + db, err := descsCol.MutableByID(txn.KV()).Desc(ctx, dbDesc.GetID()) if err != nil { return err } @@ -2660,7 +2685,7 @@ func (r *restoreResumer) dropDescriptors( } } - if err := txn.Run(ctx, b); err != nil { + if err := txn.KV().Run(ctx, b); err != nil { return errors.Wrap(err, "dropping tables created at the start of restore caused by fail/cancel") } @@ -2668,21 +2693,17 @@ func (r *restoreResumer) dropDescriptors( } func setGCTTLForDroppingTable( - ctx context.Context, - txn *kv.Txn, - descsCol *descs.Collection, - tableToDrop *tabledesc.Mutable, - ie sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, tableToDrop *tabledesc.Mutable, ) error { log.VInfof(ctx, 2, "lowering TTL for table %q (%d)", tableToDrop.GetName(), tableToDrop.GetID()) // We get a mutable descriptor here because we are going to construct a // synthetic descriptor collection in which they are online. 
- dbDesc, err := descsCol.ByID(txn).Get().Database(ctx, tableToDrop.GetParentID()) + dbDesc, err := descsCol.ByID(txn.KV()).Get().Database(ctx, tableToDrop.GetParentID()) if err != nil { return err } - schemaDesc, err := descsCol.ByID(txn).Get().Schema(ctx, tableToDrop.GetParentSchemaID()) + schemaDesc, err := descsCol.ByID(txn.KV()).Get().Schema(ctx, tableToDrop.GetParentSchemaID()) if err != nil { return err } @@ -2709,8 +2730,8 @@ func setGCTTLForDroppingTable( } alterStmt := fmt.Sprintf("ALTER TABLE %s CONFIGURE ZONE USING gc.ttlseconds = 1", tableName.FQString()) - return ie.WithSyntheticDescriptors(syntheticDescriptors, func() error { - _, err := ie.Exec(ctx, "set-low-gcttl", txn, alterStmt) + return txn.WithSyntheticDescriptors(syntheticDescriptors, func() error { + _, err := txn.Exec(ctx, "set-low-gcttl", txn.KV(), alterStmt) return err }) } @@ -2799,14 +2820,13 @@ type systemTableNameWithConfig struct { // which are in a backup of system.users but do not currently exist (ignoring those who do) // and re-grant roles for users if the backup has system.role_members. 
func (r *restoreResumer) restoreSystemUsers( - ctx context.Context, db *kv.DB, systemTables []catalog.TableDescriptor, + ctx context.Context, db isql.DB, systemTables []catalog.TableDescriptor, ) error { - executor := r.execCfg.InternalExecutor - return db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { selectNonExistentUsers := "SELECT * FROM crdb_temp_system.users temp " + "WHERE NOT EXISTS (SELECT * FROM system.users u WHERE temp.username = u.username)" - users, err := executor.QueryBuffered(ctx, "get-users", - txn, selectNonExistentUsers) + users, err := txn.QueryBuffered(ctx, "get-users", + txn.KV(), selectNonExistentUsers) if err != nil { return err } @@ -2823,7 +2843,7 @@ func (r *restoreResumer) restoreSystemUsers( return err } args[3] = id - if _, err = executor.Exec(ctx, "insert-non-existent-users", txn, insertUser, + if _, err = txn.Exec(ctx, "insert-non-existent-users", txn.KV(), insertUser, args...); err != nil { return err } @@ -2834,8 +2854,8 @@ func (r *restoreResumer) restoreSystemUsers( if hasSystemRoleMembersTable(systemTables) { selectNonExistentRoleMembers := "SELECT * FROM crdb_temp_system.role_members temp_rm WHERE " + "NOT EXISTS (SELECT * FROM system.role_members rm WHERE temp_rm.role = rm.role AND temp_rm.member = rm.member)" - roleMembers, err := executor.QueryBuffered(ctx, "get-role-members", - txn, selectNonExistentRoleMembers) + roleMembers, err := txn.QueryBuffered(ctx, "get-role-members", + txn.KV(), selectNonExistentRoleMembers) if err != nil { return err } @@ -2854,7 +2874,7 @@ VALUES ($1, $2, $3, (SELECT user_id FROM system.users WHERE username = $1), (SEL if _, ok := newUsernames[member.String()]; ok { role := tree.MustBeDString(roleMember[0]) isAdmin := tree.MustBeDBool(roleMember[2]) - if _, err := executor.Exec(ctx, "insert-non-existent-role-members", txn, + if _, err := txn.Exec(ctx, "insert-non-existent-role-members", txn.KV(), insertRoleMember, 
role, member, isAdmin, ); err != nil { return err @@ -2866,7 +2886,7 @@ VALUES ($1, $2, $3, (SELECT user_id FROM system.users WHERE username = $1), (SEL if hasSystemRoleOptionsTable(systemTables) { selectNonExistentRoleOptions := "SELECT * FROM crdb_temp_system.role_options temp_ro WHERE " + "NOT EXISTS (SELECT * FROM system.role_options ro WHERE temp_ro.username = ro.username AND temp_ro.option = ro.option)" - roleOptions, err := executor.QueryBuffered(ctx, "get-role-options", txn, selectNonExistentRoleOptions) + roleOptions, err := txn.QueryBuffered(ctx, "get-role-options", txn.KV(), selectNonExistentRoleOptions) if err != nil { return err } @@ -2883,7 +2903,7 @@ VALUES ($1, $2, $3, (SELECT user_id FROM system.users WHERE username = $1), (SEL if roleOptionsHasIDColumn { args = append(args, roleID) } - if _, err = executor.Exec(ctx, "insert-non-existent-role-options", txn, + if _, err = txn.Exec(ctx, "insert-non-existent-role-options", txn.KV(), insertRoleOption, args...); err != nil { return err } @@ -2915,7 +2935,7 @@ func hasSystemTableByName(name string, systemTables []catalog.TableDescriptor) b // restoreSystemTables atomically replaces the contents of the system tables // with the data from the restored system tables. 
func (r *restoreResumer) restoreSystemTables( - ctx context.Context, db *kv.DB, tables []catalog.TableDescriptor, + ctx context.Context, db isql.DB, tables []catalog.TableDescriptor, ) error { details := r.job.Details().(jobspb.RestoreDetails) if details.SystemTablesMigrated == nil { @@ -2954,9 +2974,10 @@ func (r *restoreResumer) restoreSystemTables( continue } - if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - if err := systemTable.config.migrationFunc(ctx, r.execCfg, txn, - systemTable.stagingTableName, details.DescriptorRewrites); err != nil { + if err := db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + if err := systemTable.config.migrationFunc( + ctx, txn, systemTable.stagingTableName, details.DescriptorRewrites, + ); err != nil { return err } @@ -2964,23 +2985,26 @@ func (r *restoreResumer) restoreSystemTables( // restarts don't try to import data over our migrated data. This would // fail since the restored data would shadow the migrated keys. details.SystemTablesMigrated[systemTable.systemTableName] = true - return r.job.SetDetails(ctx, txn, details) + return r.job.WithTxn(txn).SetDetails(ctx, details) }); err != nil { return err } } - if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - txn.SetDebugName("system-restore-txn") + if err := db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + txn.KV().SetDebugName("system-restore-txn") restoreFunc := defaultSystemTableRestoreFunc if systemTable.config.customRestoreFunc != nil { restoreFunc = systemTable.config.customRestoreFunc log.Eventf(ctx, "using custom restore function for table %s", systemTable.systemTableName) } - + deps := customRestoreFuncDeps{ + settings: r.execCfg.Settings, + codec: r.execCfg.Codec, + } log.Eventf(ctx, "restoring system table %s", systemTable.systemTableName) - err := restoreFunc(ctx, r.execCfg, txn, systemTable.systemTableName, systemTable.stagingTableName) + err := restoreFunc(ctx, deps, txn, 
systemTable.systemTableName, systemTable.stagingTableName) if err != nil { return errors.Wrapf(err, "restoring system table %s", systemTable.systemTableName) } @@ -3000,7 +3024,7 @@ func (r *restoreResumer) restoreSystemTables( } func (r *restoreResumer) cleanupTempSystemTables(ctx context.Context) error { - executor := r.execCfg.InternalExecutor + executor := r.execCfg.InternalDB.Executor() // Check if the temp system database has already been dropped. This can happen // if the restore job fails after the system database has cleaned up. checkIfDatabaseExists := "SELECT database_name FROM [SHOW DATABASES] WHERE database_name=$1" diff --git a/pkg/ccl/backupccl/restore_old_versions_test.go b/pkg/ccl/backupccl/restore_old_versions_test.go index 759c8122eeca..62710c119fd4 100644 --- a/pkg/ccl/backupccl/restore_old_versions_test.go +++ b/pkg/ccl/backupccl/restore_old_versions_test.go @@ -21,7 +21,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql" @@ -30,6 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descbuilder" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -904,13 +904,13 @@ func TestRestoreWithDroppedSchemaCorruption(t *testing.T) { // Read descriptor without validation. 
execCfg := s.ExecutorConfig().(sql.ExecutorConfig) hasSameNameSchema := func(dbName string) (exists bool) { - require.NoError(t, sql.DescsTxn(ctx, &execCfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { + require.NoError(t, sql.DescsTxn(ctx, &execCfg, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { // Using this method to avoid validation. - id, err := col.LookupDatabaseID(ctx, txn, dbName) + id, err := col.LookupDatabaseID(ctx, txn.KV(), dbName) if err != nil { return err } - res, err := txn.Get(ctx, catalogkeys.MakeDescMetadataKey(execCfg.Codec, id)) + res, err := txn.KV().Get(ctx, catalogkeys.MakeDescMetadataKey(execCfg.Codec, id)) if err != nil { return err } @@ -1050,45 +1050,45 @@ func fullClusterRestoreUsersWithoutIDs(exportDir string) func(t *testing.T) { sqlDB.Exec(t, fmt.Sprintf("RESTORE FROM '%s'", localFoo)) - sqlDB.CheckQueryResults(t, `SELECT username, "hashedPassword", "isRole", user_id FROM system.users`, [][]string{ - {"admin", "", "true", "2"}, - {"root", "", "false", "1"}, - {"testrole", "NULL", "true", "100"}, - {"testuser", "NULL", "false", "101"}, - {"testuser2", "NULL", "false", "102"}, - {"testuser3", "NULL", "false", "103"}, - {"testuser4", "NULL", "false", "104"}, + sqlDB.CheckQueryResults(t, `SELECT username, "hashedPassword", "isRole", user_id >= 100 FROM system.users`, [][]string{ + {"admin", "", "true", "false"}, + {"root", "", "false", "false"}, + {"testrole", "NULL", "true", "true"}, + {"testuser", "NULL", "false", "true"}, + {"testuser2", "NULL", "false", "true"}, + {"testuser3", "NULL", "false", "true"}, + {"testuser4", "NULL", "false", "true"}, }) - sqlDB.CheckQueryResults(t, `SELECT * FROM system.role_options`, [][]string{ - {"testrole", "NOLOGIN", "NULL", "100"}, - {"testuser", "CREATEROLE", "NULL", "101"}, - {"testuser", "VALID UNTIL", "2021-01-10 00:00:00+00:00", "101"}, - {"testuser2", "CONTROLCHANGEFEED", "NULL", "102"}, - {"testuser2", "CONTROLJOB", "NULL", "102"}, - 
{"testuser2", "CREATEDB", "NULL", "102"}, - {"testuser2", "CREATELOGIN", "NULL", "102"}, - {"testuser2", "NOLOGIN", "NULL", "102"}, - {"testuser2", "VIEWACTIVITY", "NULL", "102"}, - {"testuser3", "CANCELQUERY", "NULL", "103"}, - {"testuser3", "MODIFYCLUSTERSETTING", "NULL", "103"}, - {"testuser3", "VIEWACTIVITYREDACTED", "NULL", "103"}, - {"testuser3", "VIEWCLUSTERSETTING", "NULL", "103"}, - {"testuser4", "NOSQLLOGIN", "NULL", "104"}, + sqlDB.CheckQueryResults(t, `SELECT username, option, value FROM system.role_options`, [][]string{ + {"testrole", "NOLOGIN", "NULL"}, + {"testuser", "CREATEROLE", "NULL"}, + {"testuser", "VALID UNTIL", "2021-01-10 00:00:00+00:00"}, + {"testuser2", "CONTROLCHANGEFEED", "NULL"}, + {"testuser2", "CONTROLJOB", "NULL"}, + {"testuser2", "CREATEDB", "NULL"}, + {"testuser2", "CREATELOGIN", "NULL"}, + {"testuser2", "NOLOGIN", "NULL"}, + {"testuser2", "VIEWACTIVITY", "NULL"}, + {"testuser3", "CANCELQUERY", "NULL"}, + {"testuser3", "MODIFYCLUSTERSETTING", "NULL"}, + {"testuser3", "VIEWACTIVITYREDACTED", "NULL"}, + {"testuser3", "VIEWCLUSTERSETTING", "NULL"}, + {"testuser4", "NOSQLLOGIN", "NULL"}, }) // Verify that the next user we create uses the next biggest ID. 
sqlDB.Exec(t, "CREATE USER testuser5") - sqlDB.CheckQueryResults(t, `SELECT username, "hashedPassword", "isRole", user_id FROM system.users`, [][]string{ - {"admin", "", "true", "2"}, - {"root", "", "false", "1"}, - {"testrole", "NULL", "true", "100"}, - {"testuser", "NULL", "false", "101"}, - {"testuser2", "NULL", "false", "102"}, - {"testuser3", "NULL", "false", "103"}, - {"testuser4", "NULL", "false", "104"}, - {"testuser5", "NULL", "false", "105"}, + sqlDB.CheckQueryResults(t, `SELECT username, "hashedPassword", "isRole" FROM system.users`, [][]string{ + {"admin", "", "true"}, + {"root", "", "false"}, + {"testrole", "NULL", "true"}, + {"testuser", "NULL", "false"}, + {"testuser2", "NULL", "false"}, + {"testuser3", "NULL", "false"}, + {"testuser4", "NULL", "false"}, + {"testuser5", "NULL", "false"}, }) } } diff --git a/pkg/ccl/backupccl/restore_planning.go b/pkg/ccl/backupccl/restore_planning.go index 7f695cc6724a..2a51efa522ef 100644 --- a/pkg/ccl/backupccl/restore_planning.go +++ b/pkg/ccl/backupccl/restore_planning.go @@ -51,6 +51,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" "github.com/cockroachdb/cockroach/pkg/sql/exprutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice" @@ -58,8 +59,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/roleoption" "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/syntheticprivilege" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -283,10 +284,12 @@ 
func allocateDescriptorRewrites( // Fail fast if the necessary databases don't exist or are otherwise // incompatible with this restore. - if err := sql.DescsTxn(ctx, p.ExecCfg(), func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { + if err := func() error { + txn := p.InternalSQLTxn() + col := txn.Descriptors() // Check that any DBs being restored do _not_ exist. for name := range restoreDBNames { - dbID, err := col.LookupDatabaseID(ctx, txn, name) + dbID, err := col.LookupDatabaseID(ctx, txn.KV(), name) if err != nil { return err } @@ -302,7 +305,7 @@ func allocateDescriptorRewrites( continue } - targetDB, err := resolveTargetDB(ctx, txn, p, databasesByID, intoDB, descriptorCoverage, sc) + targetDB, err := resolveTargetDB(ctx, databasesByID, intoDB, descriptorCoverage, sc) if err != nil { return err } @@ -311,7 +314,7 @@ func allocateDescriptorRewrites( needsNewParentIDs[targetDB] = append(needsNewParentIDs[targetDB], sc.ID) } else { // Look up the parent database's ID. - parentID, parentDB, err := getDatabaseIDAndDesc(ctx, txn, col, targetDB) + parentID, parentDB, err := getDatabaseIDAndDesc(ctx, txn.KV(), col, targetDB) if err != nil { return err } @@ -323,7 +326,7 @@ func allocateDescriptorRewrites( } // See if there is an existing schema with the same name. - id, err := col.LookupSchemaID(ctx, txn, parentID, sc.Name) + id, err := col.LookupSchemaID(ctx, txn.KV(), parentID, sc.Name) if err != nil { return err } @@ -333,7 +336,7 @@ func allocateDescriptorRewrites( } else { // If we found an existing schema, then we need to remap all references // to this schema to the existing one. 
- desc, err := col.ByID(txn).Get().Schema(ctx, id) + desc, err := col.ByID(txn.KV()).Get().Schema(ctx, id) if err != nil { return err } @@ -352,8 +355,7 @@ func allocateDescriptorRewrites( continue } - targetDB, err := resolveTargetDB(ctx, txn, p, databasesByID, intoDB, descriptorCoverage, - table) + targetDB, err := resolveTargetDB(ctx, databasesByID, intoDB, descriptorCoverage, table) if err != nil { return err } @@ -363,7 +365,7 @@ func allocateDescriptorRewrites( } else { var parentID descpb.ID { - newParentID, err := col.LookupDatabaseID(ctx, txn, targetDB) + newParentID, err := col.LookupDatabaseID(ctx, txn.KV(), targetDB) if err != nil { return err } @@ -376,13 +378,13 @@ func allocateDescriptorRewrites( // Check that the table name is _not_ in use. // This would fail the CPut later anyway, but this yields a prettier error. tableName := tree.NewUnqualifiedTableName(tree.Name(table.GetName())) - err := descs.CheckObjectNameCollision(ctx, col, txn, parentID, table.GetParentSchemaID(), tableName) + err := descs.CheckObjectNameCollision(ctx, col, txn.KV(), parentID, table.GetParentSchemaID(), tableName) if err != nil { return err } // Check privileges. - parentDB, err := col.ByID(txn).Get().Database(ctx, parentID) + parentDB, err := col.ByID(txn.KV()).Get().Database(ctx, parentID) if err != nil { return errors.Wrapf(err, "failed to lookup parent DB %d", errors.Safe(parentID)) @@ -397,7 +399,7 @@ func allocateDescriptorRewrites( // We're restoring a table and not its parent database. We may block // restoring multi-region tables to multi-region databases since // regions may mismatch. 
- if err := checkMultiRegionCompatible(ctx, txn, col, table, parentDB); err != nil { + if err := checkMultiRegionCompatible(ctx, txn.KV(), col, table, parentDB); err != nil { return pgerror.WithCandidateCode(err, pgcode.FeatureNotSupported) } @@ -423,7 +425,7 @@ func allocateDescriptorRewrites( continue } - targetDB, err := resolveTargetDB(ctx, txn, p, databasesByID, intoDB, descriptorCoverage, typ) + targetDB, err := resolveTargetDB(ctx, databasesByID, intoDB, descriptorCoverage, typ) if err != nil { return err } @@ -438,7 +440,7 @@ func allocateDescriptorRewrites( } // Look up the parent database's ID. - parentID, err := col.LookupDatabaseID(ctx, txn, targetDB) + parentID, err := col.LookupDatabaseID(ctx, txn.KV(), targetDB) if err != nil { return err } @@ -447,7 +449,7 @@ func allocateDescriptorRewrites( targetDB, typ.Name) } // Check privileges on the parent DB. - parentDB, err := col.ByID(txn).Get().Database(ctx, parentID) + parentDB, err := col.ByID(txn.KV()).Get().Database(ctx, parentID) if err != nil { return errors.Wrapf(err, "failed to lookup parent DB %d", errors.Safe(parentID)) @@ -465,7 +467,7 @@ func allocateDescriptorRewrites( desc, err := descs.GetDescriptorCollidingWithObjectName( ctx, col, - txn, + txn.KV(), parentID, getParentSchemaID(typ), typ.Name, @@ -491,7 +493,7 @@ func allocateDescriptorRewrites( // Ensure that there isn't a collision with the array type name. 
arrTyp := typesByID[typ.ArrayTypeID] typeName := tree.NewUnqualifiedTypeName(arrTyp.GetName()) - err = descs.CheckObjectNameCollision(ctx, col, txn, parentID, getParentSchemaID(typ), typeName) + err = descs.CheckObjectNameCollision(ctx, col, txn.KV(), parentID, getParentSchemaID(typ), typeName) if err != nil { return errors.Wrapf(err, "name collision for %q's array type", typ.Name) } @@ -554,7 +556,7 @@ func allocateDescriptorRewrites( return errors.AssertionFailedf("function descriptors seen when restoring tables") } - targetDB, err := resolveTargetDB(ctx, txn, p, databasesByID, intoDB, descriptorCoverage, function) + targetDB, err := resolveTargetDB(ctx, databasesByID, intoDB, descriptorCoverage, function) if err != nil { return err } @@ -566,7 +568,7 @@ func allocateDescriptorRewrites( } } return nil - }); err != nil { + }(); err != nil { return nil, err } @@ -706,27 +708,24 @@ func getDatabaseIDAndDesc( // If we're doing a full cluster restore - to treat defaultdb and postgres // as regular databases, we drop them before restoring them again in the // restore. 
-func dropDefaultUserDBs(ctx context.Context, execCfg *sql.ExecutorConfig) error { - return execCfg.InternalExecutorFactory.DescsTxnWithExecutor(ctx, execCfg.DB, nil /* session data */, func( - ctx context.Context, txn *kv.Txn, _ *descs.Collection, ie sqlutil.InternalExecutor, - ) error { - _, err := ie.Exec(ctx, "drop-defaultdb", txn, "DROP DATABASE IF EXISTS defaultdb") - if err != nil { - return err - } - - _, err = ie.Exec(ctx, "drop-postgres", txn, "DROP DATABASE IF EXISTS postgres") - if err != nil { - return err - } - return nil - }) +func dropDefaultUserDBs(ctx context.Context, txn isql.Txn) error { + if _, err := txn.ExecEx( + ctx, "drop-defaultdb", txn.KV(), + sessiondata.NodeUserSessionDataOverride, + "DROP DATABASE IF EXISTS defaultdb", + ); err != nil { + return err + } + _, err := txn.ExecEx( + ctx, "drop-postgres", txn.KV(), + sessiondata.NodeUserSessionDataOverride, + "DROP DATABASE IF EXISTS postgres", + ) + return err } func resolveTargetDB( ctx context.Context, - txn *kv.Txn, - p sql.PlanHookState, databasesByID map[descpb.ID]*dbdesc.Mutable, intoDB string, descriptorCoverage tree.DescriptorCoverage, @@ -1388,7 +1387,7 @@ func doRestorePlan( if restoreStmt.DescriptorCoverage == tree.AllDescriptors { // We do this before resolving the backup manifest since resolving the // backup manifest can take a while. 
- if err := checkForConflictingDescriptors(ctx, p.ExecCfg()); err != nil { + if err := checkForConflictingDescriptors(ctx, p.InternalSQLTxn()); err != nil { return err } } @@ -1458,8 +1457,9 @@ func doRestorePlan( }() ioConf := baseStores[0].ExternalIOConf() - kmsEnv := backupencryption.MakeBackupKMSEnv(p.ExecCfg().Settings, &ioConf, - p.ExecCfg().DB, p.User(), p.ExecCfg().InternalExecutor) + kmsEnv := backupencryption.MakeBackupKMSEnv( + p.ExecCfg().Settings, &ioConf, p.ExecCfg().InternalDB, p.User(), + ) var encryption *jobspb.BackupEncryptionOptions if restoreStmt.Options.EncryptionPassphrase != nil { @@ -1658,8 +1658,9 @@ func doRestorePlan( if len(tenants) != 1 { return errors.Errorf("%q option can only be used when restoring a single tenant", restoreOptAsTenant) } - res, err := p.ExecCfg().InternalExecutor.QueryRow( + res, err := p.InternalSQLTxn().QueryRowEx( ctx, "restore-lookup-tenant", p.Txn(), + sessiondata.NodeUserSessionDataOverride, `SELECT active FROM system.tenants WHERE id = $1`, newTenantID.ToUint64(), ) if err != nil { @@ -1676,8 +1677,9 @@ func doRestorePlan( oldTenantID = &old } else { for _, i := range tenants { - res, err := p.ExecCfg().InternalExecutor.QueryRow( + res, err := p.InternalSQLTxn().QueryRowEx( ctx, "restore-lookup-tenant", p.Txn(), + sessiondata.NodeUserSessionDataOverride, `SELECT active FROM system.tenants WHERE id = $1`, i.ID, ) if err != nil { @@ -1748,7 +1750,7 @@ func doRestorePlan( // This is done so that they can be restored the same way any other user // defined database would be restored from the backup. if restoreStmt.DescriptorCoverage == tree.AllDescriptors { - if err := dropDefaultUserDBs(ctx, p.ExecCfg()); err != nil { + if err := dropDefaultUserDBs(ctx, p.InternalSQLTxn()); err != nil { return err } } @@ -1886,7 +1888,8 @@ func doRestorePlan( // We do not wait for the job to finish. 
jobID := p.ExecCfg().JobRegistry.MakeJobID() _, err := p.ExecCfg().JobRegistry.CreateAdoptableJobWithTxn( - ctx, jr, jobID, p.Txn()) + ctx, jr, jobID, p.InternalSQLTxn(), + ) if err != nil { return err } @@ -1913,7 +1916,7 @@ func doRestorePlan( } }() jobID := p.ExecCfg().JobRegistry.MakeJobID() - if err := p.ExecCfg().JobRegistry.CreateStartableJobWithTxn(ctx, &sj, jobID, plannerTxn, jr); err != nil { + if err := p.ExecCfg().JobRegistry.CreateStartableJobWithTxn(ctx, &sj, jobID, p.InternalSQLTxn(), jr); err != nil { return err } @@ -1925,6 +1928,19 @@ func doRestorePlan( }(); err != nil { return err } + // Release all descriptor leases here. We need to do this because we're + // about to kick off a job which is going to potentially rewrite every + // descriptor. Note that we committed the underlying transaction in the + // above closure -- so we're not using any leases anymore, but we might + // be holding some because some sql queries might have been executed by + // this transaction (indeed some certainly were when we created the job + // we're going to run). + // + // This is all a bit of a hack to deal with the fact that we want to + // return results as part of this statement and the usual machinery for + // releasing leases assumes that that does not happen during statement + // execution. + p.InternalSQLTxn().Descriptors().ReleaseAll(ctx) collectRestoreTelemetry(ctx, sj.ID(), restoreDetails, intoDB, newDBName, subdir, restoreStmt, descsByTablePattern, restoreDBs, asOfInterval, debugPauseOn, p.SessionData().ApplicationName) if err := sj.Start(ctx); err != nil { @@ -1968,19 +1984,13 @@ func collectRestoreTelemetry( // create a conflict when doing a full cluster restore. // // Because we remap all descriptors, we only care about namespace conflicts. 
-func checkForConflictingDescriptors(ctx context.Context, execCfg *sql.ExecutorConfig) error { +func checkForConflictingDescriptors(ctx context.Context, txn descs.Txn) error { var allDescs []catalog.Descriptor - if err := sql.DescsTxn(ctx, execCfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { - txn.SetDebugName("count-user-descs") - all, err := col.GetAllDescriptors(ctx, txn) - if err != nil { - return err - } - allDescs = all.OrderedDescriptors() - return err - }); err != nil { + all, err := txn.Descriptors().GetAllDescriptors(ctx, txn.KV()) + if err != nil { return errors.Wrap(err, "looking up user descriptors during restore") } + allDescs = all.OrderedDescriptors() if allUserDescs := filteredUserCreatedDescriptors(allDescs); len(allUserDescs) > 0 { userDescriptorNames := make([]string, 0, 20) for i, desc := range allUserDescs { diff --git a/pkg/ccl/backupccl/restore_schema_change_creation.go b/pkg/ccl/backupccl/restore_schema_change_creation.go index 092ca88d2099..a2a6159887d0 100644 --- a/pkg/ccl/backupccl/restore_schema_change_creation.go +++ b/pkg/ccl/backupccl/restore_schema_change_creation.go @@ -16,12 +16,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/catalog" descpb "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) @@ -121,7 +121,7 @@ func createTypeChangeJobFromDesc( ctx context.Context, jr *jobs.Registry, codec keys.SQLCodec, - txn *kv.Txn, + txn isql.Txn, user username.SQLUsername, typ catalog.TypeDescriptor, ) error { @@ -162,7 
+162,7 @@ func createSchemaChangeJobsFromMutations( ctx context.Context, jr *jobs.Registry, codec keys.SQLCodec, - txn *kv.Txn, + txn isql.Txn, username username.SQLUsername, tableDesc *tabledesc.Mutable, ) error { diff --git a/pkg/ccl/backupccl/schedule_exec.go b/pkg/ccl/backupccl/schedule_exec.go index 0e9efbe960ec..9c7ac275187f 100644 --- a/pkg/ccl/backupccl/schedule_exec.go +++ b/pkg/ccl/backupccl/schedule_exec.go @@ -15,14 +15,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/backupccl/backuppb" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/scheduledjobs/schedulebase" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" @@ -50,12 +49,12 @@ var _ jobs.ScheduledJobExecutor = &scheduledBackupExecutor{} // ExecuteJob implements jobs.ScheduledJobExecutor interface. 
func (e *scheduledBackupExecutor) ExecuteJob( ctx context.Context, + txn isql.Txn, cfg *scheduledjobs.JobExecutionConfig, env scheduledjobs.JobSchedulerEnv, - sj *jobs.ScheduledJob, - txn *kv.Txn, + schedule *jobs.ScheduledJob, ) error { - if err := e.executeBackup(ctx, cfg, sj, txn); err != nil { + if err := e.executeBackup(ctx, cfg, schedule, txn); err != nil { e.metrics.NumFailed.Inc(1) return err } @@ -64,7 +63,7 @@ func (e *scheduledBackupExecutor) ExecuteJob( } func (e *scheduledBackupExecutor) executeBackup( - ctx context.Context, cfg *scheduledjobs.JobExecutionConfig, sj *jobs.ScheduledJob, txn *kv.Txn, + ctx context.Context, cfg *scheduledjobs.JobExecutionConfig, sj *jobs.ScheduledJob, txn isql.Txn, ) error { backupStmt, err := extractBackupStatement(sj) if err != nil { @@ -96,7 +95,7 @@ func (e *scheduledBackupExecutor) executeBackup( sj.ScheduleID(), tree.AsString(backupStmt)) // Invoke backup plan hook. - hook, cleanup := cfg.PlanHookMaker("exec-backup", txn, sj.Owner()) + hook, cleanup := cfg.PlanHookMaker("exec-backup", txn.KV(), sj.Owner()) defer cleanup() if knobs, ok := cfg.TestingKnobs.(*jobs.TestingKnobs); ok { @@ -114,7 +113,7 @@ func (e *scheduledBackupExecutor) executeBackup( } func invokeBackup( - ctx context.Context, backupFn sql.PlanHookRowFn, registry *jobs.Registry, txn *kv.Txn, + ctx context.Context, backupFn sql.PlanHookRowFn, registry *jobs.Registry, txn isql.Txn, ) (eventpb.RecoveryEvent, error) { resultCh := make(chan tree.Datums) // No need to close g := ctxgroup.WithContext(ctx) @@ -157,18 +156,17 @@ func planBackup( // NotifyJobTermination implements jobs.ScheduledJobExecutor interface. 
func (e *scheduledBackupExecutor) NotifyJobTermination( ctx context.Context, + txn isql.Txn, jobID jobspb.JobID, jobStatus jobs.Status, details jobspb.Details, env scheduledjobs.JobSchedulerEnv, schedule *jobs.ScheduledJob, - ex sqlutil.InternalExecutor, - txn *kv.Txn, ) error { if jobStatus == jobs.StatusSucceeded { e.metrics.NumSucceeded.Inc(1) log.Infof(ctx, "backup job %d scheduled by %d succeeded", jobID, schedule.ScheduleID()) - return e.backupSucceeded(ctx, schedule, details, env, ex, txn) + return e.backupSucceeded(ctx, jobs.ScheduledJobTxn(txn), schedule, details, env) } e.metrics.NumFailed.Inc(1) @@ -181,12 +179,7 @@ func (e *scheduledBackupExecutor) NotifyJobTermination( } func (e *scheduledBackupExecutor) GetCreateScheduleStatement( - ctx context.Context, - env scheduledjobs.JobSchedulerEnv, - txn *kv.Txn, - descsCol *descs.Collection, - sj *jobs.ScheduledJob, - ex sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, env scheduledjobs.JobSchedulerEnv, sj *jobs.ScheduledJob, ) (string, error) { backupNode, err := extractBackupStatement(sj) if err != nil { @@ -204,7 +197,8 @@ func (e *scheduledBackupExecutor) GetCreateScheduleStatement( // Check if sj has a dependent full or incremental schedule associated with it. var dependentSchedule *jobs.ScheduledJob if args.DependentScheduleID != 0 { - dependentSchedule, err = jobs.LoadScheduledJob(ctx, env, args.DependentScheduleID, ex, txn) + dependentSchedule, err = jobs.ScheduledJobTxn(txn). 
+ Load(ctx, env, args.DependentScheduleID) if err != nil { return "", err } @@ -331,11 +325,10 @@ func (e *scheduledBackupExecutor) GetCreateScheduleStatement( func (e *scheduledBackupExecutor) backupSucceeded( ctx context.Context, + txn jobs.ScheduledJobStorage, schedule *jobs.ScheduledJob, details jobspb.Details, env scheduledjobs.JobSchedulerEnv, - ex sqlutil.InternalExecutor, - txn *kv.Txn, ) error { args := &backuppb.ScheduledBackupExecutionArgs{} if err := pbtypes.UnmarshalAny(schedule.ExecutionArgs().Args, args); err != nil { @@ -352,7 +345,7 @@ func (e *scheduledBackupExecutor) backupSucceeded( return nil } - s, err := jobs.LoadScheduledJob(ctx, env, args.UnpauseOnSuccess, ex, txn) + s, err := txn.Load(ctx, env, args.UnpauseOnSuccess) if err != nil { if jobs.HasScheduledJobNotFoundError(err) { log.Warningf(ctx, "cannot find schedule %d to unpause; it may have been dropped", @@ -367,7 +360,7 @@ func (e *scheduledBackupExecutor) backupSucceeded( return err } } - if err := s.Update(ctx, ex, txn); err != nil { + if err := txn.Update(ctx, s); err != nil { return err } @@ -427,7 +420,7 @@ func unlinkOrDropDependentSchedule( ctx context.Context, scheduleControllerEnv scheduledjobs.ScheduleControllerEnv, env scheduledjobs.JobSchedulerEnv, - txn *kv.Txn, + txn isql.Txn, args *backuppb.ScheduledBackupExecutionArgs, ) (int, error) { if args.DependentScheduleID == 0 { @@ -435,8 +428,9 @@ func unlinkOrDropDependentSchedule( } // Load the dependent schedule. 
- dependentSj, dependentArgs, err := getScheduledBackupExecutionArgsFromSchedule(ctx, env, txn, - scheduleControllerEnv.InternalExecutor().(*sql.InternalExecutor), args.DependentScheduleID) + dependentSj, dependentArgs, err := getScheduledBackupExecutionArgsFromSchedule( + ctx, env, jobs.ScheduledJobTxn(txn), args.DependentScheduleID, + ) if err != nil { if jobs.HasScheduledJobNotFoundError(err) { log.Warningf(ctx, "failed to resolve dependent schedule %d", args.DependentScheduleID) @@ -452,12 +446,12 @@ func unlinkOrDropDependentSchedule( return 0, err } dependentSj.SetExecutionDetails(dependentSj.ExecutorType(), jobspb.ExecutionArguments{Args: any}) - if err := dependentSj.Delete(ctx, scheduleControllerEnv.InternalExecutor(), txn); err != nil { + + if err := jobs.ScheduledJobTxn(txn).Delete(ctx, dependentSj); err != nil { return 0, err } - return 1, releaseProtectedTimestamp(ctx, txn, scheduleControllerEnv.PTSProvider(), - dependentArgs.ProtectedTimestampRecord) + return 1, releaseProtectedTimestamp(ctx, scheduleControllerEnv.PTSProvider(), dependentArgs.ProtectedTimestampRecord) } // Clear the DependentID field since we are dropping the record associated @@ -468,8 +462,11 @@ func unlinkOrDropDependentSchedule( if err != nil { return 0, err } - dependentSj.SetExecutionDetails(dependentSj.ExecutorType(), jobspb.ExecutionArguments{Args: any}) - return 0, dependentSj.Update(ctx, scheduleControllerEnv.InternalExecutor(), txn) + dependentSj.SetExecutionDetails( + dependentSj.ExecutorType(), jobspb.ExecutionArguments{Args: any}, + ) + + return 0, jobs.ScheduledJobTxn(txn).Update(ctx, dependentSj) } // OnDrop implements the ScheduledJobController interface. 
@@ -482,7 +479,7 @@ func (e *scheduledBackupExecutor) OnDrop( scheduleControllerEnv scheduledjobs.ScheduleControllerEnv, env scheduledjobs.JobSchedulerEnv, sj *jobs.ScheduledJob, - txn *kv.Txn, + txn isql.Txn, descsCol *descs.Collection, ) (int, error) { args := &backuppb.ScheduledBackupExecutionArgs{} @@ -496,14 +493,14 @@ func (e *scheduledBackupExecutor) OnDrop( return dependentRowsDropped, errors.Wrap(err, "failed to unlink dependent schedule") } - return dependentRowsDropped, releaseProtectedTimestamp(ctx, txn, scheduleControllerEnv.PTSProvider(), - args.ProtectedTimestampRecord) + pts := scheduleControllerEnv.PTSProvider() + return dependentRowsDropped, releaseProtectedTimestamp(ctx, pts, args.ProtectedTimestampRecord) } // getBackupFnTelemetry collects the telemetry from the dry-run backup // corresponding to backupFnResult. func getBackupFnTelemetry( - ctx context.Context, registry *jobs.Registry, txn *kv.Txn, backupFnResult tree.Datums, + ctx context.Context, registry *jobs.Registry, txn isql.Txn, backupFnResult tree.Datums, ) eventpb.RecoveryEvent { if registry == nil { return eventpb.RecoveryEvent{} diff --git a/pkg/ccl/backupccl/schedule_pts_chaining.go b/pkg/ccl/backupccl/schedule_pts_chaining.go index 61da83fd6420..86853c1aa9ac 100644 --- a/pkg/ccl/backupccl/schedule_pts_chaining.go +++ b/pkg/ccl/backupccl/schedule_pts_chaining.go @@ -16,12 +16,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" roachpb "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" 
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -94,13 +94,13 @@ func maybeUpdateSchedulePTSRecord( } } - return exec.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return exec.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { // We cannot rely on b.job containing created_by_id because on job // resumption the registry does not populate the resumers' CreatedByInfo. - datums, err := exec.InternalExecutor.QueryRowEx( + datums, err := txn.QueryRowEx( ctx, "lookup-schedule-info", - txn, + txn.KV(), sessiondata.NodeUserSessionDataOverride, fmt.Sprintf( "SELECT created_by_id FROM %s WHERE id=$1 AND created_by_type=$2", @@ -115,9 +115,11 @@ func maybeUpdateSchedulePTSRecord( return nil } + schedules := jobs.ScheduledJobTxn(txn) scheduleID := int64(tree.MustBeDInt(datums[0])) - sj, args, err := getScheduledBackupExecutionArgsFromSchedule(ctx, env, txn, - exec.InternalExecutor, scheduleID) + sj, args, err := getScheduledBackupExecutionArgsFromSchedule( + ctx, env, schedules, scheduleID, + ) if err != nil { return errors.Wrap(err, "load scheduled job") } @@ -136,23 +138,23 @@ func maybeUpdateSchedulePTSRecord( return err } sj.SetExecutionDetails(sj.ExecutorType(), jobspb.ExecutionArguments{Args: any}) - return sj.Update(ctx, exec.InternalExecutor, txn) + return jobs.ScheduledJobTxn(txn).Update(ctx, sj) } if backupDetails.SchedulePTSChainingRecord == nil { return errors.AssertionFailedf( "scheduled backup is chaining protected timestamp records but no chaining action was specified") } - + pts := exec.ProtectedTimestampProvider.WithTxn(txn) switch args.BackupType { case backuppb.ScheduledBackupExecutionArgs_INCREMENTAL: if backupDetails.SchedulePTSChainingRecord.Action != jobspb.SchedulePTSChainingRecord_UPDATE { return errors.AssertionFailedf("incremental backup has unexpected chaining action %d on"+ " backup job details", backupDetails.SchedulePTSChainingRecord.Action) } - if err := 
manageIncrementalBackupPTSChaining(ctx, + if err := manageIncrementalBackupPTSChaining(ctx, pts, backupDetails.SchedulePTSChainingRecord.ProtectedTimestampRecord, - backupDetails.EndTime, exec, txn, scheduleID); err != nil { + backupDetails.EndTime, scheduleID); err != nil { return errors.Wrap(err, "failed to manage chaining of pts record during a inc backup") } case backuppb.ScheduledBackupExecutionArgs_FULL: @@ -160,7 +162,7 @@ func maybeUpdateSchedulePTSRecord( return errors.AssertionFailedf("full backup has unexpected chaining action %d on"+ " backup job details", backupDetails.SchedulePTSChainingRecord.Action) } - if err := manageFullBackupPTSChaining(ctx, env, txn, backupDetails, exec, args); err != nil { + if err := manageFullBackupPTSChaining(ctx, pts, schedules, env, backupDetails, args); err != nil { return errors.Wrap(err, "failed to manage chaining of pts record during a full backup") } } @@ -172,16 +174,17 @@ func maybeUpdateSchedulePTSRecord( // schedule owned protected timestamp record on completion of a full backup. func manageFullBackupPTSChaining( ctx context.Context, + pts protectedts.Storage, + schedules jobs.ScheduledJobStorage, env scheduledjobs.JobSchedulerEnv, - txn *kv.Txn, backupDetails jobspb.BackupDetails, - exec *sql.ExecutorConfig, fullScheduleArgs *backuppb.ScheduledBackupExecutionArgs, ) error { // Let's resolve the dependent incremental schedule as the first step. If the // schedule has been dropped then we can avoid doing unnecessary work. 
- incSj, incArgs, err := getScheduledBackupExecutionArgsFromSchedule(ctx, env, txn, - exec.InternalExecutor, fullScheduleArgs.DependentScheduleID) + incSj, incArgs, err := getScheduledBackupExecutionArgsFromSchedule( + ctx, env, schedules, fullScheduleArgs.DependentScheduleID, + ) if err != nil { if jobs.HasScheduledJobNotFoundError(err) { log.Warningf(ctx, "could not find dependent schedule with id %d", @@ -193,7 +196,7 @@ func manageFullBackupPTSChaining( // Resolve the target that needs to be protected on this execution of the // scheduled backup. - targetToProtect, deprecatedSpansToProtect, err := getTargetProtectedByBackup(ctx, backupDetails, txn, exec) + targetToProtect, deprecatedSpansToProtect, err := getTargetProtectedByBackup(ctx, pts, backupDetails) if err != nil { return errors.Wrap(err, "getting target to protect") } @@ -215,8 +218,10 @@ func manageFullBackupPTSChaining( // inc schedule ID as the records' Meta. This ensures that even if the full // schedule is dropped, the reconciliation job will not release the pts // record stored on the inc schedule, and the chaining will continue. - ptsRecord, err := protectTimestampRecordForSchedule(ctx, targetToProtect, deprecatedSpansToProtect, - backupDetails.EndTime, incSj.ScheduleID(), exec, txn) + ptsRecord, err := protectTimestampRecordForSchedule( + ctx, pts, targetToProtect, deprecatedSpansToProtect, + backupDetails.EndTime, incSj.ScheduleID(), + ) if err != nil { return errors.Wrap(err, "protect pts record for schedule") } @@ -235,8 +240,10 @@ func manageFullBackupPTSChaining( // about to release. Already running incremental backup jobs would have // written their own pts record during planning, and should complete // successfully. 
- if err := releaseProtectedTimestamp(ctx, txn, exec.ProtectedTimestampProvider, - backupDetails.SchedulePTSChainingRecord.ProtectedTimestampRecord); err != nil { + if err := releaseProtectedTimestamp( + ctx, pts, + backupDetails.SchedulePTSChainingRecord.ProtectedTimestampRecord, + ); err != nil { return errors.Wrap(err, "release pts record for schedule") } @@ -247,7 +254,7 @@ func manageFullBackupPTSChaining( return err } incSj.SetExecutionDetails(incSj.ExecutorType(), jobspb.ExecutionArguments{Args: any}) - return incSj.Update(ctx, exec.InternalExecutor, txn) + return schedules.Update(ctx, incSj) } // manageFullBackupPTSChaining implements is responsible for managing the @@ -255,17 +262,15 @@ func manageFullBackupPTSChaining( // backup. func manageIncrementalBackupPTSChaining( ctx context.Context, + pts protectedts.Storage, ptsRecordID *uuid.UUID, tsToProtect hlc.Timestamp, - exec *sql.ExecutorConfig, - txn *kv.Txn, scheduleID int64, ) error { if ptsRecordID == nil { return errors.AssertionFailedf("unexpected nil pts record id on incremental schedule %d", scheduleID) } - err := exec.ProtectedTimestampProvider.UpdateTimestamp(ctx, txn, *ptsRecordID, - tsToProtect) + err := pts.UpdateTimestamp(ctx, *ptsRecordID, tsToProtect) // If we cannot find the pts record to update it is possible that a concurrent // full backup has released the record, and written a new record on the // incremental schedule. 
This should only happen if this is an "overhang" @@ -282,14 +287,13 @@ func manageIncrementalBackupPTSChaining( } func getTargetProtectedByBackup( - ctx context.Context, backupDetails jobspb.BackupDetails, txn *kv.Txn, exec *sql.ExecutorConfig, + ctx context.Context, pts protectedts.Storage, backupDetails jobspb.BackupDetails, ) (target *ptpb.Target, deprecatedSpans []roachpb.Span, err error) { if backupDetails.ProtectedTimestampRecord == nil { return nil, nil, nil } - ptsRecord, err := exec.ProtectedTimestampProvider.GetRecord(ctx, txn, - *backupDetails.ProtectedTimestampRecord) + ptsRecord, err := pts.GetRecord(ctx, *backupDetails.ProtectedTimestampRecord) if err != nil { return nil, nil, err } @@ -299,15 +303,15 @@ func getTargetProtectedByBackup( func protectTimestampRecordForSchedule( ctx context.Context, + pts protectedts.Storage, targetToProtect *ptpb.Target, deprecatedSpansToProtect roachpb.Spans, tsToProtect hlc.Timestamp, scheduleID int64, - exec *sql.ExecutorConfig, - txn *kv.Txn, ) (uuid.UUID, error) { protectedtsID := uuid.MakeV4() - rec := jobsprotectedts.MakeRecord(protectedtsID, scheduleID, tsToProtect, deprecatedSpansToProtect, - jobsprotectedts.Schedules, targetToProtect) - return protectedtsID, exec.ProtectedTimestampProvider.Protect(ctx, txn, rec) + return protectedtsID, pts.Protect(ctx, jobsprotectedts.MakeRecord( + protectedtsID, scheduleID, tsToProtect, deprecatedSpansToProtect, + jobsprotectedts.Schedules, targetToProtect, + )) } diff --git a/pkg/ccl/backupccl/schedule_pts_chaining_test.go b/pkg/ccl/backupccl/schedule_pts_chaining_test.go index 8dd7bbd83517..62b7c58c2116 100644 --- a/pkg/ccl/backupccl/schedule_pts_chaining_test.go +++ b/pkg/ccl/backupccl/schedule_pts_chaining_test.go @@ -19,10 +19,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" 
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" - "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -74,9 +73,9 @@ func checkPTSRecord( ) { var ptsRecord *ptpb.Record var err error - require.NoError(t, th.server.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - ptsRecord, err = th.server.ExecutorConfig().(sql.ExecutorConfig).ProtectedTimestampProvider. - GetRecord(context.Background(), txn, id) + require.NoError(t, th.internalDB().Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + ptsRecord, err = th.protectedTimestamps().WithTxn(txn). + GetRecord(context.Background(), id) require.NoError(t, err) return nil })) @@ -104,7 +103,7 @@ INSERT INTO t values (1), (10), (100); backupAsOfTimes := make([]time.Time, 0) th.cfg.TestingKnobs.(*jobs.TestingKnobs).OverrideAsOfClause = func(clause *tree.AsOfClause, _ time.Time) { - backupAsOfTime := th.cfg.DB.Clock().PhysicalTime() + backupAsOfTime := th.cfg.DB.KV().Clock().PhysicalTime() expr, err := tree.MakeDTimestampTZ(backupAsOfTime, time.Microsecond) require.NoError(t, err) clause.Expr = expr @@ -150,9 +149,11 @@ INSERT INTO t values (1), (10), (100); defer cleanupSchedules() defer func() { backupAsOfTimes = backupAsOfTimes[:0] }() + schedules := jobs.ScheduledJobDB(th.internalDB()) fullSchedule := th.loadSchedule(t, fullID) - _, fullArgs, err := getScheduledBackupExecutionArgsFromSchedule(ctx, th.env, nil, - th.server.InternalExecutor().(*sql.InternalExecutor), fullID) + _, fullArgs, err := getScheduledBackupExecutionArgsFromSchedule( + ctx, th.env, schedules, fullID, + ) require.NoError(t, err) // Force full backup to execute (this unpauses incremental). @@ -160,8 +161,9 @@ INSERT INTO t values (1), (10), (100); // Check that there is no PTS record on the full schedule. 
incSchedule := th.loadSchedule(t, incID) - _, incArgs, err := getScheduledBackupExecutionArgsFromSchedule(ctx, th.env, nil, - th.server.InternalExecutor().(*sql.InternalExecutor), incID) + _, incArgs, err := getScheduledBackupExecutionArgsFromSchedule( + ctx, th.env, schedules, incID, + ) require.NoError(t, err) require.Nil(t, fullArgs.ProtectedTimestampRecord) @@ -182,7 +184,7 @@ INSERT INTO t values (1), (10), (100); // to re-run the full schedule. incSchedule = th.loadSchedule(t, incSchedule.ScheduleID()) incSchedule.Pause() - require.NoError(t, incSchedule.Update(context.Background(), th.cfg.InternalExecutor, nil)) + require.NoError(t, schedules.Update(ctx, incSchedule)) clearSuccessfulJobEntryForSchedule(t, fullSchedule) @@ -193,15 +195,16 @@ INSERT INTO t values (1), (10), (100); // Check that the pts record on the inc schedule has been overwritten with a new // record written by the full backup. incSchedule = th.loadSchedule(t, incSchedule.ScheduleID()) - _, incArgs, err = getScheduledBackupExecutionArgsFromSchedule(ctx, th.env, nil, - th.server.InternalExecutor().(*sql.InternalExecutor), incID) + _, incArgs, err = getScheduledBackupExecutionArgsFromSchedule( + ctx, th.env, schedules, incID, + ) require.NoError(t, err) require.NotEqual(t, *ptsOnIncID, *incArgs.ProtectedTimestampRecord) // Check that the old pts record has been released. 
- require.NoError(t, th.cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - _, err := th.server.ExecutorConfig().(sql.ExecutorConfig).ProtectedTimestampProvider.GetRecord( - ctx, txn, *ptsOnIncID) + require.NoError(t, th.cfg.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + pts := th.protectedTimestamps().WithTxn(txn) + _, err := pts.GetRecord(ctx, *ptsOnIncID) require.True(t, errors.Is(err, protectedts.ErrNotExists)) return nil })) @@ -227,7 +230,7 @@ INSERT INTO t values (1), (10), (100); backupAsOfTimes := make([]time.Time, 0) th.cfg.TestingKnobs.(*jobs.TestingKnobs).OverrideAsOfClause = func(clause *tree.AsOfClause, _ time.Time) { - backupAsOfTime := th.cfg.DB.Clock().PhysicalTime() + backupAsOfTime := th.cfg.DB.KV().Clock().PhysicalTime() expr, err := tree.MakeDTimestampTZ(backupAsOfTime, time.Microsecond) require.NoError(t, err) clause.Expr = expr @@ -265,9 +268,12 @@ INSERT INTO t values (1), (10), (100); defer cleanupSchedules() defer func() { backupAsOfTimes = backupAsOfTimes[:0] }() + schedules := jobs.ScheduledJobDB(th.internalDB()) + fullSchedule := th.loadSchedule(t, fullID) - _, fullArgs, err := getScheduledBackupExecutionArgsFromSchedule(ctx, th.env, nil, - th.server.InternalExecutor().(*sql.InternalExecutor), fullID) + _, fullArgs, err := getScheduledBackupExecutionArgsFromSchedule( + ctx, th.env, schedules, fullID, + ) require.NoError(t, err) // Force full backup to execute (this unpauses incremental). runSchedule(t, fullSchedule) @@ -277,8 +283,9 @@ INSERT INTO t values (1), (10), (100); // Check that there is a PTS record on the incremental schedule. 
incSchedule := th.loadSchedule(t, incID) - _, incArgs, err := getScheduledBackupExecutionArgsFromSchedule(ctx, th.env, nil, - th.server.InternalExecutor().(*sql.InternalExecutor), incID) + _, incArgs, err := getScheduledBackupExecutionArgsFromSchedule( + ctx, th.env, schedules, incID, + ) require.NoError(t, err) ptsOnIncID := incArgs.ProtectedTimestampRecord require.NotNil(t, ptsOnIncID) @@ -287,14 +294,13 @@ INSERT INTO t values (1), (10), (100); th.sqlDB.Exec(t, `DROP SCHEDULE $1`, fullID) - _, err = jobs.LoadScheduledJob( - context.Background(), th.env, incID, th.cfg.InternalExecutor, nil) + _, err = schedules.Load(ctx, th.env, incID) require.Error(t, err) // Check that the incremental schedule's PTS is dropped - require.NoError(t, th.cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - _, err := th.server.ExecutorConfig().(sql.ExecutorConfig).ProtectedTimestampProvider.GetRecord( - ctx, txn, *ptsOnIncID) + require.NoError(t, th.cfg.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + pts := th.protectedTimestamps().WithTxn(txn) + _, err := pts.GetRecord(ctx, *ptsOnIncID) require.True(t, errors.Is(err, protectedts.ErrNotExists)) return nil })) @@ -319,7 +325,7 @@ INSERT INTO t values (1), (10), (100); backupAsOfTimes := make([]time.Time, 0) th.cfg.TestingKnobs.(*jobs.TestingKnobs).OverrideAsOfClause = func(clause *tree.AsOfClause, _ time.Time) { - backupAsOfTime := th.cfg.DB.Clock().PhysicalTime() + backupAsOfTime := th.cfg.DB.KV().Clock().PhysicalTime() expr, err := tree.MakeDTimestampTZ(backupAsOfTime, time.Microsecond) require.NoError(t, err) clause.Expr = expr @@ -346,9 +352,11 @@ INSERT INTO t values (1), (10), (100); // Check that the incremental schedule has a protected timestamp record // written on it by the full schedule. 
+ schedules := jobs.ScheduledJobDB(th.internalDB()) incSchedule := th.loadSchedule(t, incID) - _, incArgs, err := getScheduledBackupExecutionArgsFromSchedule(ctx, th.env, nil, - th.server.InternalExecutor().(*sql.InternalExecutor), incID) + _, incArgs, err := getScheduledBackupExecutionArgsFromSchedule( + ctx, th.env, schedules, incID, + ) require.NoError(t, err) ptsOnIncID := incArgs.ProtectedTimestampRecord require.NotNil(t, ptsOnIncID) @@ -364,8 +372,9 @@ INSERT INTO t values (1), (10), (100); require.Zero(t, numRows) // Also ensure that the full schedule doesn't have DependentID set anymore. - _, fullArgs, err := getScheduledBackupExecutionArgsFromSchedule(ctx, th.env, nil, - th.server.InternalExecutor().(*sql.InternalExecutor), fullID) + _, fullArgs, err := getScheduledBackupExecutionArgsFromSchedule( + ctx, th.env, schedules, fullID, + ) require.NoError(t, err) require.Zero(t, fullArgs.DependentScheduleID) } @@ -388,7 +397,7 @@ INSERT INTO t select x, y from generate_series(1, 100) as g(x), generate_series( backupAsOfTimes := make([]time.Time, 0) th.cfg.TestingKnobs.(*jobs.TestingKnobs).OverrideAsOfClause = func(clause *tree.AsOfClause, _ time.Time) { - backupAsOfTime := th.cfg.DB.Clock().PhysicalTime() + backupAsOfTime := th.cfg.DB.KV().Clock().PhysicalTime() expr, err := tree.MakeDTimestampTZ(backupAsOfTime, time.Microsecond) require.NoError(t, err) clause.Expr = expr diff --git a/pkg/ccl/backupccl/show.go b/pkg/ccl/backupccl/show.go index c25ee86891b0..ea7e4d419fe6 100644 --- a/pkg/ccl/backupccl/show.go +++ b/pkg/ccl/backupccl/show.go @@ -317,8 +317,12 @@ func showBackupPlanHook( defer encStore.Close() } var encryption *jobspb.BackupEncryptionOptions - kmsEnv := backupencryption.MakeBackupKMSEnv(p.ExecCfg().Settings, - &p.ExecCfg().ExternalIODirConfig, p.ExecCfg().DB, p.User(), p.ExecCfg().InternalExecutor) + kmsEnv := backupencryption.MakeBackupKMSEnv( + p.ExecCfg().Settings, + &p.ExecCfg().ExternalIODirConfig, + p.ExecCfg().InternalDB, + p.User(), + ) 
showEncErr := `If you are running SHOW BACKUP exclusively on an incremental backup, you must pass the 'encryption_info_dir' parameter that points to the directory of your full backup` if passphrase, ok := opts[backupencryption.BackupOptEncPassphrase]; ok { diff --git a/pkg/ccl/backupccl/split_and_scatter_processor.go b/pkg/ccl/backupccl/split_and_scatter_processor.go index 5020a304e998..eaef2eb4ff86 100644 --- a/pkg/ccl/backupccl/split_and_scatter_processor.go +++ b/pkg/ccl/backupccl/split_and_scatter_processor.go @@ -227,7 +227,7 @@ func newSplitAndScatterProcessor( return nil, err } - scatterer := makeSplitAndScatterer(db, kr) + scatterer := makeSplitAndScatterer(db.KV(), kr) if spec.ValidateOnly { nodeID, _ := flowCtx.NodeID.OptionalNodeID() scatterer = noopSplitAndScatterer{nodeID} diff --git a/pkg/ccl/backupccl/split_and_scatter_processor_test.go b/pkg/ccl/backupccl/split_and_scatter_processor_test.go index f525000c1c29..dcf10bf73aea 100644 --- a/pkg/ccl/backupccl/split_and_scatter_processor_test.go +++ b/pkg/ccl/backupccl/split_and_scatter_processor_test.go @@ -19,6 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catenumpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/rowflow" @@ -65,7 +66,6 @@ func TestSplitAndScatterProcessor(t *testing.T) { tc := testcluster.StartTestCluster(t, 3 /* nodes */, base.TestClusterArgs{}) defer tc.Stopper().Stop(context.Background()) - kvDB := tc.Server(0).DB() testCases := []struct { name string @@ -232,7 +232,7 @@ func TestSplitAndScatterProcessor(t *testing.T) { flowCtx := execinfra.FlowCtx{ Cfg: &execinfra.ServerConfig{ Settings: st, - DB: kvDB, + DB: tc.Server(0).InternalDB().(descs.DB), Codec: keys.SystemSQLCodec, Stopper: tc.Stopper(), }, 
diff --git a/pkg/ccl/backupccl/system_schema.go b/pkg/ccl/backupccl/system_schema.go index bec3759e3485..170c8125f875 100644 --- a/pkg/ccl/backupccl/system_schema.go +++ b/pkg/ccl/backupccl/system_schema.go @@ -18,13 +18,14 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descidgen" descpb "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" @@ -78,11 +79,11 @@ type systemBackupConfiguration struct { // migrationFunc performs the necessary migrations on the system table data in // the crdb_temp staging table before it is loaded into the actual system // table. - migrationFunc func(ctx context.Context, execCtx *sql.ExecutorConfig, txn *kv.Txn, tempTableName string, rekeys jobspb.DescRewriteMap) error + migrationFunc func(ctx context.Context, txn isql.Txn, tempTableName string, rekeys jobspb.DescRewriteMap) error // customRestoreFunc is responsible for restoring the data from a table that // holds the restore system table data into the given system table. If none // is provided then `defaultRestoreFunc` is used. 
- customRestoreFunc func(ctx context.Context, execCtx *sql.ExecutorConfig, txn *kv.Txn, systemTableName, tempTableName string) error + customRestoreFunc func(ctx context.Context, deps customRestoreFuncDeps, txn isql.Txn, systemTableName, tempTableName string) error // The following fields are for testing. @@ -90,6 +91,11 @@ type systemBackupConfiguration struct { expectMissingInSystemTenant bool } +type customRestoreFuncDeps struct { + settings *cluster.Settings + codec keys.SQLCodec +} + // roleIDSequenceRestoreOrder is set to 1 since it must be after system.users // which has the default 0. const roleIDSequenceRestoreOrder = 1 @@ -98,19 +104,15 @@ const roleIDSequenceRestoreOrder = 1 // be overwritten with the system table's // systemBackupConfiguration.customRestoreFunc. func defaultSystemTableRestoreFunc( - ctx context.Context, - execCfg *sql.ExecutorConfig, - txn *kv.Txn, - systemTableName, tempTableName string, + ctx context.Context, _ customRestoreFuncDeps, txn isql.Txn, systemTableName, tempTableName string, ) error { - executor := execCfg.InternalExecutor deleteQuery := fmt.Sprintf("DELETE FROM system.%s WHERE true", systemTableName) opName := systemTableName + "-data-deletion" log.Eventf(ctx, "clearing data from system table %s with query %q", systemTableName, deleteQuery) - _, err := executor.Exec(ctx, opName, txn, deleteQuery) + _, err := txn.Exec(ctx, opName, txn.KV(), deleteQuery) if err != nil { return errors.Wrapf(err, "deleting data from system.%s", systemTableName) } @@ -118,7 +120,7 @@ func defaultSystemTableRestoreFunc( restoreQuery := fmt.Sprintf("INSERT INTO system.%s (SELECT * FROM %s);", systemTableName, tempTableName) opName = systemTableName + "-data-insert" - if _, err := executor.Exec(ctx, opName, txn, restoreQuery); err != nil { + if _, err := txn.Exec(ctx, opName, txn.KV(), restoreQuery); err != nil { return errors.Wrapf(err, "inserting data to system.%s", systemTableName) } @@ -132,15 +134,15 @@ func defaultSystemTableRestoreFunc( // 
into a non-system tenant. func tenantSettingsTableRestoreFunc( ctx context.Context, - execCfg *sql.ExecutorConfig, - txn *kv.Txn, + deps customRestoreFuncDeps, + txn isql.Txn, systemTableName, tempTableName string, ) error { - if execCfg.Codec.ForSystemTenant() { - return defaultSystemTableRestoreFunc(ctx, execCfg, txn, systemTableName, tempTableName) + if deps.codec.ForSystemTenant() { + return defaultSystemTableRestoreFunc(ctx, deps, txn, systemTableName, tempTableName) } - if count, err := queryTableRowCount(ctx, execCfg.InternalExecutor, txn, tempTableName); err == nil && count > 0 { + if count, err := queryTableRowCount(ctx, txn, tempTableName); err == nil && count > 0 { log.Warningf(ctx, "skipping restore of %d entries in system.tenant_settings table", count) } else if err != nil { log.Warningf(ctx, "skipping restore of entries in system.tenant_settings table (count failed: %s)", err.Error()) @@ -148,11 +150,9 @@ func tenantSettingsTableRestoreFunc( return nil } -func queryTableRowCount( - ctx context.Context, ie *sql.InternalExecutor, txn *kv.Txn, tableName string, -) (int64, error) { +func queryTableRowCount(ctx context.Context, txn isql.Txn, tableName string) (int64, error) { countQuery := fmt.Sprintf("SELECT count(1) FROM %s", tableName) - row, err := ie.QueryRow(ctx, fmt.Sprintf("count-%s", tableName), txn, countQuery) + row, err := txn.QueryRow(ctx, fmt.Sprintf("count-%s", tableName), txn.KV(), countQuery) if err != nil { return 0, errors.Wrapf(err, "counting rows in %q", tableName) } @@ -166,24 +166,23 @@ func queryTableRowCount( func usersRestoreFunc( ctx context.Context, - execCfg *sql.ExecutorConfig, - txn *kv.Txn, + deps customRestoreFuncDeps, + txn isql.Txn, systemTableName, tempTableName string, ) (retErr error) { - if !execCfg.Settings.Version.IsActive(ctx, clusterversion.V22_2RoleOptionsTableHasIDColumn) { + if !deps.settings.Version.IsActive(ctx, clusterversion.V22_2RoleOptionsTableHasIDColumn) { return defaultSystemTableRestoreFunc( - ctx, 
execCfg, txn, systemTableName, tempTableName, + ctx, deps, txn, systemTableName, tempTableName, ) } - executor := execCfg.InternalExecutor - hasIDColumn, err := tableHasColumnName(ctx, txn, executor, tempTableName, "user_id") + hasIDColumn, err := tableHasColumnName(ctx, txn, tempTableName, "user_id") if err != nil { return err } if hasIDColumn { return defaultSystemTableRestoreFunc( - ctx, execCfg, txn, systemTableName, tempTableName, + ctx, deps, txn, systemTableName, tempTableName, ) } @@ -192,13 +191,13 @@ func usersRestoreFunc( log.Eventf(ctx, "clearing data from system table %s with query %q", systemTableName, deleteQuery) - _, err = executor.Exec(ctx, opName, txn, deleteQuery) + _, err = txn.Exec(ctx, opName, txn.KV(), deleteQuery) if err != nil { return errors.Wrapf(err, "deleting data from system.%s", systemTableName) } - it, err := executor.QueryIteratorEx(ctx, "query-system-users-in-backup", - txn, sessiondata.NodeUserSessionDataOverride, + it, err := txn.QueryIteratorEx(ctx, "query-system-users-in-backup", + txn.KV(), sessiondata.NodeUserSessionDataOverride, fmt.Sprintf(`SELECT * FROM %s`, tempTableName)) if err != nil { return err @@ -226,7 +225,7 @@ func usersRestoreFunc( } else if username == "admin" { id = 2 } else { - id, err = descidgen.GenerateUniqueRoleID(ctx, execCfg.DB, execCfg.Codec) + id, err = descidgen.GenerateUniqueRoleIDInTxn(ctx, txn.KV(), deps.codec) if err != nil { return err } @@ -235,7 +234,7 @@ func usersRestoreFunc( restoreQuery := fmt.Sprintf("INSERT INTO system.%s VALUES ($1, $2, $3, $4)", systemTableName) opName = systemTableName + "-data-insert" - if _, err := executor.Exec(ctx, opName, txn, restoreQuery, username, password, isRole, id); err != nil { + if _, err := txn.Exec(ctx, opName, txn.KV(), restoreQuery, username, password, isRole, id); err != nil { return errors.Wrapf(err, "inserting data to system.%s", systemTableName) } } @@ -244,36 +243,34 @@ func usersRestoreFunc( func roleMembersRestoreFunc( ctx context.Context, - 
execCfg *sql.ExecutorConfig, - txn *kv.Txn, + deps customRestoreFuncDeps, + txn isql.Txn, systemTableName, tempTableName string, ) error { - if !execCfg.Settings.Version.IsActive(ctx, clusterversion.V23_1RoleMembersTableHasIDColumns) { - return defaultSystemTableRestoreFunc(ctx, execCfg, txn, systemTableName, tempTableName) + if !deps.settings.Version.IsActive(ctx, clusterversion.V23_1RoleMembersTableHasIDColumns) { + return defaultSystemTableRestoreFunc(ctx, deps, txn, systemTableName, tempTableName) } - executor := execCfg.InternalExecutor - // It's enough to just check if role_id exists since member_id was added at // the same time. - hasIDColumns, err := tableHasColumnName(ctx, txn, executor, tempTableName, "role_id") + hasIDColumns, err := tableHasColumnName(ctx, txn, tempTableName, "role_id") if err != nil { return err } if hasIDColumns { - return defaultSystemTableRestoreFunc(ctx, execCfg, txn, systemTableName, tempTableName) + return defaultSystemTableRestoreFunc(ctx, deps, txn, systemTableName, tempTableName) } deleteQuery := fmt.Sprintf("DELETE FROM system.%s WHERE true", systemTableName) log.Eventf(ctx, "clearing data from system table %s with query %q", systemTableName, deleteQuery) - _, err = executor.Exec(ctx, systemTableName+"-data-deletion", txn, deleteQuery) + _, err = txn.Exec(ctx, systemTableName+"-data-deletion", txn.KV(), deleteQuery) if err != nil { return errors.Wrapf(err, "deleting data from system.%s", systemTableName) } - roleMembers, err := executor.QueryBufferedEx(ctx, systemTableName+"-query-all-rows", - txn, sessiondata.NodeUserSessionDataOverride, + roleMembers, err := txn.QueryBufferedEx(ctx, systemTableName+"-query-all-rows", + txn.KV(), sessiondata.NodeUserSessionDataOverride, fmt.Sprintf(`SELECT * FROM %s`, tempTableName), ) if err != nil { @@ -287,8 +284,8 @@ VALUES ($1, $2, $3, (SELECT user_id FROM system.users WHERE username = $1), (SEL role := tree.MustBeDString(roleMember[0]) member := tree.MustBeDString(roleMember[1]) 
isAdmin := tree.MustBeDBool(roleMember[2]) - if _, err := executor.ExecEx(ctx, systemTableName+"-data-insert", - txn, sessiondata.NodeUserSessionDataOverride, + if _, err := txn.ExecEx(ctx, systemTableName+"-data-insert", + txn.KV(), sessiondata.NodeUserSessionDataOverride, restoreQuery, role, member, isAdmin, ); err != nil { return errors.Wrapf(err, "inserting data to system.%s", systemTableName) @@ -300,18 +297,17 @@ VALUES ($1, $2, $3, (SELECT user_id FROM system.users WHERE username = $1), (SEL func roleOptionsRestoreFunc( ctx context.Context, - execCfg *sql.ExecutorConfig, - txn *kv.Txn, + deps customRestoreFuncDeps, + txn isql.Txn, systemTableName, tempTableName string, ) (retErr error) { - executor := execCfg.InternalExecutor - hasIDColumn, err := tableHasColumnName(ctx, txn, executor, tempTableName, "user_id") + hasIDColumn, err := tableHasColumnName(ctx, txn, tempTableName, "user_id") if err != nil { return err } if hasIDColumn { return defaultSystemTableRestoreFunc( - ctx, execCfg, txn, systemTableName, tempTableName, + ctx, deps, txn, systemTableName, tempTableName, ) } @@ -320,13 +316,13 @@ func roleOptionsRestoreFunc( log.Eventf(ctx, "clearing data from system table %s with query %q", systemTableName, deleteQuery) - _, err = executor.Exec(ctx, opName, txn, deleteQuery) + _, err = txn.Exec(ctx, opName, txn.KV(), deleteQuery) if err != nil { return errors.Wrapf(err, "deleting data from system.%s", systemTableName) } - it, err := executor.QueryIteratorEx(ctx, "query-system-users-in-backup", - txn, sessiondata.NodeUserSessionDataOverride, + it, err := txn.QueryIteratorEx(ctx, "query-system-users-in-backup", + txn.KV(), sessiondata.NodeUserSessionDataOverride, fmt.Sprintf(`SELECT * FROM %s`, tempTableName)) if err != nil { return err @@ -354,7 +350,7 @@ func roleOptionsRestoreFunc( } else if username == "admin" { id = 2 } else { - row, err := executor.QueryRow(ctx, `get-user-id`, txn, `SELECT user_id FROM system.users WHERE username = $1`, username) + row, 
err := txn.QueryRow(ctx, `get-user-id`, txn.KV(), `SELECT user_id FROM system.users WHERE username = $1`, username) if err != nil { return err } @@ -365,7 +361,7 @@ func roleOptionsRestoreFunc( restoreQuery := fmt.Sprintf("INSERT INTO system.%s VALUES ($1, $2, $3, $4)", systemTableName) opName = systemTableName + "-data-insert" - if _, err := executor.Exec(ctx, opName, txn, restoreQuery, username, option, val, id); err != nil { + if _, err := txn.Exec(ctx, opName, txn.KV(), restoreQuery, username, option, val, id); err != nil { return errors.Wrapf(err, "inserting data to system.%s", systemTableName) } } @@ -373,14 +369,10 @@ func roleOptionsRestoreFunc( } func tableHasColumnName( - ctx context.Context, - txn *kv.Txn, - executor *sql.InternalExecutor, - tableName string, - columnName string, + ctx context.Context, txn isql.Txn, tableName string, columnName string, ) (bool, error) { hasColumnQuery := fmt.Sprintf(`SELECT EXISTS (SELECT 1 FROM [SHOW COLUMNS FROM %s] WHERE column_name = '%s')`, tableName, columnName) - row, err := executor.QueryRow(ctx, "has-column", txn, hasColumnQuery) + row, err := txn.QueryRow(ctx, "has-column", txn.KV(), hasColumnQuery) if err != nil { return false, err } @@ -392,18 +384,17 @@ func tableHasColumnName( // version. 
func settingsRestoreFunc( ctx context.Context, - execCfg *sql.ExecutorConfig, - txn *kv.Txn, + deps customRestoreFuncDeps, + txn isql.Txn, systemTableName, tempTableName string, ) error { - executor := execCfg.InternalExecutor deleteQuery := fmt.Sprintf("DELETE FROM system.%s WHERE name <> 'version'", systemTableName) opName := systemTableName + "-data-deletion" log.Eventf(ctx, "clearing data from system table %s with query %q", systemTableName, deleteQuery) - _, err := executor.Exec(ctx, opName, txn, deleteQuery) + _, err := txn.Exec(ctx, opName, txn.KV(), deleteQuery) if err != nil { return errors.Wrapf(err, "deleting data from system.%s", systemTableName) } @@ -411,7 +402,7 @@ func settingsRestoreFunc( restoreQuery := fmt.Sprintf("INSERT INTO system.%s (SELECT * FROM %s WHERE name <> 'version');", systemTableName, tempTableName) opName = systemTableName + "-data-insert" - if _, err := executor.Exec(ctx, opName, txn, restoreQuery); err != nil { + if _, err := txn.Exec(ctx, opName, txn.KV(), restoreQuery); err != nil { return errors.Wrapf(err, "inserting data to system.%s", systemTableName) } return nil @@ -419,12 +410,12 @@ func settingsRestoreFunc( func roleIDSeqRestoreFunc( ctx context.Context, - execCfg *sql.ExecutorConfig, - txn *kv.Txn, + deps customRestoreFuncDeps, + txn isql.Txn, systemTableName, tempTableName string, ) error { - datums, err := execCfg.InternalExecutor.QueryRowEx( - ctx, "role-id-seq-custom-restore", txn, + datums, err := txn.QueryRowEx( + ctx, "role-id-seq-custom-restore", txn.KV(), sessiondata.NodeUserSessionDataOverride, `SELECT max(user_id) FROM system.users`, ) @@ -432,7 +423,7 @@ func roleIDSeqRestoreFunc( return err } max := tree.MustBeDOid(datums[0]) - return execCfg.DB.Put(ctx, execCfg.Codec.SequenceKey(keys.RoleIDSequenceID), max.Oid+1) + return txn.KV().Put(ctx, deps.codec.SequenceKey(keys.RoleIDSequenceID), max.Oid+1) } // systemTableBackupConfiguration is a map from every systemTable present in the @@ -492,12 +483,12 @@ var 
systemTableBackupConfiguration = map[string]systemBackupConfiguration{ // synthesized schedule rows in the real schedule table when we otherwise // clean it out, and skipping TTL rows when we copy from the restored // schedule table. - customRestoreFunc: func(ctx context.Context, execCfg *sql.ExecutorConfig, txn *kv.Txn, _, tempTableName string) error { + customRestoreFunc: func(ctx context.Context, _ customRestoreFuncDeps, txn isql.Txn, _, tempTableName string) error { execType := tree.ScheduledRowLevelTTLExecutor.InternalName() const deleteQuery = "DELETE FROM system.scheduled_jobs WHERE executor_type <> $1" - if _, err := execCfg.InternalExecutor.Exec( - ctx, "restore-scheduled_jobs-delete", txn, deleteQuery, execType, + if _, err := txn.Exec( + ctx, "restore-scheduled_jobs-delete", txn.KV(), deleteQuery, execType, ); err != nil { return errors.Wrapf(err, "deleting existing scheduled_jobs") } @@ -507,8 +498,8 @@ var systemTableBackupConfiguration = map[string]systemBackupConfiguration{ tempTableName, ) - if _, err := execCfg.InternalExecutor.Exec( - ctx, "restore-scheduled_jobs-insert", txn, restoreQuery, execType, + if _, err := txn.Exec( + ctx, "restore-scheduled_jobs-insert", txn.KV(), restoreQuery, execType, ); err != nil { return err } @@ -624,16 +615,14 @@ var systemTableBackupConfiguration = map[string]systemBackupConfiguration{ func rekeySystemTable( colName string, -) func(context.Context, *sql.ExecutorConfig, *kv.Txn, string, jobspb.DescRewriteMap) error { - return func(ctx context.Context, execCtx *sql.ExecutorConfig, txn *kv.Txn, tempTableName string, rekeys jobspb.DescRewriteMap) error { +) func(context.Context, isql.Txn, string, jobspb.DescRewriteMap) error { + return func(ctx context.Context, txn isql.Txn, tempTableName string, rekeys jobspb.DescRewriteMap) error { toRekey := make(descpb.IDs, 0, len(rekeys)) for i := range rekeys { toRekey = append(toRekey, i) } sort.Sort(toRekey) - executor := execCtx.InternalExecutor - // We will update every ID 
in the table from an old value to a new value // below, but as we do so there could be yet-to-be-updated rows with old-IDs // at the new IDs which would cause a uniqueness violation before we proceed @@ -665,7 +654,9 @@ func rekeySystemTable( fmt.Fprintf(&q, "WHEN %s = %d THEN %d\n", colName, old, rekeys[old].ID+offset) } fmt.Fprintf(&q, "ELSE %s END)::%s", colName, typ) - if _, err := executor.Exec(ctx, fmt.Sprintf("remap-%s", tempTableName), txn, q.String()); err != nil { + if _, err := txn.Exec( + ctx, fmt.Sprintf("remap-%s", tempTableName), txn.KV(), q.String(), + ); err != nil { return errors.Wrapf(err, "remapping IDs %s", tempTableName) } @@ -677,16 +668,16 @@ func rekeySystemTable( // ID system tables that we do not restore directly, and thus have no entry // in our remapping, but the configuration of them (comments, zones, etc) is // expected to be restored. - if _, err := executor.Exec(ctx, fmt.Sprintf("remap-remove-%s", tempTableName), txn, + if _, err := txn.Exec(ctx, fmt.Sprintf("remap-remove-%s", tempTableName), txn.KV(), fmt.Sprintf("DELETE FROM %s WHERE %s >= 50 AND %s < %d", tempTableName, colName, colName, offset), ); err != nil { return errors.Wrapf(err, "remapping IDs %s", tempTableName) } // Now slide remapped the IDs back down by offset, to their intended values. 
- if _, err := executor.Exec(ctx, + if _, err := txn.Exec(ctx, fmt.Sprintf("remap-%s-deoffset", tempTableName), - txn, + txn.KV(), fmt.Sprintf("UPDATE %s SET %s = (%s::int - %d)::%s WHERE %s::int >= %d", tempTableName, colName, colName, offset, typ, colName, offset), ); err != nil { return errors.Wrapf(err, "remapping %s; removing offset", tempTableName) @@ -717,9 +708,9 @@ func GetSystemTableIDsToExcludeFromClusterBackup( systemTableIDsToExclude := make(map[descpb.ID]struct{}) for systemTableName, backupConfig := range systemTableBackupConfiguration { if backupConfig.shouldIncludeInClusterBackup == optOutOfClusterBackup { - err := sql.DescsTxn(ctx, execCfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { + err := sql.DescsTxn(ctx, execCfg, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { tn := tree.MakeTableNameWithSchema("system", tree.PublicSchemaName, tree.Name(systemTableName)) - _, desc, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).MaybeGet(), &tn) + _, desc, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).MaybeGet(), &tn) isNotFoundErr := errors.Is(err, catalog.ErrDescriptorNotFound) if err != nil && !isNotFoundErr { return err diff --git a/pkg/ccl/changefeedccl/BUILD.bazel b/pkg/ccl/changefeedccl/BUILD.bazel index e6be2561f146..0115ab8756ae 100644 --- a/pkg/ccl/changefeedccl/BUILD.bazel +++ b/pkg/ccl/changefeedccl/BUILD.bazel @@ -89,6 +89,7 @@ go_library( "//pkg/sql/execinfrapb", "//pkg/sql/exprutil", "//pkg/sql/importer", + "//pkg/sql/isql", "//pkg/sql/parser", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", @@ -107,7 +108,6 @@ go_library( "//pkg/sql/sem/volatility", "//pkg/sql/sessiondata", "//pkg/sql/sessiondatapb", - "//pkg/sql/sqlutil", "//pkg/sql/syntheticprivilege", "//pkg/sql/types", "//pkg/util", @@ -222,7 +222,7 @@ go_test( "//pkg/kv/kvserver", "//pkg/kv/kvserver/kvserverbase", "//pkg/kv/kvserver/protectedts", - "//pkg/kv/kvserver/protectedts/ptpb", + 
"//pkg/kv/kvserver/protectedts/ptstorage", "//pkg/roachpb", "//pkg/scheduledjobs", "//pkg/scheduledjobs/schedulebase", @@ -252,6 +252,7 @@ go_test( "//pkg/sql/execinfrapb", "//pkg/sql/flowinfra", "//pkg/sql/importer", + "//pkg/sql/isql", "//pkg/sql/parser", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", diff --git a/pkg/ccl/changefeedccl/alter_changefeed_stmt.go b/pkg/ccl/changefeedccl/alter_changefeed_stmt.go index 2c41155f1cd0..6d3d3981c243 100644 --- a/pkg/ccl/changefeedccl/alter_changefeed_stmt.go +++ b/pkg/ccl/changefeedccl/alter_changefeed_stmt.go @@ -18,7 +18,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobsauth" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql" @@ -27,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" "github.com/cockroachdb/cockroach/pkg/sql/exprutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -82,8 +82,6 @@ func alterChangefeedPlanHook( return nil, nil, nil, false, nil } - lockForUpdate := false - fn := func(ctx context.Context, _ []sql.PlanNode, resultsCh chan<- tree.Datums) error { if err := validateSettings(ctx, p); err != nil { return err @@ -95,7 +93,7 @@ func alterChangefeedPlanHook( } jobID := jobspb.JobID(tree.MustBeDInt(typedExpr)) - job, err := p.ExecCfg().JobRegistry.LoadJobWithTxn(ctx, jobID, p.Txn()) + job, err := p.ExecCfg().JobRegistry.LoadJobWithTxn(ctx, jobID, p.InternalSQLTxn()) if err != nil { err = errors.Wrapf(err, `could not load job with job id %d`, jobID) return err @@ -195,17 +193,19 @@ func 
alterChangefeedPlanHook( newPayload.Description = jobRecord.Description newPayload.DescriptorIDs = jobRecord.DescriptorIDs - err = p.ExecCfg().JobRegistry.UpdateJobWithTxn(ctx, jobID, p.Txn(), lockForUpdate, func( - txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, + j, err := p.ExecCfg().JobRegistry.LoadJobWithTxn(ctx, jobID, p.InternalSQLTxn()) + if err != nil { + return err + } + if err := j.WithTxn(p.InternalSQLTxn()).Update(ctx, func( + txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, ) error { ju.UpdatePayload(&newPayload) if newProgress != nil { ju.UpdateProgress(newProgress) } return nil - }) - - if err != nil { + }); err != nil { return err } diff --git a/pkg/ccl/changefeedccl/authorization.go b/pkg/ccl/changefeedccl/authorization.go index 59ec236993b9..2c40cfbd483c 100644 --- a/pkg/ccl/changefeedccl/authorization.go +++ b/pkg/ccl/changefeedccl/authorization.go @@ -114,7 +114,7 @@ func authorizeUserToCreateChangefeed( return errors.Newf("failed to parse url %s", sinkURI) } if uri.Scheme == changefeedbase.SinkSchemeExternalConnection { - ec, err := externalconn.LoadExternalConnection(ctx, uri.Host, p.ExecCfg().InternalExecutor, p.Txn()) + ec, err := externalconn.LoadExternalConnection(ctx, uri.Host, p.InternalSQLTxn()) if err != nil { return errors.Wrap(err, "failed to load external connection object") } diff --git a/pkg/ccl/changefeedccl/cdceval/BUILD.bazel b/pkg/ccl/changefeedccl/cdceval/BUILD.bazel index 2dc7f7edc7a1..a518b6a38528 100644 --- a/pkg/ccl/changefeedccl/cdceval/BUILD.bazel +++ b/pkg/ccl/changefeedccl/cdceval/BUILD.bazel @@ -21,7 +21,6 @@ go_library( "//pkg/ccl/changefeedccl/changefeedbase", "//pkg/clusterversion", "//pkg/jobs/jobspb", - "//pkg/kv", "//pkg/roachpb", "//pkg/security/username", "//pkg/sql", @@ -31,6 +30,7 @@ go_library( "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/descs", "//pkg/sql/execinfra", + "//pkg/sql/isql", "//pkg/sql/parser", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", diff --git 
a/pkg/ccl/changefeedccl/cdceval/plan.go b/pkg/ccl/changefeedccl/cdceval/plan.go index c8d685d84160..0fe820e6bd40 100644 --- a/pkg/ccl/changefeedccl/cdceval/plan.go +++ b/pkg/ccl/changefeedccl/cdceval/plan.go @@ -14,13 +14,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -168,23 +168,22 @@ func withPlanner( sd sessiondatapb.SessionData, fn func(ctx context.Context, execCtx sql.JobExecContext, cleanup func()) error, ) error { - return sql.DescsTxn(ctx, execCfg, - func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - if err := txn.SetFixedTimestamp(ctx, schemaTS); err != nil { - return err - } + return sql.DescsTxn(ctx, execCfg, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + if err := txn.KV().SetFixedTimestamp(ctx, schemaTS); err != nil { + return err + } - // Current implementation relies on row-by-row evaluation; - // so, ensure vectorized engine is off. - sd.VectorizeMode = sessiondatapb.VectorizeOff - planner, cleanup := sql.NewInternalPlanner( - "cdc-expr", txn, - user, - &sql.MemoryMetrics{}, // TODO(yevgeniy): Use appropriate metrics. - execCfg, - sd, - sql.WithDescCollection(col), - ) - return fn(ctx, planner.(sql.JobExecContext), cleanup) - }) + // Current implementation relies on row-by-row evaluation; + // so, ensure vectorized engine is off. 
+ sd.VectorizeMode = sessiondatapb.VectorizeOff + planner, cleanup := sql.NewInternalPlanner( + "cdc-expr", txn.KV(), + user, + &sql.MemoryMetrics{}, // TODO(yevgeniy): Use appropriate metrics. + execCfg, + sd, + sql.WithDescCollection(col), + ) + return fn(ctx, planner.(sql.JobExecContext), cleanup) + }) } diff --git a/pkg/ccl/changefeedccl/cdcevent/rowfetcher_test.go b/pkg/ccl/changefeedccl/cdcevent/rowfetcher_test.go index 292973db9ff9..77e7b92ca083 100644 --- a/pkg/ccl/changefeedccl/cdcevent/rowfetcher_test.go +++ b/pkg/ccl/changefeedccl/cdcevent/rowfetcher_test.go @@ -70,7 +70,7 @@ func TestRowFetcherCache(t *testing.T) { rfCache, err := newRowFetcherCache(ctx, serverCfg.Codec, serverCfg.LeaseManager.(*lease.Manager), serverCfg.CollectionFactory, - serverCfg.DB, + serverCfg.DB.KV(), targets) if err != nil { t.Fatal(err) diff --git a/pkg/ccl/changefeedccl/cdctest/BUILD.bazel b/pkg/ccl/changefeedccl/cdctest/BUILD.bazel index 7555a3595208..e93e5f525ffb 100644 --- a/pkg/ccl/changefeedccl/cdctest/BUILD.bazel +++ b/pkg/ccl/changefeedccl/cdctest/BUILD.bazel @@ -19,12 +19,12 @@ go_library( "//pkg/jobs", "//pkg/jobs/jobspb", "//pkg/keys", - "//pkg/kv", "//pkg/kv/kvclient/rangefeed", "//pkg/roachpb", "//pkg/sql", "//pkg/sql/catalog", "//pkg/sql/catalog/descs", + "//pkg/sql/isql", "//pkg/sql/sem/tree", "//pkg/testutils/sqlutils", "//pkg/util", diff --git a/pkg/ccl/changefeedccl/cdctest/row.go b/pkg/ccl/changefeedccl/cdctest/row.go index 7a0137a3ed54..7598ed5136f5 100644 --- a/pkg/ccl/changefeedccl/cdctest/row.go +++ b/pkg/ccl/changefeedccl/cdctest/row.go @@ -14,12 +14,12 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + 
"github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -102,8 +102,8 @@ func GetHydratedTableDescriptor( execCfg := execCfgI.(sql.ExecutorConfig) require.NoError(t, sql.DescsTxn(context.Background(), &execCfg, - func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { - _, td, err = descs.PrefixAndTable(ctx, col.ByName(txn).Get(), tree.NewTableNameWithSchema(dbName, scName, tableName)) + func(ctx context.Context, txn isql.Txn, col *descs.Collection) (err error) { + _, td, err = descs.PrefixAndTable(ctx, col.ByName(txn.KV()).Get(), tree.NewTableNameWithSchema(dbName, scName, tableName)) return err })) require.NotNil(t, td) diff --git a/pkg/ccl/changefeedccl/changefeed_dist.go b/pkg/ccl/changefeedccl/changefeed_dist.go index da886357a610..617f9925875e 100644 --- a/pkg/ccl/changefeedccl/changefeed_dist.go +++ b/pkg/ccl/changefeedccl/changefeed_dist.go @@ -25,6 +25,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/physicalplan" @@ -139,17 +140,17 @@ func fetchTableDescriptors( var targetDescs []catalog.TableDescriptor fetchSpans := func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { targetDescs = make([]catalog.TableDescriptor, 0, targets.NumUniqueTables()) - if err := txn.SetFixedTimestamp(ctx, ts); err != nil { + if err := txn.KV().SetFixedTimestamp(ctx, ts); err != nil { return err } // Note that all targets are currently guaranteed to have a Table ID // and lie within the primary 
index span. Deduplication is important // here as requesting the same span twice will deadlock. return targets.EachTableID(func(id catid.DescID) error { - tableDesc, err := descriptors.ByID(txn).WithoutNonPublic().Get().Table(ctx, id) + tableDesc, err := descriptors.ByID(txn.KV()).WithoutNonPublic().Get().Table(ctx, id) if err != nil { return err } diff --git a/pkg/ccl/changefeedccl/changefeed_processors.go b/pkg/ccl/changefeedccl/changefeed_processors.go index db9202bc7441..404f4e3a34aa 100644 --- a/pkg/ccl/changefeedccl/changefeed_processors.go +++ b/pkg/ccl/changefeedccl/changefeed_processors.go @@ -20,13 +20,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/schemafeed" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" @@ -363,9 +363,9 @@ func (ca *changeAggregator) makeKVFeedCfg( return kvfeed.Config{ Writer: buf, Settings: cfg.Settings, - DB: cfg.DB, + DB: cfg.DB.KV(), Codec: cfg.Codec, - Clock: cfg.DB.Clock(), + Clock: cfg.DB.KV().Clock(), Gossip: cfg.Gossip, Spans: spans, CheckpointSpans: ca.spec.Checkpoint.Spans, @@ -1226,8 +1226,8 @@ func (cf *changeFrontier) checkpointJobProgress( var updateSkipped error if cf.js.job != nil { - if err := cf.js.job.Update(cf.Ctx(), nil, func( - txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, + if err := cf.js.job.NoTxn().Update(cf.Ctx(), func( + txn isql.Txn, md jobs.JobMetadata, ju 
*jobs.JobUpdater, ) error { // If we're unable to update the job due to the job state, such as during // pause-requested, simply skip the checkpoint @@ -1292,7 +1292,7 @@ func (cf *changeFrontier) checkpointJobProgress( // the changefeed's targets to the current highwater mark. The record is // cleared during changefeedResumer.OnFailOrCancel func (cf *changeFrontier) manageProtectedTimestamps( - ctx context.Context, txn *kv.Txn, progress *jobspb.ChangefeedProgress, + ctx context.Context, txn isql.Txn, progress *jobspb.ChangefeedProgress, ) error { ptsUpdateInterval := changefeedbase.ProtectTimestampInterval.Get(&cf.flowCtx.Cfg.Settings.SV) if timeutil.Since(cf.lastProtectedTimestampUpdate) < ptsUpdateInterval { @@ -1300,7 +1300,7 @@ func (cf *changeFrontier) manageProtectedTimestamps( } cf.lastProtectedTimestampUpdate = timeutil.Now() - pts := cf.flowCtx.Cfg.ProtectedTimestampProvider + pts := cf.flowCtx.Cfg.ProtectedTimestampProvider.WithTxn(txn) // Create / advance the protected timestamp record to the highwater mark highWater := cf.frontier.Frontier() @@ -1311,12 +1311,12 @@ func (cf *changeFrontier) manageProtectedTimestamps( recordID := progress.ProtectedTimestampRecord if recordID == uuid.Nil { ptr := createProtectedTimestampRecord(ctx, cf.flowCtx.Codec(), cf.spec.JobID, AllTargets(cf.spec.Feed), highWater, progress) - if err := pts.Protect(ctx, txn, ptr); err != nil { + if err := pts.Protect(ctx, ptr); err != nil { return err } } else { log.VEventf(ctx, 2, "updating protected timestamp %v at %v", recordID, highWater) - if err := pts.UpdateTimestamp(ctx, txn, recordID, highWater); err != nil { + if err := pts.UpdateTimestamp(ctx, recordID, highWater); err != nil { return err } } diff --git a/pkg/ccl/changefeedccl/changefeed_stmt.go b/pkg/ccl/changefeedccl/changefeed_stmt.go index f86856dce563..a16a7df2a91e 100644 --- a/pkg/ccl/changefeedccl/changefeed_stmt.go +++ b/pkg/ccl/changefeedccl/changefeed_stmt.go @@ -39,6 +39,7 @@ import ( 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/exprutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice" @@ -279,14 +280,14 @@ func changefeedPlanHook( // must have specified transaction to use, and is responsible for committing // transaction. - txn := p.ExtendedEvalContext().Txn - _, err := p.ExecCfg().JobRegistry.CreateAdoptableJobWithTxn(ctx, *jr, jobID, txn) + _, err := p.ExecCfg().JobRegistry.CreateAdoptableJobWithTxn(ctx, *jr, jobID, p.InternalSQLTxn()) if err != nil { return err } if ptr != nil { - if err := p.ExecCfg().ProtectedTimestampProvider.Protect(ctx, txn, ptr); err != nil { + pts := p.ExecCfg().ProtectedTimestampProvider.WithTxn(p.InternalSQLTxn()) + if err := pts.Protect(ctx, ptr); err != nil { return err } } @@ -301,12 +302,12 @@ func changefeedPlanHook( } } - if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := p.ExecCfg().InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { if err := p.ExecCfg().JobRegistry.CreateStartableJobWithTxn(ctx, &sj, jobID, txn, *jr); err != nil { return err } if ptr != nil { - return p.ExecCfg().ProtectedTimestampProvider.Protect(ctx, txn, ptr) + return p.ExecCfg().ProtectedTimestampProvider.WithTxn(txn).Protect(ctx, ptr) } return nil }); err != nil { @@ -956,7 +957,7 @@ func validateDetailsAndOptions( // TODO(yevgeniy): Add virtual column support. 
func validateAndNormalizeChangefeedExpression( ctx context.Context, - execCtx sql.JobExecContext, + execCtx sql.PlanHookState, opts changefeedbase.StatementOptions, sc *tree.SelectClause, descriptors map[tree.TablePattern]catalog.Descriptor, @@ -991,7 +992,7 @@ func (b *changefeedResumer) setJobRunningStatus( } status := jobs.RunningStatus(fmt.Sprintf(fmtOrMsg, args...)) - if err := b.job.RunningStatus(ctx, nil, + if err := b.job.NoTxn().RunningStatus(ctx, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) { return status, nil }, @@ -1040,8 +1041,8 @@ func (b *changefeedResumer) handleChangefeedError( const errorFmt = "job failed (%v) but is being paused because of %s=%s" errorMessage := fmt.Sprintf(errorFmt, changefeedErr, changefeedbase.OptOnError, changefeedbase.OptOnErrorPause) - return b.job.PauseRequested(ctx, nil /* txn */, func(ctx context.Context, - planHookState interface{}, txn *kv.Txn, progress *jobspb.Progress) error { + return b.job.NoTxn().PauseRequestedWithFunc(ctx, func(ctx context.Context, + planHookState interface{}, txn isql.Txn, progress *jobspb.Progress) error { err := b.OnPauseRequest(ctx, jobExec, txn, progress) if err != nil { return err @@ -1136,8 +1137,12 @@ func (b *changefeedResumer) OnFailOrCancel( exec := jobExec.(sql.JobExecContext) execCfg := exec.ExecCfg() progress := b.job.Progress() - b.maybeCleanUpProtectedTimestamp(ctx, execCfg.DB, execCfg.ProtectedTimestampProvider, - progress.GetChangefeed().ProtectedTimestampRecord) + b.maybeCleanUpProtectedTimestamp( + ctx, + execCfg.InternalDB, + execCfg.ProtectedTimestampProvider, + progress.GetChangefeed().ProtectedTimestampRecord, + ) // If this job has failed (not canceled), increment the counter. if jobs.HasErrJobCanceled( @@ -1154,13 +1159,13 @@ func (b *changefeedResumer) OnFailOrCancel( // Try to clean up a protected timestamp created by the changefeed. 
func (b *changefeedResumer) maybeCleanUpProtectedTimestamp( - ctx context.Context, db *kv.DB, pts protectedts.Storage, ptsID uuid.UUID, + ctx context.Context, db isql.DB, pts protectedts.Manager, ptsID uuid.UUID, ) { if ptsID == uuid.Nil { return } - if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return pts.Release(ctx, txn, ptsID) + if err := db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return pts.WithTxn(txn).Release(ctx, ptsID) }); err != nil && !errors.Is(err, protectedts.ErrNotExists) { // NB: The record should get cleaned up by the reconciliation loop. // No good reason to cause more trouble by returning an error here. @@ -1174,7 +1179,7 @@ var _ jobs.PauseRequester = (*changefeedResumer)(nil) // OnPauseRequest implements jobs.PauseRequester. If this changefeed is being // paused, we may want to clear the protected timestamp record. func (b *changefeedResumer) OnPauseRequest( - ctx context.Context, jobExec interface{}, txn *kv.Txn, progress *jobspb.Progress, + ctx context.Context, jobExec interface{}, txn isql.Txn, progress *jobspb.Progress, ) error { details := b.job.Details().(jobspb.ChangefeedDetails) @@ -1185,7 +1190,8 @@ func (b *changefeedResumer) OnPauseRequest( // Release existing pts record to avoid a single changefeed left on pause // resulting in storage issues if cp.ProtectedTimestampRecord != uuid.Nil { - if err := execCfg.ProtectedTimestampProvider.Release(ctx, txn, cp.ProtectedTimestampRecord); err != nil { + pts := execCfg.ProtectedTimestampProvider.WithTxn(txn) + if err := pts.Release(ctx, cp.ProtectedTimestampRecord); err != nil { log.Warningf(ctx, "failed to release protected timestamp %v: %v", cp.ProtectedTimestampRecord, err) } else { cp.ProtectedTimestampRecord = uuid.Nil @@ -1199,9 +1205,9 @@ func (b *changefeedResumer) OnPauseRequest( if resolved == nil { return nil } - pts := execCfg.ProtectedTimestampProvider + pts := execCfg.ProtectedTimestampProvider.WithTxn(txn) ptr := 
createProtectedTimestampRecord(ctx, execCfg.Codec, b.job.ID(), AllTargets(details), *resolved, cp) - return pts.Protect(ctx, txn, ptr) + return pts.Protect(ctx, ptr) } return nil @@ -1407,7 +1413,7 @@ func maybeUpgradePreProductionReadyExpression( const useReadLock = false if err := jobExec.ExecCfg().JobRegistry.UpdateJobWithTxn(ctx, jobID, nil, useReadLock, - func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + func(txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { payload := md.Payload payload.Details = jobspb.WrapPayloadDetails(details) ju.UpdatePayload(payload) diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go index 0181a5e7f5c6..833196a8dbe9 100644 --- a/pkg/ccl/changefeedccl/changefeed_test.go +++ b/pkg/ccl/changefeedccl/changefeed_test.go @@ -48,7 +48,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" - "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptstorage" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server" @@ -5259,7 +5259,9 @@ func TestChangefeedProtectedTimestampOnPause(t *testing.T) { ctx := context.Background() serverCfg := s.Server.DistSQLServer().(*distsql.ServerImpl).ServerConfig jr := serverCfg.JobRegistry - pts := serverCfg.ProtectedTimestampProvider + pts := ptstorage.WithDatabase( + serverCfg.ProtectedTimestampProvider, serverCfg.DB, + ) feedJob := foo.(cdctest.EnterpriseTestFeed) require.NoError(t, feedJob.Pause()) @@ -5270,11 +5272,8 @@ func TestChangefeedProtectedTimestampOnPause(t *testing.T) { details := progress.Details.(*jobspb.Progress_Changefeed).Changefeed if shouldPause { require.NotEqual(t, uuid.Nil, details.ProtectedTimestampRecord) - 
var r *ptpb.Record - require.NoError(t, serverCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { - r, err = pts.GetRecord(ctx, txn, details.ProtectedTimestampRecord) - return err - })) + r, err := pts.GetRecord(ctx, details.ProtectedTimestampRecord) + require.NoError(t, err) require.True(t, r.Timestamp.LessEq(*progress.GetHighWater())) } else { require.Equal(t, uuid.Nil, details.ProtectedTimestampRecord) @@ -5289,15 +5288,11 @@ func TestChangefeedProtectedTimestampOnPause(t *testing.T) { j, err := jr.LoadJob(ctx, feedJob.JobID()) require.NoError(t, err) details := j.Progress().Details.(*jobspb.Progress_Changefeed).Changefeed - - err = serverCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { - r, err := pts.GetRecord(ctx, txn, details.ProtectedTimestampRecord) - if err != nil || r.Timestamp.Less(resolvedTs) { - return fmt.Errorf("expected protected timestamp record %v to have timestamp greater than %v", r, resolvedTs) - } - return nil - }) - return err + r, err := pts.GetRecord(ctx, details.ProtectedTimestampRecord) + if err != nil || r.Timestamp.Less(resolvedTs) { + return fmt.Errorf("expected protected timestamp record %v to have timestamp greater than %v", r, resolvedTs) + } + return nil }) } } diff --git a/pkg/ccl/changefeedccl/scheduled_changefeed.go b/pkg/ccl/changefeedccl/scheduled_changefeed.go index 55895518f170..ba0294bf56e3 100644 --- a/pkg/ccl/changefeedccl/scheduled_changefeed.go +++ b/pkg/ccl/changefeedccl/scheduled_changefeed.go @@ -20,22 +20,20 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/scheduledjobs/schedulebase" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/exprutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -76,12 +74,12 @@ var _ jobs.ScheduledJobExecutor = (*scheduledChangefeedExecutor)(nil) // ExecuteJob implements jobs.ScheduledJobExecutor interface. func (s *scheduledChangefeedExecutor) ExecuteJob( ctx context.Context, + txn isql.Txn, cfg *scheduledjobs.JobExecutionConfig, env scheduledjobs.JobSchedulerEnv, sj *jobs.ScheduledJob, - txn *kv.Txn, ) error { - if err := s.executeChangefeed(ctx, cfg, sj, txn); err != nil { + if err := s.executeChangefeed(ctx, txn, cfg, sj); err != nil { s.metrics.NumFailed.Inc(1) return err } @@ -92,13 +90,12 @@ func (s *scheduledChangefeedExecutor) ExecuteJob( // NotifyJobTermination implements jobs.ScheduledJobExecutor interface. func (s *scheduledChangefeedExecutor) NotifyJobTermination( ctx context.Context, + txn isql.Txn, jobID jobspb.JobID, jobStatus jobs.Status, details jobspb.Details, env scheduledjobs.JobSchedulerEnv, schedule *jobs.ScheduledJob, - ex sqlutil.InternalExecutor, - txn *kv.Txn, ) error { if jobStatus == jobs.StatusSucceeded { s.metrics.NumSucceeded.Inc(1) @@ -122,12 +119,7 @@ func (s *scheduledChangefeedExecutor) Metrics() metric.Struct { // GetCreateScheduleStatement implements jobs.ScheduledJobExecutor interface. 
func (s *scheduledChangefeedExecutor) GetCreateScheduleStatement( - ctx context.Context, - env scheduledjobs.JobSchedulerEnv, - txn *kv.Txn, - descsCol *descs.Collection, - sj *jobs.ScheduledJob, - ex sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, env scheduledjobs.JobSchedulerEnv, sj *jobs.ScheduledJob, ) (string, error) { changefeedNode, err := extractChangefeedStatement(sj) if err != nil { @@ -174,7 +166,7 @@ func (s *scheduledChangefeedExecutor) GetCreateScheduleStatement( // executeChangefeed runs the changefeed. func (s *scheduledChangefeedExecutor) executeChangefeed( - ctx context.Context, cfg *scheduledjobs.JobExecutionConfig, sj *jobs.ScheduledJob, txn *kv.Txn, + ctx context.Context, txn isql.Txn, cfg *scheduledjobs.JobExecutionConfig, sj *jobs.ScheduledJob, ) error { changefeedStmt, err := extractChangefeedStatement(sj) if err != nil { @@ -197,7 +189,7 @@ func (s *scheduledChangefeedExecutor) executeChangefeed( sj.ScheduleID(), tree.AsString(changefeedStmt)) // Invoke changefeed plan hook. 
- hook, cleanup := cfg.PlanHookMaker("exec-changefeed", txn, sj.Owner()) + hook, cleanup := cfg.PlanHookMaker("exec-changefeed", txn.KV(), sj.Owner()) defer cleanup() changefeedFn, err := planCreateChangefeed(ctx, hook.(sql.PlanHookState), changefeedStmt) if err != nil { @@ -547,7 +539,7 @@ func doCreateChangefeedSchedule( resultsCh chan<- tree.Datums, ) error { - env := sql.JobSchedulerEnv(p.ExecCfg()) + env := sql.JobSchedulerEnv(p.ExecCfg().JobsKnobs()) if knobs, ok := p.ExecCfg().DistSQLSrv.TestingKnobs.JobsTestingKnobs.(*jobs.TestingKnobs); ok { if knobs.JobSchedulerEnv != nil { @@ -625,7 +617,7 @@ func doCreateChangefeedSchedule( es.SetNextRun(*firstRun) } - if err := es.Create(ctx, p.ExecCfg().InternalExecutor, p.Txn()); err != nil { + if err := jobs.ScheduledJobTxn(p.InternalSQLTxn()).Create(ctx, es); err != nil { return err } diff --git a/pkg/ccl/changefeedccl/scheduled_changefeed_test.go b/pkg/ccl/changefeedccl/scheduled_changefeed_test.go index 74eed255caee..7e169f9fe733 100644 --- a/pkg/ccl/changefeedccl/scheduled_changefeed_test.go +++ b/pkg/ccl/changefeedccl/scheduled_changefeed_test.go @@ -140,7 +140,7 @@ func (h *testHelper) createChangefeedSchedule( return nil, err } // Query system.scheduled_job table and load those schedules. 
- datums, cols, err := h.cfg.InternalExecutor.QueryRowExWithCols( + datums, cols, err := h.cfg.DB.Executor().QueryRowExWithCols( context.Background(), "sched-load", nil, sessiondata.RootUserSessionDataOverride, "SELECT * FROM system.scheduled_jobs WHERE schedule_id = $1", @@ -355,7 +355,7 @@ func TestCreateChangefeedScheduleIfNotExists(t *testing.T) { const selectQuery = "SELECT label FROM [SHOW SCHEDULES FOR CHANGEFEED]" - rows, err := th.cfg.InternalExecutor.QueryBufferedEx( + rows, err := th.cfg.DB.Executor().QueryBufferedEx( context.Background(), "check-sched", nil, sessiondata.RootUserSessionDataOverride, selectQuery) @@ -368,7 +368,7 @@ func TestCreateChangefeedScheduleIfNotExists(t *testing.T) { th.sqlDB.Exec(t, fmt.Sprintf(createQuery, newScheduleLabel)) - rows, err = th.cfg.InternalExecutor.QueryBufferedEx( + rows, err = th.cfg.DB.Executor().QueryBufferedEx( context.Background(), "check-sched2", nil, sessiondata.RootUserSessionDataOverride, selectQuery) diff --git a/pkg/ccl/changefeedccl/schemafeed/schema_feed.go b/pkg/ccl/changefeedccl/schemafeed/schema_feed.go index e30e6f34dfa4..b569c3d82b5f 100644 --- a/pkg/ccl/changefeedccl/schemafeed/schema_feed.go +++ b/pkg/ccl/changefeedccl/schemafeed/schema_feed.go @@ -83,16 +83,14 @@ func New( tolerances changefeedbase.CanHandle, ) SchemaFeed { m := &schemaFeed{ - filter: schemaChangeEventFilters[events], - db: cfg.DB, - clock: cfg.DB.Clock(), - settings: cfg.Settings, - targets: targets, - leaseMgr: cfg.LeaseManager.(*lease.Manager), - collectionFactory: cfg.CollectionFactory, - internalExecutorFactory: cfg.InternalExecutorFactory, - metrics: metrics, - tolerances: tolerances, + filter: schemaChangeEventFilters[events], + db: cfg.DB, + clock: cfg.DB.KV().Clock(), + settings: cfg.Settings, + targets: targets, + leaseMgr: cfg.LeaseManager.(*lease.Manager), + metrics: metrics, + tolerances: tolerances, } m.mu.previousTableVersion = make(map[descpb.ID]catalog.TableDescriptor) m.mu.highWater = initialHighwater @@ 
-112,7 +110,7 @@ func New( // lowest timestamp where at least one table doesn't meet the invariant. type schemaFeed struct { filter tableEventFilter - db *kv.DB + db descs.DB clock *hlc.Clock settings *cluster.Settings targets changefeedbase.Targets @@ -122,9 +120,7 @@ type schemaFeed struct { // TODO(ajwerner): Should this live underneath the FilterFunc? // Should there be another function to decide whether to update the // lease manager? - leaseMgr *lease.Manager - collectionFactory *descs.CollectionFactory - internalExecutorFactory descs.TxnManager + leaseMgr *lease.Manager mu struct { syncutil.Mutex @@ -264,15 +260,16 @@ func (tf *schemaFeed) primeInitialTableDescs(ctx context.Context) error { tf.mu.Unlock() var initialDescs []catalog.Descriptor initialTableDescsFn := func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn descs.Txn, ) error { + descriptors := txn.Descriptors() initialDescs = initialDescs[:0] - if err := txn.SetFixedTimestamp(ctx, initialTableDescTs); err != nil { + if err := txn.KV().SetFixedTimestamp(ctx, initialTableDescTs); err != nil { return err } // Note that all targets are currently guaranteed to be tables. 
return tf.targets.EachTableID(func(id descpb.ID) error { - tableDesc, err := descriptors.ByID(txn).WithoutNonPublic().Get().Table(ctx, id) + tableDesc, err := descriptors.ByID(txn.KV()).WithoutNonPublic().Get().Table(ctx, id) if err != nil { return err } @@ -281,7 +278,7 @@ func (tf *schemaFeed) primeInitialTableDescs(ctx context.Context) error { }) } - if err := tf.internalExecutorFactory.DescsTxn(ctx, tf.db, initialTableDescsFn); err != nil { + if err := tf.db.DescsTxn(ctx, initialTableDescsFn); err != nil { return err } @@ -632,7 +629,7 @@ func (tf *schemaFeed) fetchDescriptorVersions( codec := tf.leaseMgr.Codec() start := timeutil.Now() res, err := fetchDescriptorsWithPriorityOverride( - ctx, tf.settings, tf.db.NonTransactionalSender(), codec, startTS, endTS) + ctx, tf.settings, tf.db.KV().NonTransactionalSender(), codec, startTS, endTS) if log.ExpensiveLogEnabled(ctx, 2) { log.Infof(ctx, `fetched table descs (%s,%s] took %s err=%s`, startTS, endTS, timeutil.Since(start), err) } diff --git a/pkg/ccl/changefeedccl/sink.go b/pkg/ccl/changefeedccl/sink.go index 3f602e30f7f1..ad64b7e85941 100644 --- a/pkg/ccl/changefeedccl/sink.go +++ b/pkg/ccl/changefeedccl/sink.go @@ -232,8 +232,10 @@ func getSink( }) case u.Scheme == changefeedbase.SinkSchemeExternalConnection: return validateOptionsAndMakeSink(changefeedbase.ExternalConnectionValidOptions, func() (Sink, error) { - return makeExternalConnectionSink(ctx, sinkURL{URL: u}, user, serverCfg.DB, - serverCfg.Executor, serverCfg, feedCfg, timestampOracle, jobID, m) + return makeExternalConnectionSink( + ctx, sinkURL{URL: u}, user, serverCfg.DB, + serverCfg, feedCfg, timestampOracle, jobID, m, + ) }) case u.Scheme == "": return nil, errors.Errorf(`no scheme found for sink URL %q`, feedCfg.SinkURI) diff --git a/pkg/ccl/changefeedccl/sink_cloudstorage_test.go b/pkg/ccl/changefeedccl/sink_cloudstorage_test.go index 4fe5f1d88c97..6e234dc1fe82 100644 --- a/pkg/ccl/changefeedccl/sink_cloudstorage_test.go +++ 
b/pkg/ccl/changefeedccl/sink_cloudstorage_test.go @@ -175,9 +175,7 @@ func TestCloudStorageSink(t *testing.T) { return cloud.ExternalStorageFromURI(ctx, uri, base.ExternalIODirConfig{}, settings, clientFactory, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, opts...) diff --git a/pkg/ccl/changefeedccl/sink_external_connection.go b/pkg/ccl/changefeedccl/sink_external_connection.go index 57f5df0cafb5..8dcc75fd47fb 100644 --- a/pkg/ccl/changefeedccl/sink_external_connection.go +++ b/pkg/ccl/changefeedccl/sink_external_connection.go @@ -16,11 +16,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/cloud/externalconn" "github.com/cockroachdb/cockroach/pkg/cloud/externalconn/connectionpb" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/errors" ) @@ -28,8 +27,7 @@ func makeExternalConnectionSink( ctx context.Context, u sinkURL, user username.SQLUsername, - db *kv.DB, - ie sqlutil.InternalExecutor, + db isql.DB, serverCfg *execinfra.ServerConfig, // TODO(cdc): Replace jobspb.ChangefeedDetails with ChangefeedConfig. feedCfg jobspb.ChangefeedDetails, @@ -46,9 +44,9 @@ func makeExternalConnectionSink( // Retrieve the external connection object from the system table. 
var ec externalconn.ExternalConnection - if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { var err error - ec, err = externalconn.LoadExternalConnection(ctx, externalConnectionName, ie, txn) + ec, err = externalconn.LoadExternalConnection(ctx, externalConnectionName, txn) return err }); err != nil { return nil, errors.Wrap(err, "failed to load external connection object") diff --git a/pkg/ccl/changefeedccl/testfeed_test.go b/pkg/ccl/changefeedccl/testfeed_test.go index 09977a9966be..a27f066faac4 100644 --- a/pkg/ccl/changefeedccl/testfeed_test.go +++ b/pkg/ccl/changefeedccl/testfeed_test.go @@ -37,9 +37,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/kvevent" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/distsql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -350,7 +350,7 @@ func (r *reportErrorResumer) OnFailOrCancel( // OnPauseRequest implements PauseRequester interface. 
func (r *reportErrorResumer) OnPauseRequest( - ctx context.Context, execCtx interface{}, txn *kv.Txn, details *jobspb.Progress, + ctx context.Context, execCtx interface{}, txn isql.Txn, details *jobspb.Progress, ) error { return r.wrapped.(*changefeedResumer).OnPauseRequest(ctx, execCtx, txn, details) } diff --git a/pkg/ccl/jobsccl/jobsprotectedtsccl/BUILD.bazel b/pkg/ccl/jobsccl/jobsprotectedtsccl/BUILD.bazel index 165b9754f22f..39ded068bbfd 100644 --- a/pkg/ccl/jobsccl/jobsprotectedtsccl/BUILD.bazel +++ b/pkg/ccl/jobsccl/jobsprotectedtsccl/BUILD.bazel @@ -16,7 +16,6 @@ go_test( "//pkg/jobs/jobspb", "//pkg/jobs/jobsprotectedts", "//pkg/keys", - "//pkg/kv", "//pkg/kv/kvserver/protectedts", "//pkg/kv/kvserver/protectedts/ptpb", "//pkg/roachpb", @@ -28,6 +27,7 @@ go_test( "//pkg/settings/cluster", "//pkg/sql", "//pkg/sql/catalog/descpb", + "//pkg/sql/isql", "//pkg/testutils", "//pkg/testutils/serverutils", "//pkg/testutils/sqlutils", diff --git a/pkg/ccl/jobsccl/jobsprotectedtsccl/jobs_protected_ts_test.go b/pkg/ccl/jobsccl/jobsprotectedtsccl/jobs_protected_ts_test.go index 8a50476137a0..6de7c4c66bd5 100644 --- a/pkg/ccl/jobsccl/jobsprotectedtsccl/jobs_protected_ts_test.go +++ b/pkg/ccl/jobsccl/jobsprotectedtsccl/jobs_protected_ts_test.go @@ -19,7 +19,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -28,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils" 
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -92,10 +92,11 @@ func testJobsProtectedTimestamp( DescriptorIDs: []descpb.ID{42}, } } + insqlDB := execCfg.InternalDB mkJobAndRecord := func() (j *jobs.Job, rec *ptpb.Record) { ts := clock.Now() jobID := jr.MakeJobID() - require.NoError(t, execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + require.NoError(t, insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { if j, err = jr.CreateJobWithTxn(ctx, mkJobRec(), jobID, txn); err != nil { return err } @@ -103,21 +104,21 @@ func testJobsProtectedTimestamp( targetToProtect := ptpb.MakeClusterTarget() rec = jobsprotectedts.MakeRecord(uuid.MakeV4(), int64(jobID), ts, deprecatedSpansToProtect, jobsprotectedts.Jobs, targetToProtect) - return ptp.Protect(ctx, txn, rec) + return ptp.WithTxn(txn).Protect(ctx, rec) })) return j, rec } jMovedToFailed, recMovedToFailed := mkJobAndRecord() - require.NoError(t, execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { return jr.Failed(ctx, txn, jMovedToFailed.ID(), io.ErrUnexpectedEOF) })) jFinished, recFinished := mkJobAndRecord() - require.NoError(t, execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { return jr.Succeeded(ctx, txn, jFinished.ID()) })) _, recRemains := mkJobAndRecord() - ensureNotExists := func(ctx context.Context, txn *kv.Txn, ptsID uuid.UUID) (err error) { - _, err = ptp.GetRecord(ctx, txn, ptsID) + ensureNotExists := func(ctx context.Context, txn isql.Txn, ptsID uuid.UUID) (err error) { + _, err = ptp.WithTxn(txn).GetRecord(ctx, ptsID) if err == nil { return errors.New("found pts record, waiting for ErrNotExists") } @@ -127,14 +128,14 @@ func testJobsProtectedTimestamp( return errors.Wrap(err, "waiting for 
ErrNotExists") } testutils.SucceedsSoon(t, func() (err error) { - return execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { if err := ensureNotExists(ctx, txn, recMovedToFailed.ID.GetUUID()); err != nil { return err } if err := ensureNotExists(ctx, txn, recFinished.ID.GetUUID()); err != nil { return err } - _, err := ptp.GetRecord(ctx, txn, recRemains.ID.GetUUID()) + _, err := ptp.WithTxn(txn).GetRecord(ctx, recRemains.ID.GetUUID()) require.NoError(t, err) return err }) @@ -212,6 +213,7 @@ func testSchedulesProtectedTimestamp( ) { t.Helper() + insqlDB := execCfg.InternalDB mkScheduledJobRec := func(scheduleLabel string) *jobs.ScheduledJob { j := jobs.NewScheduledJob(scheduledjobs.ProdJobSchedulerEnv) j.SetScheduleLabel(scheduleLabel) @@ -225,24 +227,25 @@ func testSchedulesProtectedTimestamp( ts := clock.Now() var rec *ptpb.Record var sj *jobs.ScheduledJob - require.NoError(t, execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + require.NoError(t, insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { + schedules := jobs.ScheduledJobTxn(txn) sj = mkScheduledJobRec(scheduleLabel) - require.NoError(t, sj.Create(ctx, execCfg.InternalExecutor, txn)) + require.NoError(t, schedules.Create(ctx, sj)) deprecatedSpansToProtect := roachpb.Spans{{Key: keys.MinKey, EndKey: keys.MaxKey}} targetToProtect := ptpb.MakeClusterTarget() rec = jobsprotectedts.MakeRecord(uuid.MakeV4(), sj.ScheduleID(), ts, deprecatedSpansToProtect, jobsprotectedts.Schedules, targetToProtect) - return ptp.Protect(ctx, txn, rec) + return ptp.WithTxn(txn).Protect(ctx, rec) })) return sj, rec } sjDropped, recScheduleDropped := mkScheduleAndRecord("drop") - _, err := execCfg.InternalExecutor.Exec(ctx, "drop-schedule", nil, + _, err := insqlDB.Executor().Exec(ctx, "drop-schedule", nil, `DROP SCHEDULE $1`, sjDropped.ScheduleID()) require.NoError(t, err) _, recSchedule := 
mkScheduleAndRecord("do-not-drop") - ensureNotExists := func(ctx context.Context, txn *kv.Txn, ptsID uuid.UUID) (err error) { - _, err = ptp.GetRecord(ctx, txn, ptsID) + ensureNotExists := func(ctx context.Context, txn isql.Txn, ptsID uuid.UUID) (err error) { + _, err = ptp.WithTxn(txn).GetRecord(ctx, ptsID) if err == nil { return errors.New("found pts record, waiting for ErrNotExists") } @@ -252,11 +255,11 @@ func testSchedulesProtectedTimestamp( return errors.Wrap(err, "waiting for ErrNotExists") } testutils.SucceedsSoon(t, func() (err error) { - return execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { if err := ensureNotExists(ctx, txn, recScheduleDropped.ID.GetUUID()); err != nil { return err } - _, err := ptp.GetRecord(ctx, txn, recSchedule.ID.GetUUID()) + _, err := ptp.WithTxn(txn).GetRecord(ctx, recSchedule.ID.GetUUID()) require.NoError(t, err) return err }) diff --git a/pkg/ccl/multiregionccl/BUILD.bazel b/pkg/ccl/multiregionccl/BUILD.bazel index e6cbd56a474d..bfb12a1bba53 100644 --- a/pkg/ccl/multiregionccl/BUILD.bazel +++ b/pkg/ccl/multiregionccl/BUILD.bazel @@ -69,6 +69,7 @@ go_test( "//pkg/sql/catalog/desctestutils", "//pkg/sql/enum", "//pkg/sql/execinfra", + "//pkg/sql/isql", "//pkg/sql/parser", "//pkg/sql/rowenc", "//pkg/sql/sem/tree", diff --git a/pkg/ccl/multiregionccl/datadriven_test.go b/pkg/ccl/multiregionccl/datadriven_test.go index 2b76854eda31..e85d4690fdfa 100644 --- a/pkg/ccl/multiregionccl/datadriven_test.go +++ b/pkg/ccl/multiregionccl/datadriven_test.go @@ -19,7 +19,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvbase" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" @@ -27,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -796,8 +796,8 @@ func lookupTable(ec *sql.ExecutorConfig, database, table string) (catalog.TableD err = sql.DescsTxn( context.Background(), ec, - func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - _, desc, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).MaybeGet(), tbName) + func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + _, desc, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).MaybeGet(), tbName) if err != nil { return err } diff --git a/pkg/ccl/multiregionccl/region_util_test.go b/pkg/ccl/multiregionccl/region_util_test.go index 2b767c39c002..ab667c891822 100644 --- a/pkg/ccl/multiregionccl/region_util_test.go +++ b/pkg/ccl/multiregionccl/region_util_test.go @@ -14,12 +14,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/ccl/multiregionccl/multiregionccltestutils" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -42,12 +42,12 @@ func TestGetLocalityRegionEnumPhysicalRepresentation(t *testing.T) { tDB.Exec(t, `CREATE DATABASE foo PRIMARY REGION "us-east1" REGIONS "us-east1", "us-east2", "us-east3"`) s0 := tc.ServerTyped(0) - ief := 
s0.InternalExecutorFactory().(descs.TxnManager) + idb := s0.InternalDB().(descs.DB) dbID := descpb.ID(sqlutils.QueryDatabaseID(t, sqlDB, "foo")) t.Run("with locality that exists", func(t *testing.T) { regionEnum, err := sql.GetLocalityRegionEnumPhysicalRepresentation( - ctx, ief, s0.DB(), dbID, roachpb.Locality{ + ctx, idb, dbID, roachpb.Locality{ Tiers: []roachpb.Tier{{Key: "region", Value: "us-east2"}}, }, ) @@ -60,7 +60,7 @@ func TestGetLocalityRegionEnumPhysicalRepresentation(t *testing.T) { t.Run("with non-existent locality", func(t *testing.T) { regionEnum, err := sql.GetLocalityRegionEnumPhysicalRepresentation( - ctx, ief, s0.DB(), dbID, roachpb.Locality{ + ctx, idb, dbID, roachpb.Locality{ Tiers: []roachpb.Tier{{Key: "region", Value: "europe-west1"}}, }, ) @@ -74,7 +74,7 @@ func TestGetLocalityRegionEnumPhysicalRepresentation(t *testing.T) { t.Run("without locality", func(t *testing.T) { regionEnum, err := sql.GetLocalityRegionEnumPhysicalRepresentation( - ctx, ief, s0.DB(), dbID, roachpb.Locality{}) + ctx, idb, dbID, roachpb.Locality{}) require.NoError(t, err) // Fallback to primary region is locality information is missing. 
@@ -97,8 +97,10 @@ func TestGetRegionEnumRepresentations(t *testing.T) { tDB.Exec(t, `CREATE DATABASE foo PRIMARY REGION "us-east1" REGIONS "us-east1", "us-east2", "us-east3"`) dbID := descpb.ID(sqlutils.QueryDatabaseID(t, sqlDB, "foo")) - err := sql.TestingDescsTxn(ctx, tc.Server(0), func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - enumReps, primaryRegion, err := sql.GetRegionEnumRepresentations(ctx, txn, dbID, col) + err := sql.TestingDescsTxn(ctx, tc.Server(0), func( + ctx context.Context, txn isql.Txn, col *descs.Collection, + ) error { + enumReps, primaryRegion, err := sql.GetRegionEnumRepresentations(ctx, txn.KV(), dbID, col) require.NoError(t, err) require.Equal(t, catpb.RegionName("us-east1"), primaryRegion) @@ -122,12 +124,12 @@ func getEnumMembers( ) map[string][]byte { t.Helper() enumMembers := make(map[string][]byte) - err := sql.TestingDescsTxn(ctx, ts, func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection) error { - dbDesc, err := descsCol.ByIDWithLeased(txn).WithoutNonPublic().Get().Database(ctx, dbID) + err := sql.TestingDescsTxn(ctx, ts, func(ctx context.Context, txn isql.Txn, descsCol *descs.Collection) error { + dbDesc, err := descsCol.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Database(ctx, dbID) require.NoError(t, err) regionEnumID, err := dbDesc.MultiRegionEnumID() require.NoError(t, err) - regionEnumDesc, err := descsCol.ByIDWithLeased(txn).WithoutNonPublic().Get().Type(ctx, regionEnumID) + regionEnumDesc, err := descsCol.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Type(ctx, regionEnumID) require.NoError(t, err) for ord := 0; ord < regionEnumDesc.NumEnumMembers(); ord++ { enumMembers[regionEnumDesc.GetMemberLogicalRepresentation(ord)] = regionEnumDesc.GetMemberPhysicalRepresentation(ord) diff --git a/pkg/ccl/multitenantccl/tenantcostserver/BUILD.bazel b/pkg/ccl/multitenantccl/tenantcostserver/BUILD.bazel index 8d99ff9b6f8c..32655f8f15fa 100644 --- 
a/pkg/ccl/multitenantccl/tenantcostserver/BUILD.bazel +++ b/pkg/ccl/multitenantccl/tenantcostserver/BUILD.bazel @@ -21,12 +21,11 @@ go_library( "//pkg/server", "//pkg/settings", "//pkg/settings/cluster", - "//pkg/sql", + "//pkg/sql/isql", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/util/buildutil", "//pkg/util/log", "//pkg/util/metric", @@ -55,8 +54,7 @@ go_test( "//pkg/security/securityassets", "//pkg/security/securitytest", "//pkg/server", - "//pkg/sql", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/testutils", "//pkg/testutils/datapathutils", "//pkg/testutils/metrictestutils", diff --git a/pkg/ccl/multitenantccl/tenantcostserver/configure.go b/pkg/ccl/multitenantccl/tenantcostserver/configure.go index 1117df71210d..767733509b92 100644 --- a/pkg/ccl/multitenantccl/tenantcostserver/configure.go +++ b/pkg/ccl/multitenantccl/tenantcostserver/configure.go @@ -12,13 +12,12 @@ import ( "context" "time" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/errors" ) @@ -26,8 +25,7 @@ import ( // of the TenantUsageServer interface; see that for more details. 
func (s *instance) ReconfigureTokenBucket( ctx context.Context, - txn *kv.Txn, - ie sqlutil.InternalExecutor, + txn isql.Txn, tenantID roachpb.TenantID, availableRU float64, refillRate float64, @@ -35,11 +33,11 @@ func (s *instance) ReconfigureTokenBucket( asOf time.Time, asOfConsumedRequestUnits float64, ) error { - if err := s.checkTenantID(ctx, txn, ie, tenantID); err != nil { + if err := s.checkTenantID(ctx, txn, tenantID); err != nil { return err } h := makeSysTableHelper(ctx, tenantID) - state, err := h.readTenantState(txn, ie) + state, err := h.readTenantState(txn) if err != nil { return err } @@ -49,7 +47,7 @@ func (s *instance) ReconfigureTokenBucket( ctx, tenantID, availableRU, refillRate, maxBurstRU, asOf, asOfConsumedRequestUnits, now, state.Consumption.RU, ) - if err := h.updateTenantState(state, ie, txn); err != nil { + if err := h.updateTenantState(txn, state); err != nil { return err } return nil @@ -57,10 +55,10 @@ func (s *instance) ReconfigureTokenBucket( // checkTenantID verifies that the tenant exists and is active. 
func (s *instance) checkTenantID( - ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, tenantID roachpb.TenantID, + ctx context.Context, txn isql.Txn, tenantID roachpb.TenantID, ) error { - row, err := ie.QueryRowEx( - ctx, "check-tenant", txn, sessiondata.NodeUserSessionDataOverride, + row, err := txn.QueryRowEx( + ctx, "check-tenant", txn.KV(), sessiondata.NodeUserSessionDataOverride, `SELECT active FROM system.tenants WHERE id = $1`, tenantID.ToUint64(), ) if err != nil { diff --git a/pkg/ccl/multitenantccl/tenantcostserver/server.go b/pkg/ccl/multitenantccl/tenantcostserver/server.go index ffbd569923de..3a4911e90013 100644 --- a/pkg/ccl/multitenantccl/tenantcostserver/server.go +++ b/pkg/ccl/multitenantccl/tenantcostserver/server.go @@ -16,14 +16,14 @@ import ( "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/cockroach/pkg/util/timeutil" ) type instance struct { db *kv.DB - ief sqlutil.InternalExecutorFactory + ief isql.DB metrics Metrics timeSource timeutil.TimeSource settings *cluster.Settings @@ -41,10 +41,7 @@ var instanceInactivity = settings.RegisterDurationSetting( ) func newInstance( - settings *cluster.Settings, - db *kv.DB, - ief sqlutil.InternalExecutorFactory, - timeSource timeutil.TimeSource, + settings *cluster.Settings, db *kv.DB, ief isql.DB, timeSource timeutil.TimeSource, ) *instance { res := &instance{ db: db, @@ -67,7 +64,7 @@ func init() { server.NewTenantUsageServer = func( settings *cluster.Settings, db *kv.DB, - ief sqlutil.InternalExecutorFactory, + ief isql.DB, ) multitenant.TenantUsageServer { return newInstance(settings, db, ief, timeutil.DefaultTimeSource{}) } diff --git a/pkg/ccl/multitenantccl/tenantcostserver/server_test.go 
b/pkg/ccl/multitenantccl/tenantcostserver/server_test.go index 03c8b5f0fd8b..241cd4b29f98 100644 --- a/pkg/ccl/multitenantccl/tenantcostserver/server_test.go +++ b/pkg/ccl/multitenantccl/tenantcostserver/server_test.go @@ -23,8 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/multitenant" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/sql" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" "github.com/cockroachdb/cockroach/pkg/testutils/metrictestutils" @@ -85,7 +84,7 @@ func (ts *testState) start(t *testing.T) { ts.tenantUsage = tenantcostserver.NewInstance( ts.s.ClusterSettings(), ts.kvDB, - ts.s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory), + ts.s.InternalDB().(isql.DB), ts.clock, ) ts.metricsReg = metric.NewRegistry() @@ -244,14 +243,13 @@ func (ts *testState) configure(t *testing.T, d *datadriven.TestData) string { if err := yaml.UnmarshalStrict([]byte(d.Input), &args); err != nil { d.Fatalf(t, "failed to parse request yaml: %v", err) } - ief := ts.s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory) - if err := ief.TxnWithExecutor(context.Background(), ts.kvDB, nil /* sessionData */, func( - ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, + db := ts.s.InternalDB().(isql.DB) + if err := db.Txn(context.Background(), func( + ctx context.Context, txn isql.Txn, ) error { return ts.tenantUsage.ReconfigureTokenBucket( ctx, txn, - ie, roachpb.MustMakeTenantID(tenantID), args.AvailableRU, args.RefillRate, @@ -267,16 +265,19 @@ func (ts *testState) configure(t *testing.T, d *datadriven.TestData) string { // inspect shows all the metadata for a tenant (specified in a tenant=X // argument), in a user-friendly format. 
-func (ts *testState) inspect(t *testing.T, d *datadriven.TestData) string { +func (ts *testState) inspect(t *testing.T, d *datadriven.TestData) (res string) { tenantID := ts.tenantID(t, d) - res, err := tenantcostserver.InspectTenantMetadata( - context.Background(), - ts.s.InternalExecutor().(*sql.InternalExecutor), - nil, /* txn */ - roachpb.MustMakeTenantID(tenantID), - timeFormat, - ) - if err != nil { + if err := ts.s.InternalDB().(isql.DB).Txn(context.Background(), func( + ctx context.Context, txn isql.Txn, + ) (err error) { + res, err = tenantcostserver.InspectTenantMetadata( + context.Background(), + txn, + roachpb.MustMakeTenantID(tenantID), + timeFormat, + ) + return err + }); err != nil { d.Fatalf(t, "error inspecting tenant state: %v", err) } return res diff --git a/pkg/ccl/multitenantccl/tenantcostserver/system_table.go b/pkg/ccl/multitenantccl/tenantcostserver/system_table.go index c26562599a35..729a4ba84127 100644 --- a/pkg/ccl/multitenantccl/tenantcostserver/system_table.go +++ b/pkg/ccl/multitenantccl/tenantcostserver/system_table.go @@ -18,12 +18,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/ccl/multitenantccl/tenantcostserver/tenanttokenbucket" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/buildutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" @@ -126,12 +124,10 @@ func makeSysTableHelper(ctx context.Context, tenantID roachpb.TenantID) sysTable // // If the table was not initialized for the tenant, the tenant stats will not be // Present. 
-func (h *sysTableHelper) readTenantState( - txn *kv.Txn, ie sqlutil.InternalExecutor, -) (tenant tenantState, _ error) { +func (h *sysTableHelper) readTenantState(txn isql.Txn) (tenant tenantState, _ error) { // We could use a simplified query, but the benefit will be marginal and // this is not used in the hot path. - tenant, _, err := h.readTenantAndInstanceState(txn, ie, 0 /* instanceID */) + tenant, _, err := h.readTenantAndInstanceState(txn, 0 /* instanceID */) return tenant, err } @@ -144,13 +140,13 @@ func (h *sysTableHelper) readTenantState( // If the instance is not in the current active set (according to the table), // the instance state will not be Present. func (h *sysTableHelper) readTenantAndInstanceState( - txn *kv.Txn, ie sqlutil.InternalExecutor, instanceID base.SQLInstanceID, + txn isql.Txn, instanceID base.SQLInstanceID, ) (tenant tenantState, instance instanceState, _ error) { instance.ID = instanceID // Read the two rows for the per-tenant state (instance_id = 0) and the // per-instance state. - rows, err := ie.QueryBufferedEx( - h.ctx, "tenant-usage-select", txn, + rows, err := txn.QueryBufferedEx( + h.ctx, "tenant-usage-select", txn.KV(), sessiondata.NodeUserSessionDataOverride, `SELECT instance_id, /* 0 */ @@ -209,17 +205,15 @@ func (h *sysTableHelper) readTenantAndInstanceState( } // updateTenantState writes out an updated tenant state. -func (h *sysTableHelper) updateTenantState( - tenant tenantState, ie sqlutil.InternalExecutor, txn *kv.Txn, -) error { +func (h *sysTableHelper) updateTenantState(txn isql.Txn, tenant tenantState) error { consumption, err := protoutil.Marshal(&tenant.Consumption) if err != nil { return err } // Note: it is important that this UPSERT specifies all columns of the // table, to allow it to perform "blind" writes. 
- _, err = ie.ExecEx( - h.ctx, "tenant-usage-upsert", txn, + _, err = txn.ExecEx( + h.ctx, "tenant-usage-upsert", txn.KV(), sessiondata.NodeUserSessionDataOverride, `UPSERT INTO system.tenant_usage( tenant_id, @@ -250,7 +244,7 @@ func (h *sysTableHelper) updateTenantState( // updateTenantState writes out updated tenant and instance states. func (h *sysTableHelper) updateTenantAndInstanceState( - txn *kv.Txn, ie sqlutil.InternalExecutor, tenant tenantState, instance instanceState, + txn isql.Txn, tenant tenantState, instance instanceState, ) error { consumption, err := protoutil.Marshal(&tenant.Consumption) if err != nil { @@ -258,8 +252,8 @@ func (h *sysTableHelper) updateTenantAndInstanceState( } // Note: it is important that this UPSERT specifies all columns of the // table, to allow it to perform "blind" writes. - _, err = ie.ExecEx( - h.ctx, "tenant-usage-insert", txn, + _, err = txn.ExecEx( + h.ctx, "tenant-usage-insert", txn.KV(), sessiondata.NodeUserSessionDataOverride, `UPSERT INTO system.tenant_usage( tenant_id, @@ -304,7 +298,7 @@ func (h *sysTableHelper) updateTenantAndInstanceState( // infrequent). In addition, the SQL pod start-up process is not blocked on // tenant bucket requests (which happen in the background). func (h *sysTableHelper) accomodateNewInstance( - txn *kv.Txn, ie sqlutil.InternalExecutor, tenant *tenantState, instance *instanceState, + txn isql.Txn, tenant *tenantState, instance *instanceState, ) error { if tenant.FirstInstance == 0 || tenant.FirstInstance > instance.ID { // The new instance has the lowest ID. @@ -313,8 +307,8 @@ func (h *sysTableHelper) accomodateNewInstance( return nil } // Find the previous instance. 
- row, err := ie.QueryRowEx( - h.ctx, "find-prev-id", txn, + row, err := txn.QueryRowEx( + h.ctx, "find-prev-id", txn.KV(), sessiondata.NodeUserSessionDataOverride, `SELECT instance_id, /* 0 */ @@ -346,8 +340,8 @@ func (h *sysTableHelper) accomodateNewInstance( // Update the previous instance: its next_instance_id is the new instance. // TODO(radu): consider coalescing this with updateTenantAndInstanceState to // perform a single UPSERT. - _, err = ie.ExecEx( - h.ctx, "update-next-id", txn, + _, err = txn.ExecEx( + h.ctx, "update-next-id", txn.KV(), sessiondata.NodeUserSessionDataOverride, // Update the previous instance's next_instance_id. `UPSERT INTO system.tenant_usage( @@ -380,11 +374,11 @@ func (h *sysTableHelper) accomodateNewInstance( // if it is older than the cutoff time, the instance is removed and the next // instance ID is returned (this ID is 0 if this is the highest instance ID). func (h *sysTableHelper) maybeCleanupStaleInstance( - cutoff time.Time, instanceID base.SQLInstanceID, ie sqlutil.InternalExecutor, txn *kv.Txn, + txn isql.Txn, cutoff time.Time, instanceID base.SQLInstanceID, ) (deleted bool, nextInstance base.SQLInstanceID, _ error) { ts := tree.MustMakeDTimestamp(cutoff, time.Microsecond) - row, err := ie.QueryRowEx( - h.ctx, "tenant-usage-delete", txn, + row, err := txn.QueryRowEx( + h.ctx, "tenant-usage-delete", txn.KV(), sessiondata.NodeUserSessionDataOverride, `DELETE FROM system.tenant_usage WHERE tenant_id = $1 AND instance_id = $2 AND last_update < $3 @@ -416,7 +410,7 @@ func (h *sysTableHelper) maybeCleanupStaleInstance( // the same with startID if nothing was cleaned up, and it is 0 if we cleaned up // the last (highest ID) instance. 
func (h *sysTableHelper) maybeCleanupStaleInstances( - txn *kv.Txn, ie sqlutil.InternalExecutor, cutoff time.Time, startID, endID base.SQLInstanceID, + txn isql.Txn, cutoff time.Time, startID, endID base.SQLInstanceID, ) (nextInstance base.SQLInstanceID, _ error) { log.VEventf( h.ctx, 1, "checking stale instances (tenant=%s startID=%d endID=%d)", @@ -424,7 +418,7 @@ func (h *sysTableHelper) maybeCleanupStaleInstances( ) id := startID for n := 0; n < maxInstancesCleanup; n++ { - deleted, nextInstance, err := h.maybeCleanupStaleInstance(cutoff, id, ie, txn) + deleted, nextInstance, err := h.maybeCleanupStaleInstance(txn, cutoff, id) if err != nil { return -1, err } @@ -441,20 +435,20 @@ func (h *sysTableHelper) maybeCleanupStaleInstances( // maybeCheckInvariants checks the invariants for the system table with a random // probability and only if this is a test build. -func (h *sysTableHelper) maybeCheckInvariants(txn *kv.Txn, ie sqlutil.InternalExecutor) error { +func (h *sysTableHelper) maybeCheckInvariants(txn isql.Txn) error { if buildutil.CrdbTestBuild && rand.Intn(10) == 0 { - return h.checkInvariants(txn, ie) + return h.checkInvariants(txn) } return nil } // checkInvariants reads all rows in the system table for the given tenant and // checks that the state is consistent. -func (h *sysTableHelper) checkInvariants(txn *kv.Txn, ie sqlutil.InternalExecutor) error { +func (h *sysTableHelper) checkInvariants(txn isql.Txn) error { // Read the two rows for the per-tenant state (instance_id = 0) and the // per-instance state. - rows, err := ie.QueryBufferedEx( - h.ctx, "tenant-usage-select", txn, + rows, err := txn.QueryBufferedEx( + h.ctx, "tenant-usage-select", txn.KV(), sessiondata.NodeUserSessionDataOverride, `SELECT instance_id, /* 0 */ @@ -562,14 +556,10 @@ func (h *sysTableHelper) checkInvariants(txn *kv.Txn, ie sqlutil.InternalExecuto // for a given tenant, in a user-readable format (multi-line). Used for testing // and debugging. 
func InspectTenantMetadata( - ctx context.Context, - ex *sql.InternalExecutor, - txn *kv.Txn, - tenantID roachpb.TenantID, - timeFormat string, + ctx context.Context, txn isql.Txn, tenantID roachpb.TenantID, timeFormat string, ) (string, error) { h := makeSysTableHelper(ctx, tenantID) - tenant, err := h.readTenantState(txn, ex) + tenant, err := h.readTenantState(txn) if err != nil { return "", err } @@ -601,8 +591,8 @@ func InspectTenantMetadata( fmt.Fprintf(&buf, "Last update: %s\n", tenant.LastUpdate.Time.Format(timeFormat)) fmt.Fprintf(&buf, "First active instance: %d\n", tenant.FirstInstance) - rows, err := ex.QueryBufferedEx( - ctx, "inspect-tenant-state", txn, + rows, err := txn.QueryBufferedEx( + ctx, "inspect-tenant-state", txn.KV(), sessiondata.NodeUserSessionDataOverride, `SELECT instance_id, /* 0 */ diff --git a/pkg/ccl/multitenantccl/tenantcostserver/token_bucket.go b/pkg/ccl/multitenantccl/tenantcostserver/token_bucket.go index 994febafbd8c..d0fb4c350328 100644 --- a/pkg/ccl/multitenantccl/tenantcostserver/token_bucket.go +++ b/pkg/ccl/multitenantccl/tenantcostserver/token_bucket.go @@ -12,10 +12,9 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/errors" ) @@ -53,11 +52,11 @@ func (s *instance) TokenBucketRequest( result := &roachpb.TokenBucketResponse{} var consumption roachpb.TenantConsumption - if err := s.ief.TxnWithExecutor(ctx, s.db, nil /* sessionData */, func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) error { + if err := s.ief.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { *result = roachpb.TokenBucketResponse{} h := makeSysTableHelper(ctx, tenantID) - tenant, instance, err := h.readTenantAndInstanceState(txn, ie, instanceID) + 
tenant, instance, err := h.readTenantAndInstanceState(txn, instanceID) if err != nil { return err } @@ -66,7 +65,7 @@ func (s *instance) TokenBucketRequest( // If there is no state, we will initialize it. But check that the tenant // is valid and active. It is possible that the tenant was deleted and an // existing tenant process is still sending requests. - if err := s.checkTenantID(ctx, txn, ie, tenantID); err != nil { + if err := s.checkTenantID(ctx, txn, tenantID); err != nil { return err } } @@ -74,7 +73,7 @@ func (s *instance) TokenBucketRequest( tenant.update(now) if !instance.Present { - if err := h.accomodateNewInstance(txn, ie, &tenant, &instance); err != nil { + if err := h.accomodateNewInstance(txn, &tenant, &instance); err != nil { return err } } @@ -87,7 +86,7 @@ func (s *instance) TokenBucketRequest( if in.NextLiveInstanceID != 0 { if err := s.handleNextLiveInstanceID( - &h, &tenant, &instance, base.SQLInstanceID(in.NextLiveInstanceID), txn, ie, + &h, txn, &tenant, &instance, base.SQLInstanceID(in.NextLiveInstanceID), ); err != nil { return err } @@ -107,11 +106,11 @@ func (s *instance) TokenBucketRequest( *result = tenant.Bucket.Request(ctx, in) instance.LastUpdate.Time = now - if err := h.updateTenantAndInstanceState(txn, ie, tenant, instance); err != nil { + if err := h.updateTenantAndInstanceState(txn, tenant, instance); err != nil { return err } - if err := h.maybeCheckInvariants(txn, ie); err != nil { + if err := h.maybeCheckInvariants(txn); err != nil { panic(err) } consumption = tenant.Consumption @@ -143,11 +142,10 @@ func (s *instance) TokenBucketRequest( // (in the circular order). func (s *instance) handleNextLiveInstanceID( h *sysTableHelper, + txn isql.Txn, tenant *tenantState, instance *instanceState, nextLiveInstanceID base.SQLInstanceID, - txn *kv.Txn, - ie sqlutil.InternalExecutor, ) error { // We use NextLiveInstanceID to figure out if there is a potential dead // instance after this instance. 
@@ -180,7 +178,7 @@ func (s *instance) handleNextLiveInstanceID( // Case 2: range [instance.NextInstance, nextLiveInstanceID) potentially // needs cleanup. instance.NextInstance, err = h.maybeCleanupStaleInstances( - txn, ie, cutoff, instance.NextInstance, nextLiveInstanceID, + txn, cutoff, instance.NextInstance, nextLiveInstanceID, ) if err != nil { return err @@ -195,7 +193,7 @@ func (s *instance) handleNextLiveInstanceID( // Case 2: range [tenant.FirstInstance, nextLiveInstanceID) // potentially needs cleanup. tenant.FirstInstance, err = h.maybeCleanupStaleInstances( - txn, ie, cutoff, tenant.FirstInstance, nextLiveInstanceID, + txn, cutoff, tenant.FirstInstance, nextLiveInstanceID, ) if err != nil { return err @@ -205,7 +203,7 @@ func (s *instance) handleNextLiveInstanceID( // Case 2: in our table, this is not the largest ID. The range // [instance.NextInstance, ∞) potentially needs cleanup. instance.NextInstance, err = h.maybeCleanupStaleInstances( - txn, ie, cutoff, instance.NextInstance, -1, + txn, cutoff, instance.NextInstance, -1, ) if err != nil { return err diff --git a/pkg/ccl/serverccl/role_authentication_test.go b/pkg/ccl/serverccl/role_authentication_test.go index 06ec3909273f..5f2de135ec2d 100644 --- a/pkg/ccl/serverccl/role_authentication_test.go +++ b/pkg/ccl/serverccl/role_authentication_test.go @@ -44,12 +44,6 @@ func TestVerifyPassword(t *testing.T) { ) defer s.Stopper().Stop(ctx) - mon := sql.MakeInternalExecutorMemMonitor(sql.MemoryMetrics{}, s.ClusterSettings()) - mon.StartNoReserved(ctx, s.(*server.TestServer).Server.PGServer().SQLServer.GetBytesMonitor()) - ie := sql.MakeInternalExecutor( - s.(*server.TestServer).Server.PGServer().SQLServer, sql.MemoryMetrics{}, mon, - ) - ts := s.(*server.TestServer) if util.RaceEnabled { @@ -138,7 +132,7 @@ func TestVerifyPassword(t *testing.T) { execCfg := s.ExecutorConfig().(sql.ExecutorConfig) username := username.MakeSQLUsernameFromPreNormalizedString(tc.username) exists, canLoginSQL, 
canLoginDBConsole, isSuperuser, _, pwRetrieveFn, err := sql.GetUserSessionInitInfo( - context.Background(), &execCfg, &ie, username, "", /* databaseName */ + context.Background(), &execCfg, username, "", /* databaseName */ ) if err != nil { diff --git a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/BUILD.bazel b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/BUILD.bazel index ad58adedb070..fb688bd063df 100644 --- a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/BUILD.bazel +++ b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/BUILD.bazel @@ -16,7 +16,6 @@ go_test( "//pkg/ccl/partitionccl", "//pkg/config/zonepb", "//pkg/jobs/jobspb", - "//pkg/kv", "//pkg/roachpb", "//pkg/security/securityassets", "//pkg/security/securitytest", @@ -30,7 +29,6 @@ go_test( "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/descs", "//pkg/sql/catalog/tabledesc", - "//pkg/sql/sqlutil", "//pkg/testutils/datapathutils", "//pkg/testutils/serverutils", "//pkg/testutils/sqlutils", diff --git a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/datadriven_test.go b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/datadriven_test.go index e60f74974229..2cbca17abf36 100644 --- a/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/datadriven_test.go +++ b/pkg/ccl/spanconfigccl/spanconfigsqltranslatorccl/datadriven_test.go @@ -21,7 +21,6 @@ import ( _ "github.com/cockroachdb/cockroach/pkg/ccl/partitionccl" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/spanconfig/spanconfigsqltranslator" @@ -32,7 +31,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" 
"github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" @@ -181,12 +179,12 @@ func TestDataDriven(t *testing.T) { var records []spanconfig.Record sqlTranslatorFactory := tenant.SpanConfigSQLTranslatorFactory().(*spanconfigsqltranslator.Factory) - err := execCfg.InternalExecutorFactory.DescsTxnWithExecutor(ctx, execCfg.DB, nil /* session data */, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, ie sqlutil.InternalExecutor, + err := execCfg.InternalDB.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { - sqlTranslator := sqlTranslatorFactory.NewSQLTranslator(txn, ie, descsCol) + sqlTranslator := sqlTranslatorFactory.NewSQLTranslator(txn) var err error - records, _, err = sqlTranslator.Translate(ctx, descIDs, generateSystemSpanConfigs) + records, err = sqlTranslator.Translate(ctx, descIDs, generateSystemSpanConfigs) require.NoError(t, err) return nil }) @@ -213,12 +211,12 @@ func TestDataDriven(t *testing.T) { case "full-translate": sqlTranslatorFactory := tenant.SpanConfigSQLTranslatorFactory().(*spanconfigsqltranslator.Factory) var records []spanconfig.Record - err := execCfg.InternalExecutorFactory.DescsTxnWithExecutor(ctx, execCfg.DB, nil /* session data */, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, ie sqlutil.InternalExecutor, + err := execCfg.InternalDB.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { - sqlTranslator := sqlTranslatorFactory.NewSQLTranslator(txn, ie, descsCol) + sqlTranslator := sqlTranslatorFactory.NewSQLTranslator(txn) var err error - records, _, err = spanconfig.FullTranslate(ctx, sqlTranslator) + records, err = spanconfig.FullTranslate(ctx, sqlTranslator) require.NoError(t, err) return nil }) diff --git a/pkg/ccl/storageccl/BUILD.bazel b/pkg/ccl/storageccl/BUILD.bazel index 29df0c0170da..6fea79cb3fd3 100644 --- 
a/pkg/ccl/storageccl/BUILD.bazel +++ b/pkg/ccl/storageccl/BUILD.bazel @@ -45,8 +45,7 @@ go_test( "//pkg/security/securitytest", "//pkg/security/username", "//pkg/server", - "//pkg/sql", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/storage", "//pkg/testutils", "//pkg/testutils/serverutils", diff --git a/pkg/ccl/storageccl/external_sst_reader_test.go b/pkg/ccl/storageccl/external_sst_reader_test.go index 83a82d018bb5..d421d80a88fb 100644 --- a/pkg/ccl/storageccl/external_sst_reader_test.go +++ b/pkg/ccl/storageccl/external_sst_reader_test.go @@ -19,8 +19,7 @@ import ( _ "github.com/cockroachdb/cockroach/pkg/cloud/nodelocal" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/sql" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/storageutils" @@ -123,9 +122,7 @@ func TestNewExternalSSTReader(t *testing.T) { clusterSettings, blobs.TestBlobServiceClient(tempDir), username.RootUserName(), - tc.Servers[0].InternalExecutor().(*sql.InternalExecutor), - tc.Servers[0].InternalExecutorFactory().(sqlutil.InternalExecutorFactory), - tc.Servers[0].DB(), + tc.Servers[0].InternalDB().(isql.DB), nil, /* limiters */ cloud.NilMetrics, ) diff --git a/pkg/ccl/streamingccl/streamclient/partitioned_stream_client_test.go b/pkg/ccl/streamingccl/streamclient/partitioned_stream_client_test.go index b6c2bcaaa839..6f4f6c8a704b 100644 --- a/pkg/ccl/streamingccl/streamclient/partitioned_stream_client_test.go +++ b/pkg/ccl/streamingccl/streamclient/partitioned_stream_client_test.go @@ -243,7 +243,7 @@ INSERT INTO d.t2 VALUES (2); // Testing client.Complete() err = client.Complete(ctx, streampb.StreamID(999), true) - require.True(t, testutils.IsError(err, fmt.Sprintf("job %d: not found in system.jobs table", 
999)), err) + require.True(t, testutils.IsError(err, "job with ID 999 does not exist"), err) // Makes producer job exit quickly. h.SysSQL.Exec(t, ` diff --git a/pkg/ccl/streamingccl/streamingest/BUILD.bazel b/pkg/ccl/streamingccl/streamingest/BUILD.bazel index bcec2f248b06..c4964823ab23 100644 --- a/pkg/ccl/streamingccl/streamingest/BUILD.bazel +++ b/pkg/ccl/streamingccl/streamingest/BUILD.bazel @@ -41,6 +41,7 @@ go_library( "//pkg/sql/execinfra", "//pkg/sql/execinfrapb", "//pkg/sql/exprutil", + "//pkg/sql/isql", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/physicalplan", @@ -103,7 +104,6 @@ go_test( "//pkg/jobs", "//pkg/jobs/jobspb", "//pkg/keys", - "//pkg/kv", "//pkg/kv/kvserver", "//pkg/kv/kvserver/protectedts", "//pkg/repstream/streampb", @@ -114,9 +114,11 @@ go_test( "//pkg/server", "//pkg/settings/cluster", "//pkg/sql", + "//pkg/sql/catalog/descs", "//pkg/sql/catalog/desctestutils", "//pkg/sql/execinfra", "//pkg/sql/execinfrapb", + "//pkg/sql/isql", "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", "//pkg/storage", diff --git a/pkg/ccl/streamingccl/streamingest/alter_replication_job.go b/pkg/ccl/streamingccl/streamingest/alter_replication_job.go index da18745b4b1a..9985877d62dd 100644 --- a/pkg/ccl/streamingccl/streamingest/alter_replication_job.go +++ b/pkg/ccl/streamingccl/streamingest/alter_replication_job.go @@ -16,12 +16,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/exprutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" 
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/asof" @@ -160,23 +160,26 @@ func alterReplicationJobHook( } jobRegistry := p.ExecCfg().JobRegistry if alterTenantStmt.Cutover != nil { - if err := alterTenantJobCutover(ctx, p.Txn(), jobRegistry, - p.ExecCfg().ProtectedTimestampProvider, alterTenantStmt, tenInfo, cutoverTime); err != nil { + pts := p.ExecCfg().ProtectedTimestampProvider.WithTxn(p.InternalSQLTxn()) + if err := alterTenantJobCutover( + ctx, p.InternalSQLTxn(), jobRegistry, pts, + alterTenantStmt, tenInfo, cutoverTime, + ); err != nil { return err } resultsCh <- tree.Datums{eval.TimestampToDecimalDatum(cutoverTime)} } else if !alterTenantStmt.Options.IsDefault() { - if err := alterTenantOptions(ctx, p.Txn(), jobRegistry, options, tenInfo); err != nil { + if err := alterTenantOptions(ctx, p.InternalSQLTxn(), jobRegistry, options, tenInfo); err != nil { return err } } else { switch alterTenantStmt.Command { case tree.ResumeJob: - if err := jobRegistry.Unpause(ctx, p.Txn(), tenInfo.TenantReplicationJobID); err != nil { + if err := jobRegistry.Unpause(ctx, p.InternalSQLTxn(), tenInfo.TenantReplicationJobID); err != nil { return err } case tree.PauseJob: - if err := jobRegistry.PauseRequested(ctx, p.Txn(), tenInfo.TenantReplicationJobID, + if err := jobRegistry.PauseRequested(ctx, p.InternalSQLTxn(), tenInfo.TenantReplicationJobID, "ALTER TENANT PAUSE REPLICATION"); err != nil { return err } @@ -194,9 +197,9 @@ func alterReplicationJobHook( func alterTenantJobCutover( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, jobRegistry *jobs.Registry, - ptp protectedts.Provider, + ptp protectedts.Storage, alterTenantStmt *tree.AlterTenantReplication, tenInfo *descpb.TenantInfo, cutoverTime hlc.Timestamp, @@ -235,7 +238,7 @@ func alterTenantJobCutover( return errors.Newf("replicated tenant %q (%d) has not yet recorded a retained timestamp", tenantName, tenInfo.ID) } else { - record, err := ptp.GetRecord(ctx, 
txn, *stats.IngestionDetails.ProtectedTimestampRecordID) + record, err := ptp.GetRecord(ctx, *stats.IngestionDetails.ProtectedTimestampRecordID) if err != nil { return err } @@ -256,13 +259,13 @@ func alterTenantJobCutover( func alterTenantOptions( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, jobRegistry *jobs.Registry, options *resolvedTenantReplicationOptions, tenInfo *descpb.TenantInfo, ) error { return jobRegistry.UpdateJobWithTxn(ctx, tenInfo.TenantReplicationJobID, txn, false, /* useReadLock */ - func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + func(txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { streamIngestionDetails := md.Payload.GetStreamIngestion() if ret, ok := options.GetRetention(); ok { streamIngestionDetails.ReplicationTTLSeconds = ret diff --git a/pkg/ccl/streamingccl/streamingest/replication_stream_e2e_test.go b/pkg/ccl/streamingccl/streamingest/replication_stream_e2e_test.go index e7c879708017..96643875d496 100644 --- a/pkg/ccl/streamingccl/streamingest/replication_stream_e2e_test.go +++ b/pkg/ccl/streamingccl/streamingest/replication_stream_e2e_test.go @@ -21,11 +21,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/jobutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -771,12 +771,13 @@ func TestTenantReplicationProtectedTimestampManagement(t *testing.T) { // protecting the destination tenant. 
checkNoDestinationProtection := func(c *replicationtestutils.TenantStreamingClusters, replicationJobID int) { execCfg := c.DestSysServer.ExecutorConfig().(sql.ExecutorConfig) - require.NoError(t, c.DestCluster.Server(0).DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, c.DestCluster.Server(0).InternalDB().(isql.DB).Txn(ctx, func(ctx context.Context, txn isql.Txn) error { j, err := execCfg.JobRegistry.LoadJobWithTxn(ctx, jobspb.JobID(replicationJobID), txn) require.NoError(t, err) payload := j.Payload() replicationDetails := payload.GetStreamIngestion() - _, err = execCfg.ProtectedTimestampProvider.GetRecord(ctx, txn, *replicationDetails.ProtectedTimestampRecordID) + ptp := execCfg.ProtectedTimestampProvider.WithTxn(txn) + _, err = ptp.GetRecord(ctx, *replicationDetails.ProtectedTimestampRecordID) require.EqualError(t, err, protectedts.ErrNotExists.Error()) return nil })) @@ -784,7 +785,7 @@ func TestTenantReplicationProtectedTimestampManagement(t *testing.T) { checkDestinationProtection := func(c *replicationtestutils.TenantStreamingClusters, frontier hlc.Timestamp, replicationJobID int) { execCfg := c.DestSysServer.ExecutorConfig().(sql.ExecutorConfig) ptp := execCfg.ProtectedTimestampProvider - require.NoError(t, c.DestCluster.Server(0).DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, c.DestCluster.Server(0).InternalDB().(isql.DB).Txn(ctx, func(ctx context.Context, txn isql.Txn) error { j, err := execCfg.JobRegistry.LoadJobWithTxn(ctx, jobspb.JobID(replicationJobID), txn) if err != nil { return err @@ -794,7 +795,7 @@ func TestTenantReplicationProtectedTimestampManagement(t *testing.T) { replicationDetails := payload.GetStreamIngestion() require.NotNil(t, replicationDetails.ProtectedTimestampRecordID) - rec, err := ptp.GetRecord(ctx, txn, *replicationDetails.ProtectedTimestampRecordID) + rec, err := ptp.WithTxn(txn).GetRecord(ctx, *replicationDetails.ProtectedTimestampRecordID) if err != nil { 
return err } diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingest_manager.go b/pkg/ccl/streamingccl/streamingest/stream_ingest_manager.go index b8b809e6f868..15fa4a07d697 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingest_manager.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingest_manager.go @@ -13,11 +13,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/replicationutils" "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" + "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/repstream" "github.com/cockroachdb/cockroach/pkg/repstream/streampb" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" @@ -25,16 +26,16 @@ import ( ) type streamIngestManagerImpl struct { - evalCtx *eval.Context - txn *kv.Txn + evalCtx *eval.Context + jobRegistry *jobs.Registry + txn isql.Txn } // CompleteStreamIngestion implements streaming.StreamIngestManager interface. func (r *streamIngestManagerImpl) CompleteStreamIngestion( ctx context.Context, ingestionJobID jobspb.JobID, cutoverTimestamp hlc.Timestamp, ) error { - jobRegistry := r.evalCtx.Planner.ExecutorConfig().(*sql.ExecutorConfig).JobRegistry - return completeStreamIngestion(ctx, jobRegistry, r.txn, ingestionJobID, cutoverTimestamp) + return completeStreamIngestion(ctx, r.jobRegistry, r.txn, ingestionJobID, cutoverTimestamp) } // GetStreamIngestionStats implements streaming.StreamIngestManager interface. 
@@ -47,7 +48,7 @@ func (r *streamIngestManagerImpl) GetStreamIngestionStats( } func newStreamIngestManagerWithPrivilegesCheck( - ctx context.Context, evalCtx *eval.Context, txn *kv.Txn, + ctx context.Context, evalCtx *eval.Context, txn isql.Txn, ) (eval.StreamIngestManager, error) { isAdmin, err := evalCtx.SessionAccessor.HasAdminRole(ctx) if err != nil { @@ -67,7 +68,11 @@ func newStreamIngestManagerWithPrivilegesCheck( pgcode.InsufficientPrivilege, "replication requires enterprise license") } - return &streamIngestManagerImpl{evalCtx: evalCtx, txn: txn}, nil + return &streamIngestManagerImpl{ + evalCtx: evalCtx, + txn: txn, + jobRegistry: execCfg.JobRegistry, + }, nil } func init() { diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor.go index c71fb79373fd..791aca372f9d 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor.go @@ -17,12 +17,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/streamingccl/streamclient" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/repstream/streampb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/rowexec" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -400,7 +400,6 @@ func (sf *streamIngestionFrontier) maybeUpdatePartitionProgress() error { f := sf.frontier registry := sf.flowCtx.Cfg.JobRegistry jobID := jobspb.JobID(sf.spec.JobID) - ptp := sf.flowCtx.Cfg.ProtectedTimestampProvider 
frontierResolvedSpans := make([]jobspb.ResolvedSpan, 0) f.Entries(func(sp roachpb.Span, ts hlc.Timestamp) (done span.OpResult) { @@ -414,7 +413,7 @@ func (sf *streamIngestionFrontier) maybeUpdatePartitionProgress() error { sf.lastPartitionUpdate = timeutil.Now() if err := registry.UpdateJobWithTxn(ctx, jobID, nil, false, func( - txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, + txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, ) error { if err := md.CheckRunningOrReverting(); err != nil { return err @@ -451,14 +450,15 @@ func (sf *streamIngestionFrontier) maybeUpdatePartitionProgress() error { return errors.AssertionFailedf("expected replication job to have a protected timestamp " + "record over the destination tenant's keyspan") } - record, err := ptp.GetRecord(ctx, txn, *replicationDetails.ProtectedTimestampRecordID) + ptp := sf.flowCtx.Cfg.ProtectedTimestampProvider.WithTxn(txn) + record, err := ptp.GetRecord(ctx, *replicationDetails.ProtectedTimestampRecordID) if err != nil { return err } newProtectAbove := highWatermark.Add( -int64(replicationDetails.ReplicationTTLSeconds)*time.Second.Nanoseconds(), 0) if record.Timestamp.Less(newProtectAbove) { - return ptp.UpdateTimestamp(ctx, txn, *replicationDetails.ProtectedTimestampRecordID, newProtectAbove) + return ptp.UpdateTimestamp(ctx, *replicationDetails.ProtectedTimestampRecordID, newProtectAbove) } return nil }); err != nil { diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor_test.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor_test.go index 94004575dd11..6c4519c87c1d 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor_test.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_frontier_processor_test.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" @@ -63,7 +64,6 @@ func TestStreamIngestionFrontierProcessor(t *testing.T) { }, }) defer tc.Stopper().Stop(context.Background()) - kvDB := tc.Server(0).DB() st := cluster.MakeTestingClusterSettings() JobCheckpointFrequency.Override(ctx, &st.SV, 200*time.Millisecond) @@ -78,7 +78,7 @@ func TestStreamIngestionFrontierProcessor(t *testing.T) { flowCtx := execinfra.FlowCtx{ Cfg: &execinfra.ServerConfig{ Settings: st, - DB: kvDB, + DB: tc.Server(0).InternalDB().(descs.DB), JobRegistry: registry, BulkSenderLimiter: limit.MakeConcurrentRequestLimiter("test", math.MaxInt), }, diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_job.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_job.go index cd6fb108006e..07d2716917aa 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_job.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_job.go @@ -26,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/retry" @@ -38,26 +39,31 @@ import ( func completeStreamIngestion( ctx context.Context, jobRegistry *jobs.Registry, - txn *kv.Txn, + txn isql.Txn, ingestionJobID jobspb.JobID, cutoverTimestamp hlc.Timestamp, ) error { - return jobRegistry.UpdateJobWithTxn(ctx, ingestionJobID, txn, false, /* useReadLock */ - func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { - // TODO(adityamaru): This should change in the future, a user should be - // allowed to correct their cutover time if the process of reverting the job - // 
has not started. - if jobCutoverTime := md.Progress.GetStreamIngest().CutoverTime; !jobCutoverTime.IsEmpty() { - return errors.Newf("cutover timestamp already set to %s, "+ - "job %d is in the process of cutting over", jobCutoverTime.String(), ingestionJobID) - } + j, err := jobRegistry.LoadJobWithTxn(ctx, ingestionJobID, txn) + if err != nil { + return err + } + return j.WithTxn(txn).Update(ctx, func( + txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, + ) error { + // TODO(adityamaru): This should change in the future, a user should be + // allowed to correct their cutover time if the process of reverting the job + // has not started. + if jobCutoverTime := md.Progress.GetStreamIngest().CutoverTime; !jobCutoverTime.IsEmpty() { + return errors.Newf("cutover timestamp already set to %s, "+ + "job %d is in the process of cutting over", jobCutoverTime.String(), ingestionJobID) + } - // Update the sentinel being polled by the stream ingestion job to - // check if a complete has been signaled. - md.Progress.GetStreamIngest().CutoverTime = cutoverTimestamp - ju.UpdateProgress(md.Progress) - return nil - }) + // Update the sentinel being polled by the stream ingestion job to + // check if a complete has been signaled. 
+ md.Progress.GetStreamIngest().CutoverTime = cutoverTimestamp + ju.UpdateProgress(md.Progress) + return nil + }) } type streamIngestionResumer struct { @@ -128,10 +134,11 @@ func waitUntilProducerActive( } func updateRunningStatus(ctx context.Context, ingestionJob *jobs.Job, status string) { - if err := ingestionJob.RunningStatus(ctx, nil, - func(ctx context.Context, details jobspb.Details) (jobs.RunningStatus, error) { - return jobs.RunningStatus(status), nil - }); err != nil { + if err := ingestionJob.NoTxn().RunningStatus(ctx, func( + ctx context.Context, details jobspb.Details, + ) (jobs.RunningStatus, error) { + return jobs.RunningStatus(status), nil + }); err != nil { log.Warningf(ctx, "error when updating job running status: %s", err) } } @@ -188,7 +195,7 @@ func ingest(ctx context.Context, execCtx sql.JobExecContext, ingestionJob *jobs. } // TODO(casper): update running status - err = ingestionJob.Update(ctx, nil, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + err = ingestionJob.NoTxn().Update(ctx, func(txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { md.Progress.GetStreamIngest().StreamAddresses = topology.StreamAddresses() ju.UpdateProgress(md.Progress) return nil @@ -265,8 +272,13 @@ func ingest(ctx context.Context, execCtx sql.JobExecContext, ingestionJob *jobs. // Now that we have completed the cutover we can release the protected // timestamp record on the destination tenant's keyspace. 
if details.ProtectedTimestampRecordID != nil { - if err := execCtx.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return releaseDestinationTenantProtectedTimestamp(ctx, execCtx, txn, *details.ProtectedTimestampRecordID) + if err := execCtx.ExecCfg().InternalDB.Txn(ctx, func( + ctx context.Context, txn isql.Txn, + ) error { + ptp := execCtx.ExecCfg().ProtectedTimestampProvider.WithTxn(txn) + return releaseDestinationTenantProtectedTimestamp( + ctx, ptp, *details.ProtectedTimestampRecordID, + ) }); err != nil { return err } @@ -315,9 +327,7 @@ func ingestWithRetries( } // The ingestion job should never fail, only pause, as progress should never be lost. -func (s *streamIngestionResumer) handleResumeError( - resumeCtx context.Context, execCtx interface{}, err error, -) error { +func (s *streamIngestionResumer) handleResumeError(resumeCtx context.Context, err error) error { const errorFmt = "ingestion job failed (%v) but is being paused" errorMessage := fmt.Sprintf(errorFmt, err) log.Warningf(resumeCtx, errorFmt, err) @@ -325,8 +335,10 @@ func (s *streamIngestionResumer) handleResumeError( // The ingestion job is paused but the producer job will keep // running until it times out. Users can still resume ingestion before // the producer job times out. - return s.job.PauseRequested(resumeCtx, nil /* txn */, func(ctx context.Context, - planHookState interface{}, txn *kv.Txn, progress *jobspb.Progress) error { + return s.job.NoTxn().PauseRequestedWithFunc(resumeCtx, func( + ctx context.Context, planHookState interface{}, txn isql.Txn, + progress *jobspb.Progress, + ) error { progress.RunningStatus = errorMessage return nil }, errorMessage) @@ -338,23 +350,21 @@ func (s *streamIngestionResumer) Resume(resumeCtx context.Context, execCtx inter // Protect the destination tenant's keyspan from garbage collection. 
err := s.protectDestinationTenant(resumeCtx, execCtx) if err != nil { - return s.handleResumeError(resumeCtx, execCtx, err) + return s.handleResumeError(resumeCtx, err) } // Start ingesting KVs from the replication stream. err = ingestWithRetries(resumeCtx, execCtx.(sql.JobExecContext), s.job) if err != nil { - return s.handleResumeError(resumeCtx, execCtx, err) + return s.handleResumeError(resumeCtx, err) } return nil } func releaseDestinationTenantProtectedTimestamp( - ctx context.Context, execCtx interface{}, txn *kv.Txn, ptsID uuid.UUID, + ctx context.Context, ptp protectedts.Storage, ptsID uuid.UUID, ) error { - jobExecCtx := execCtx.(sql.JobExecContext) - ptp := jobExecCtx.ExecCfg().ProtectedTimestampProvider - if err := ptp.Release(ctx, txn, ptsID); err != nil { + if err := ptp.Release(ctx, ptsID); err != nil { if errors.Is(err, protectedts.ErrNotExists) { // No reason to return an error which might cause problems if it doesn't // seem to exist. @@ -388,13 +398,16 @@ func (s *streamIngestionResumer) protectDestinationTenant( target := ptpb.MakeTenantsTarget([]roachpb.TenantID{oldDetails.DestinationTenantID}) ptsID := uuid.MakeV4() now := execCfg.Clock.Now() - return execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + ptp := execCfg.ProtectedTimestampProvider.WithTxn(txn) pts := jobsprotectedts.MakeRecord(ptsID, int64(s.job.ID()), now, nil /* deprecatedSpans */, jobsprotectedts.Jobs, target) - if err := execCfg.ProtectedTimestampProvider.Protect(ctx, txn, pts); err != nil { + if err := ptp.Protect(ctx, pts); err != nil { return err } - return s.job.Update(ctx, txn, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + return s.job.WithTxn(txn).Update(ctx, func( + txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, + ) error { if err := md.CheckRunningOrReverting(); err != nil { return err } @@ -494,21 +507,23 @@ func 
maybeRevertToCutoverTimestamp( } } } - return true, j.SetProgress(ctx, nil /* txn */, *sp.StreamIngest) + return true, j.NoTxn().SetProgress(ctx, *sp.StreamIngest) } func activateTenant(ctx context.Context, execCtx interface{}, newTenantID roachpb.TenantID) error { p := execCtx.(sql.JobExecContext) execCfg := p.ExecCfg() - return execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - info, err := sql.GetTenantRecordByID(ctx, execCfg, txn, newTenantID) + return execCfg.InternalDB.Txn(ctx, func( + ctx context.Context, txn isql.Txn, + ) error { + info, err := sql.GetTenantRecordByID(ctx, txn, newTenantID) if err != nil { return err } info.State = descpb.TenantInfo_ACTIVE info.TenantReplicationJobID = 0 - return sql.UpdateTenantRecord(ctx, execCfg, txn, info) + return sql.UpdateTenantRecord(ctx, p.ExecCfg().Settings, txn, info) }) } @@ -548,20 +563,25 @@ func (s *streamIngestionResumer) OnFailOrCancel( details := s.job.Details().(jobspb.StreamIngestionDetails) s.cancelProducerJob(ctx, details) - return jobExecCtx.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - tenInfo, err := sql.GetTenantRecordByID(ctx, jobExecCtx.ExecCfg(), txn, details.DestinationTenantID) + execCfg := jobExecCtx.ExecCfg() + return execCfg.InternalDB.Txn(ctx, func( + ctx context.Context, txn isql.Txn, + ) error { + tenInfo, err := sql.GetTenantRecordByID(ctx, txn, details.DestinationTenantID) if err != nil { return errors.Wrap(err, "fetch tenant info") } tenInfo.TenantReplicationJobID = 0 - if err := sql.UpdateTenantRecord(ctx, jobExecCtx.ExecCfg(), txn, tenInfo); err != nil { + if err := sql.UpdateTenantRecord(ctx, execCfg.Settings, txn, tenInfo); err != nil { return errors.Wrap(err, "update tenant record") } if details.ProtectedTimestampRecordID != nil { - if err := releaseDestinationTenantProtectedTimestamp(ctx, execCtx, txn, - *details.ProtectedTimestampRecordID); err != nil { + ptp := execCfg.ProtectedTimestampProvider.WithTxn(txn) + if err := 
releaseDestinationTenantProtectedTimestamp( + ctx, ptp, *details.ProtectedTimestampRecordID, + ); err != nil { return err } } diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_job_test.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_job_test.go index c3b807b19254..b753b2ce792e 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_job_test.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_job_test.go @@ -24,12 +24,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/repstream/streampb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils/jobutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" @@ -234,7 +234,9 @@ func TestCutoverBuiltin(t *testing.T) { } var job *jobs.StartableJob id := registry.MakeJobID() - err := tc.Server(0).DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + err := tc.Server(0).InternalDB().(isql.DB).Txn(ctx, func( + ctx context.Context, txn isql.Txn, + ) (err error) { return registry.CreateStartableJobWithTxn(ctx, &job, id, txn, streamIngestJobRecord) }) require.NoError(t, err) @@ -246,7 +248,7 @@ func TestCutoverBuiltin(t *testing.T) { require.True(t, sp.StreamIngest.CutoverTime.IsEmpty()) var highWater time.Time - err = job.Update(ctx, nil, func(_ *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + err = job.NoTxn().Update(ctx, func(_ isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { highWater = timeutil.Now().Round(time.Microsecond) hlcHighWater := hlc.Timestamp{WallTime: 
highWater.UnixNano()} return jobs.UpdateHighwaterProgressed(hlcHighWater, md, ju) @@ -321,7 +323,7 @@ func TestReplicationJobResumptionStartTime(t *testing.T) { <-planned registry := c.DestSysServer.ExecutorConfig().(sql.ExecutorConfig).JobRegistry var replicationJobDetails jobspb.StreamIngestionDetails - require.NoError(t, c.DestSysServer.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, c.DestSysServer.InternalDB().(isql.DB).Txn(ctx, func(ctx context.Context, txn isql.Txn) error { j, err := registry.LoadJobWithTxn(ctx, jobspb.JobID(replicationJobID), txn) require.NoError(t, err) var ok bool diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_planning.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_planning.go index daa67b230904..9c7debecc3ab 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_planning.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_planning.go @@ -165,7 +165,12 @@ func ingestionPlanHook( if err != nil { return err } - destinationTenantID, err := sql.CreateTenantRecord(ctx, p.ExecCfg(), p.Txn(), tenantInfo, initialTenantZoneConfig) + destinationTenantID, err := sql.CreateTenantRecord( + ctx, p.ExecCfg().Codec, p.ExecCfg().Settings, + p.InternalSQLTxn(), + p.ExecCfg().SpanConfigKVAccessor.WithTxn(ctx, p.Txn()), + tenantInfo, initialTenantZoneConfig, + ) if err != nil { return err } @@ -210,12 +215,10 @@ func ingestionPlanHook( Details: streamIngestionDetails, } - _, err = p.ExecCfg().JobRegistry.CreateAdoptableJobWithTxn(ctx, jr, jobID, p.Txn()) - if err != nil { - return err - } - - return nil + _, err = p.ExecCfg().JobRegistry.CreateAdoptableJobWithTxn( + ctx, jr, jobID, p.InternalSQLTxn(), + ) + return err } return fn, nil, nil, false, nil diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go index e5befa4b593c..5e8a42fdbbe3 100644 --- 
a/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor.go @@ -281,14 +281,14 @@ func (sip *streamIngestionProcessor) Start(ctx context.Context) { evalCtx := sip.FlowCtx.EvalCtx db := sip.FlowCtx.Cfg.DB var err error - sip.batcher, err = bulk.MakeStreamSSTBatcher(ctx, db, evalCtx.Settings, + sip.batcher, err = bulk.MakeStreamSSTBatcher(ctx, db.KV(), evalCtx.Settings, sip.flowCtx.Cfg.BackupMonitor.MakeBoundAccount(), sip.flowCtx.Cfg.BulkSenderLimiter) if err != nil { sip.MoveToDraining(errors.Wrap(err, "creating stream sst batcher")) return } - sip.rangeBatcher = newRangeKeyBatcher(ctx, evalCtx.Settings, db) + sip.rangeBatcher = newRangeKeyBatcher(ctx, evalCtx.Settings, db.KV()) // Start a poller that checks if the stream ingestion job has been signaled to // cutover. diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor_test.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor_test.go index 86fcdf3304bf..26262ba4dde5 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor_test.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_processor_test.go @@ -24,11 +24,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/repstream/streampb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" @@ -176,7 +176,7 @@ func TestStreamIngestionProcessor(t *testing.T) { tc := testcluster.StartTestCluster(t, 3 /* nodes */, base.TestClusterArgs{}) defer 
tc.Stopper().Stop(ctx) - kvDB := tc.Server(0).DB() + db := tc.Server(0).InternalDB().(descs.DB) registry := tc.Server(0).JobRegistry().(*jobs.Registry) const tenantID = 20 tenantRekey := execinfrapb.TenantRekey{ @@ -247,7 +247,7 @@ func TestStreamIngestionProcessor(t *testing.T) { topology := streamclient.Topology{ Partitions: partitions, } - out, err := runStreamIngestionProcessor(ctx, t, registry, kvDB, + out, err := runStreamIngestionProcessor(ctx, t, registry, db, topology, initialScanTimestamp, []jobspb.ResolvedSpan{}, tenantRekey, mockClient, nil /* cutoverProvider */, nil /* streamingTestingKnobs */) require.NoError(t, err) @@ -294,7 +294,7 @@ func TestStreamIngestionProcessor(t *testing.T) { streamingTestingKnobs := &sql.StreamingTestingKnobs{BeforeClientSubscribe: func(addr string, token string, clientStartTime hlc.Timestamp) { lastClientStart[token] = clientStartTime }} - out, err := runStreamIngestionProcessor(ctx, t, registry, kvDB, + out, err := runStreamIngestionProcessor(ctx, t, registry, db, topology, initialScanTimestamp, checkpoint, tenantRekey, mockClient, nil /* cutoverProvider */, streamingTestingKnobs) require.NoError(t, err) @@ -321,7 +321,7 @@ func TestStreamIngestionProcessor(t *testing.T) { topology := streamclient.Topology{ Partitions: partitions, } - out, err := runStreamIngestionProcessor(ctx, t, registry, kvDB, + out, err := runStreamIngestionProcessor(ctx, t, registry, db, topology, initialScanTimestamp, []jobspb.ResolvedSpan{}, tenantRekey, &errorStreamClient{}, nil /* cutoverProvider */, nil /* streamingTestingKnobs */) require.NoError(t, err) @@ -450,7 +450,7 @@ func TestRandomClientGeneration(t *testing.T) { tc := testcluster.StartTestCluster(t, 3 /* nodes */, base.TestClusterArgs{}) defer tc.Stopper().Stop(ctx) registry := tc.Server(0).JobRegistry().(*jobs.Registry) - kvDB := tc.Server(0).DB() + db := tc.Server(0).InternalDB().(descs.DB) // TODO: Consider testing variations on these parameters. 
tenantID := roachpb.MustMakeTenantID(20) @@ -493,7 +493,7 @@ func TestRandomClientGeneration(t *testing.T) { randomStreamClient.RegisterInterception(cancelAfterCheckpoints) randomStreamClient.RegisterInterception(validateFnWithValidator(t, streamValidator)) - out, err := runStreamIngestionProcessor(ctx, t, registry, kvDB, + out, err := runStreamIngestionProcessor(ctx, t, registry, db, topo, initialScanTimestamp, []jobspb.ResolvedSpan{}, tenantRekey, randomStreamClient, noCutover{}, nil /* streamingTestingKnobs*/) require.NoError(t, err) @@ -557,7 +557,7 @@ func runStreamIngestionProcessor( ctx context.Context, t *testing.T, registry *jobs.Registry, - kvDB *kv.DB, + db descs.DB, partitions streamclient.Topology, initialScanTimestamp hlc.Timestamp, checkpoint []jobspb.ResolvedSpan, @@ -566,7 +566,7 @@ func runStreamIngestionProcessor( cutoverProvider cutoverProvider, streamingTestingKnobs *sql.StreamingTestingKnobs, ) (*distsqlutils.RowBuffer, error) { - sip, out, err := getStreamIngestionProcessor(ctx, t, registry, kvDB, + sip, out, err := getStreamIngestionProcessor(ctx, t, registry, db, partitions, initialScanTimestamp, checkpoint, tenantRekey, mockClient, cutoverProvider, streamingTestingKnobs) require.NoError(t, err) @@ -586,7 +586,7 @@ func getStreamIngestionProcessor( ctx context.Context, t *testing.T, registry *jobs.Registry, - kvDB *kv.DB, + db descs.DB, partitions streamclient.Topology, initialScanTimestamp hlc.Timestamp, checkpoint []jobspb.ResolvedSpan, @@ -607,7 +607,7 @@ func getStreamIngestionProcessor( flowCtx := execinfra.FlowCtx{ Cfg: &execinfra.ServerConfig{ Settings: st, - DB: kvDB, + DB: db, JobRegistry: registry, TestingKnobs: execinfra.TestingKnobs{StreamingTestingKnobs: streamingTestingKnobs}, BulkSenderLimiter: limit.MakeConcurrentRequestLimiter("test", math.MaxInt), diff --git a/pkg/ccl/streamingccl/streamproducer/BUILD.bazel b/pkg/ccl/streamingccl/streamproducer/BUILD.bazel index 6f979510204d..ad4e50c74099 100644 --- 
a/pkg/ccl/streamingccl/streamproducer/BUILD.bazel +++ b/pkg/ccl/streamingccl/streamproducer/BUILD.bazel @@ -31,6 +31,7 @@ go_library( "//pkg/security/username", "//pkg/settings/cluster", "//pkg/sql", + "//pkg/sql/isql", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/sem/eval", @@ -87,8 +88,10 @@ go_test( "//pkg/server", "//pkg/settings/cluster", "//pkg/sql", + "//pkg/sql/catalog/descs", "//pkg/sql/catalog/desctestutils", "//pkg/sql/distsql", + "//pkg/sql/isql", "//pkg/sql/sem/eval", "//pkg/sql/sessiondatapb", "//pkg/testutils", diff --git a/pkg/ccl/streamingccl/streamproducer/producer_job.go b/pkg/ccl/streamingccl/streamproducer/producer_job.go index 3c6f9bd303c3..96162b937619 100644 --- a/pkg/ccl/streamingccl/streamproducer/producer_job.go +++ b/pkg/ccl/streamingccl/streamproducer/producer_job.go @@ -18,11 +18,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" @@ -68,8 +68,8 @@ func (p *producerJobResumer) releaseProtectedTimestamp( ctx context.Context, executorConfig *sql.ExecutorConfig, ) error { ptr := p.job.Details().(jobspb.StreamReplicationDetails).ProtectedTimestampRecordID - return executorConfig.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - err := executorConfig.ProtectedTimestampProvider.Release(ctx, txn, ptr) + return executorConfig.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + err := executorConfig.ProtectedTimestampProvider.WithTxn(txn).Release(ctx, ptr) // In case that 
a retry happens, the record might have been released. if errors.Is(err, exec.ErrNotFound) { return nil @@ -102,7 +102,7 @@ func (p *producerJobResumer) Resume(ctx context.Context, execCtx interface{}) er case jobspb.StreamReplicationProgress_FINISHED_SUCCESSFULLY: return p.releaseProtectedTimestamp(ctx, execCfg) case jobspb.StreamReplicationProgress_FINISHED_UNSUCCESSFULLY: - return j.Update(ctx, nil, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + return j.NoTxn().Update(ctx, func(txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { ju.UpdateStatus(jobs.StatusCancelRequested) return nil }) diff --git a/pkg/ccl/streamingccl/streamproducer/producer_job_test.go b/pkg/ccl/streamingccl/streamproducer/producer_job_test.go index 2c8750cfb1ab..46dc66d1b6c3 100644 --- a/pkg/ccl/streamingccl/streamproducer/producer_job_test.go +++ b/pkg/ccl/streamingccl/streamproducer/producer_job_test.go @@ -18,13 +18,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/repstream/streampb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/distsql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" @@ -155,13 +155,18 @@ func TestStreamReplicationProducerJob(t *testing.T) { expirationTime := func(record jobs.Record) time.Time { return record.Progress.(jobspb.StreamReplicationProgress).Expiration } + insqlDB := source.InternalDB().(isql.DB) runJobWithProtectedTimestamp := func(ptsID 
uuid.UUID, ts hlc.Timestamp, jr jobs.Record) error { - return source.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return insqlDB.Txn(ctx, func( + ctx context.Context, txn isql.Txn, + ) error { deprecatedTenantSpan := roachpb.Spans{*makeTenantSpan(30)} tenantTarget := ptpb.MakeTenantsTarget([]roachpb.TenantID{roachpb.MustMakeTenantID(30)}) - if err := ptp.Protect(ctx, txn, - jobsprotectedts.MakeRecord(ptsID, int64(jr.JobID), ts, - deprecatedTenantSpan, jobsprotectedts.Jobs, tenantTarget)); err != nil { + record := jobsprotectedts.MakeRecord( + ptsID, int64(jr.JobID), ts, deprecatedTenantSpan, + jobsprotectedts.Jobs, tenantTarget, + ) + if err := ptp.WithTxn(txn).Protect(ctx, record); err != nil { return err } _, err := registry.CreateAdoptableJobWithTxn(ctx, jr, jr.JobID, txn) @@ -169,8 +174,8 @@ func TestStreamReplicationProducerJob(t *testing.T) { }) } getPTSRecord := func(ptsID uuid.UUID) (r *ptpb.Record, err error) { - err = source.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - r, err = ptp.GetRecord(ctx, txn, ptsID) + err = insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + r, err = ptp.WithTxn(txn).GetRecord(ctx, ptsID) return err }) return r, err @@ -199,7 +204,7 @@ func TestStreamReplicationProducerJob(t *testing.T) { require.True(t, testutils.IsError(err, "protected timestamp record does not exist"), err) var status streampb.StreamReplicationStatus - require.NoError(t, source.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { status, err = updateReplicationStreamProgress( ctx, timeutil.Now(), ptp, registry, streampb.StreamID(jr.JobID), hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}, txn) @@ -235,7 +240,7 @@ func TestStreamReplicationProducerJob(t *testing.T) { var streamStatus streampb.StreamReplicationStatus var err error expire := expirationTime(jr).Add(10 * time.Millisecond) - require.NoError(t, 
source.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { streamStatus, err = updateReplicationStreamProgress( ctx, expire, ptp, registry, streampb.StreamID(jr.JobID), updatedFrontier, txn) return err diff --git a/pkg/ccl/streamingccl/streamproducer/replication_manager.go b/pkg/ccl/streamingccl/streamproducer/replication_manager.go index ebc4f0ccc402..d003f6f176e5 100644 --- a/pkg/ccl/streamingccl/streamproducer/replication_manager.go +++ b/pkg/ccl/streamingccl/streamproducer/replication_manager.go @@ -12,11 +12,11 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/repstream" "github.com/cockroachdb/cockroach/pkg/repstream/streampb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" @@ -25,7 +25,7 @@ import ( type replicationStreamManagerImpl struct { evalCtx *eval.Context - txn *kv.Txn + txn isql.Txn } // StartReplicationStream implements streaming.ReplicationStreamManager interface. 
@@ -64,7 +64,7 @@ func (r *replicationStreamManagerImpl) CompleteReplicationStream( } func newReplicationStreamManagerWithPrivilegesCheck( - ctx context.Context, evalCtx *eval.Context, txn *kv.Txn, + ctx context.Context, evalCtx *eval.Context, txn isql.Txn, ) (eval.ReplicationStreamManager, error) { isAdmin, err := evalCtx.SessionAccessor.HasAdminRole(ctx) if err != nil { diff --git a/pkg/ccl/streamingccl/streamproducer/replication_manager_test.go b/pkg/ccl/streamingccl/streamproducer/replication_manager_test.go index 2090ae5cdc1c..19c4d5ffeeb1 100644 --- a/pkg/ccl/streamingccl/streamproducer/replication_manager_test.go +++ b/pkg/ccl/streamingccl/streamproducer/replication_manager_test.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -49,9 +50,15 @@ func TestReplicationManagerRequiresAdminRole(t *testing.T) { execCfg := s.ExecutorConfig().(sql.ExecutorConfig) txn := kvDB.NewTxn(ctx, "test") p, cleanup := sql.NewInternalPlanner("test", txn, sqlUser, &sql.MemoryMetrics{}, &execCfg, sessionData) + + // Extract the eval context and internal SQL txn interfaces from the planner. + pi := p.(interface { + EvalContext() *eval.Context + InternalSQLTxn() descs.Txn + }) defer cleanup() - ec := p.(interface{ EvalContext() *eval.Context }).EvalContext() - return newReplicationStreamManagerWithPrivilegesCheck(ctx, ec, txn) + ec := pi.EvalContext() + return newReplicationStreamManagerWithPrivilegesCheck(ctx, ec, pi.InternalSQLTxn()) } for _, tc := range []struct { diff --git a/pkg/ccl/streamingccl/streamproducer/replication_stream_test.go b/pkg/ccl/streamingccl/streamproducer/replication_stream_test.go index 3ff032a29100..8314de7806f8 100644 ---
a/pkg/ccl/streamingccl/streamproducer/replication_stream_test.go +++ b/pkg/ccl/streamingccl/streamproducer/replication_stream_test.go @@ -33,6 +33,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" "github.com/cockroachdb/cockroach/pkg/sql/distsql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils/jobutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" @@ -610,9 +611,9 @@ func TestCompleteStreamReplication(t *testing.T) { pj, err := jr.LoadJob(ctx, jobspb.JobID(streamID)) require.NoError(t, err) payload := pj.Payload() - require.ErrorIs(t, h.SysServer.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - ptp := h.SysServer.DistSQLServer().(*distsql.ServerImpl).ServerConfig.ProtectedTimestampProvider - _, err = ptp.GetRecord(ctx, txn, payload.GetStreamReplication().ProtectedTimestampRecordID) + ptp := h.SysServer.DistSQLServer().(*distsql.ServerImpl).ServerConfig.ProtectedTimestampProvider + require.ErrorIs(t, h.SysServer.InternalDB().(isql.DB).Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err = ptp.WithTxn(txn).GetRecord(ctx, payload.GetStreamReplication().ProtectedTimestampRecordID) return err }), protectedts.ErrNotExists) } diff --git a/pkg/ccl/streamingccl/streamproducer/stream_lifetime.go b/pkg/ccl/streamingccl/streamproducer/stream_lifetime.go index 4b8bcfcbc159..c4c1fd137779 100644 --- a/pkg/ccl/streamingccl/streamproducer/stream_lifetime.go +++ b/pkg/ccl/streamingccl/streamproducer/stream_lifetime.go @@ -16,13 +16,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" 
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/repstream/streampb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -37,7 +37,7 @@ import ( // 1. Tracks the liveness of the replication stream consumption. // 2. Updates the protected timestamp for spans being replicated. func startReplicationProducerJob( - ctx context.Context, evalCtx *eval.Context, txn *kv.Txn, tenantName roachpb.TenantName, + ctx context.Context, evalCtx *eval.Context, txn isql.Txn, tenantName roachpb.TenantName, ) (streampb.ReplicationProducerSpec, error) { execConfig := evalCtx.Planner.ExecutorConfig().(*sql.ExecutorConfig) hasAdminRole, err := evalCtx.SessionAccessor.HasAdminRole(ctx) @@ -53,7 +53,7 @@ func startReplicationProducerJob( return streampb.ReplicationProducerSpec{}, errors.Errorf("kv.rangefeed.enabled must be true to start a replication job") } - tenantRecord, err := sql.GetTenantRecordByName(ctx, evalCtx.Planner.ExecutorConfig().(*sql.ExecutorConfig), txn, tenantName) + tenantRecord, err := sql.GetTenantRecordByName(ctx, evalCtx.Settings, txn, tenantName) if err != nil { return streampb.ReplicationProducerSpec{}, err } @@ -68,7 +68,7 @@ func startReplicationProducerJob( return streampb.ReplicationProducerSpec{}, err } - ptp := execConfig.ProtectedTimestampProvider + ptp := execConfig.ProtectedTimestampProvider.WithTxn(txn) statementTime := hlc.Timestamp{ WallTime: evalCtx.GetStmtTimestamp().UnixNano(), } @@ -77,7 +77,7 @@ func startReplicationProducerJob( pts := jobsprotectedts.MakeRecord(ptsID, int64(jr.JobID), statementTime, deprecatedSpansToProtect, jobsprotectedts.Jobs, targetToProtect) - if err := ptp.Protect(ctx, txn, pts); err != nil { + if err := ptp.Protect(ctx, pts); err != 
nil { return streampb.ReplicationProducerSpec{}, err } return streampb.ReplicationProducerSpec{ @@ -110,15 +110,22 @@ func convertProducerJobStatusToStreamStatus( func updateReplicationStreamProgress( ctx context.Context, expiration time.Time, - ptsProvider protectedts.Provider, + ptsProvider protectedts.Manager, registry *jobs.Registry, streamID streampb.StreamID, consumedTime hlc.Timestamp, - txn *kv.Txn, + txn isql.Txn, ) (status streampb.StreamReplicationStatus, err error) { - const useReadLock = false - err = registry.UpdateJobWithTxn(ctx, jobspb.JobID(streamID), txn, useReadLock, - func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + updateJob := func() (streampb.StreamReplicationStatus, error) { + j, err := registry.LoadJobWithTxn(ctx, jobspb.JobID(streamID), txn) + if err != nil { + return status, err + } + if err := j.WithTxn(txn).Update(ctx, func( + txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, + ) error { + status = streampb.StreamReplicationStatus{} + pts := ptsProvider.WithTxn(txn) status.StreamStatus = convertProducerJobStatusToStreamStatus(md.Status) // Skip checking PTS record in cases that it might already be released if status.StreamStatus != streampb.StreamReplicationStatus_STREAM_ACTIVE && @@ -127,7 +134,7 @@ func updateReplicationStreamProgress( } ptsID := md.Payload.GetStreamReplication().ProtectedTimestampRecordID - ptsRecord, err := ptsProvider.GetRecord(ctx, txn, ptsID) + ptsRecord, err := pts.GetRecord(ctx, ptsID) if err != nil { return err } @@ -145,7 +152,7 @@ func updateReplicationStreamProgress( // ingestion using the previous ingestion high watermark, it can fall behind the // source cluster protected timestamp. 
if shouldUpdatePTS := ptsRecord.Timestamp.Less(consumedTime); shouldUpdatePTS { - if err = ptsProvider.UpdateTimestamp(ctx, txn, ptsID, consumedTime); err != nil { + if err = pts.UpdateTimestamp(ctx, ptsID, consumedTime); err != nil { return err } status.ProtectedTimestamp = &consumedTime @@ -154,13 +161,17 @@ func updateReplicationStreamProgress( md.Progress.GetStreamReplication().Expiration = expiration ju.UpdateProgress(md.Progress) return nil - }) + }); err != nil { + return streampb.StreamReplicationStatus{}, err + } + return status, nil + } + status, err = updateJob() if jobs.HasJobNotFoundError(err) || testutils.IsError(err, "not found in system.jobs table") { status.StreamStatus = streampb.StreamReplicationStatus_STREAM_INACTIVE err = nil } - return status, err } @@ -170,7 +181,7 @@ func updateReplicationStreamProgress( func heartbeatReplicationStream( ctx context.Context, evalCtx *eval.Context, - txn *kv.Txn, + txn isql.Txn, streamID streampb.StreamID, frontier hlc.Timestamp, ) (streampb.StreamReplicationStatus, error) { @@ -191,8 +202,9 @@ func heartbeatReplicationStream( } status.StreamStatus = convertProducerJobStatusToStreamStatus(pj.Status()) payload := pj.Payload() - ptsRecord, err := execConfig.ProtectedTimestampProvider.GetRecord(ctx, txn, - payload.GetStreamReplication().ProtectedTimestampRecordID) + ptsRecord, err := execConfig.ProtectedTimestampProvider.WithTxn(txn).GetRecord( + ctx, payload.GetStreamReplication().ProtectedTimestampRecordID, + ) // Nil protected timestamp indicates it was not created or has been released. 
if errors.Is(err, protectedts.ErrNotExists) { return status, nil @@ -224,10 +236,9 @@ func getReplicationStreamSpec( } // Partition the spans with SQLPlanner - var noTxn *kv.Txn dsp := jobExecCtx.DistSQLPlanner() planCtx := dsp.NewPlanningCtx(ctx, jobExecCtx.ExtendedEvalContext(), - nil /* planner */, noTxn, sql.DistributionTypeSystemTenantOnly) + nil /* planner */, nil /* txn */, sql.DistributionTypeSystemTenantOnly) details, ok := j.Details().(jobspb.StreamReplicationDetails) if !ok { @@ -270,32 +281,37 @@ func getReplicationStreamSpec( func completeReplicationStream( ctx context.Context, evalCtx *eval.Context, - txn *kv.Txn, + txn isql.Txn, streamID streampb.StreamID, successfulIngestion bool, ) error { - registry := evalCtx.Planner.ExecutorConfig().(*sql.ExecutorConfig).JobRegistry - const useReadLock = false - return registry.UpdateJobWithTxn(ctx, jobspb.JobID(streamID), txn, useReadLock, - func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { - // Updates the stream ingestion status, make the job resumer exit running - // when picking up the new status. 
- if (md.Status == jobs.StatusRunning || md.Status == jobs.StatusPending) && - md.Progress.GetStreamReplication().StreamIngestionStatus == - jobspb.StreamReplicationProgress_NOT_FINISHED { - if successfulIngestion { - md.Progress.GetStreamReplication().StreamIngestionStatus = - jobspb.StreamReplicationProgress_FINISHED_SUCCESSFULLY - md.Progress.RunningStatus = "succeeding this producer job as the corresponding " + - "stream ingestion finished successfully" - } else { - md.Progress.GetStreamReplication().StreamIngestionStatus = - jobspb.StreamReplicationProgress_FINISHED_UNSUCCESSFULLY - md.Progress.RunningStatus = "canceling this producer job as the corresponding " + - "stream ingestion did not finish successfully" - } - ju.UpdateProgress(md.Progress) + jobExecCtx := evalCtx.JobExecContext.(sql.JobExecContext) + registry := jobExecCtx.ExecCfg().JobRegistry + j, err := registry.LoadJobWithTxn(ctx, jobspb.JobID(streamID), txn) + if err != nil { + return err + } + return j.WithTxn(txn).Update(ctx, func( + txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, + ) error { + // Updates the stream ingestion status, make the job resumer exit running + // when picking up the new status. 
+ if (md.Status == jobs.StatusRunning || md.Status == jobs.StatusPending) && + md.Progress.GetStreamReplication().StreamIngestionStatus == + jobspb.StreamReplicationProgress_NOT_FINISHED { + if successfulIngestion { + md.Progress.GetStreamReplication().StreamIngestionStatus = + jobspb.StreamReplicationProgress_FINISHED_SUCCESSFULLY + md.Progress.RunningStatus = "succeeding this producer job as the corresponding " + + "stream ingestion finished successfully" + } else { + md.Progress.GetStreamReplication().StreamIngestionStatus = + jobspb.StreamReplicationProgress_FINISHED_UNSUCCESSFULLY + md.Progress.RunningStatus = "canceling this producer job as the corresponding " + + "stream ingestion did not finish successfully" } - return nil - }) + ju.UpdateProgress(md.Progress) + } + return nil + }) } diff --git a/pkg/ccl/testccl/sqlccl/BUILD.bazel b/pkg/ccl/testccl/sqlccl/BUILD.bazel index 37167f76671f..c1b2d2096c5b 100644 --- a/pkg/ccl/testccl/sqlccl/BUILD.bazel +++ b/pkg/ccl/testccl/sqlccl/BUILD.bazel @@ -21,7 +21,6 @@ go_test( "//pkg/jobs/jobspb", "//pkg/jobs/jobsprotectedts", "//pkg/keys", - "//pkg/kv", "//pkg/kv/kvserver/protectedts/ptpb", "//pkg/roachpb", "//pkg/security/securityassets", @@ -34,6 +33,7 @@ go_test( "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/desctestutils", "//pkg/sql/gcjob", + "//pkg/sql/isql", "//pkg/sql/sessiondatapb", "//pkg/sql/sqlliveness/slinstance", "//pkg/sql/sqltestutils", diff --git a/pkg/ccl/testccl/sqlccl/tenant_gc_test.go b/pkg/ccl/testccl/sqlccl/tenant_gc_test.go index 42573bc7a6f8..4f5e0c69dae8 100644 --- a/pkg/ccl/testccl/sqlccl/tenant_gc_test.go +++ b/pkg/ccl/testccl/sqlccl/tenant_gc_test.go @@ -20,7 +20,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/roachpb" 
"github.com/cockroachdb/cockroach/pkg/security/username" @@ -29,6 +28,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" "github.com/cockroachdb/cockroach/pkg/sql/gcjob" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" @@ -114,10 +114,15 @@ func TestGCTenantRemovesSpanConfigs(t *testing.T) { beforeDelete := len(records) // Mark the tenant as dropped by updating its record. - require.NoError(t, sql.TestingUpdateTenantRecord( - ctx, &execCfg, nil, /* txn */ - &descpb.TenantInfo{ID: tenantID.ToUint64(), State: descpb.TenantInfo_DROP}, - )) + + require.NoError(t, ts.InternalDB().(isql.DB).Txn(ctx, func( + ctx context.Context, txn isql.Txn, + ) error { + return sql.TestingUpdateTenantRecord( + ctx, ts.ClusterSettings(), txn, + &descpb.TenantInfo{ID: tenantID.ToUint64(), State: descpb.TenantInfo_DROP}, + ) + })) // Run GC on the tenant. 
progress := &jobspb.SchemaChangeGCProgress{ @@ -396,7 +401,9 @@ func TestGCTableOrIndexWaitsForProtectedTimestamps(t *testing.T) { mu.Lock() mu.jobID = jobID mu.Unlock() - sj, err := jobs.TestingCreateAndStartJob(ctx, registry, execCfg.DB, record, jobs.WithJobID(jobID)) + sj, err := jobs.TestingCreateAndStartJob( + ctx, registry, execCfg.InternalDB, record, jobs.WithJobID(jobID), + ) require.NoError(t, err) ensureGCBlockedByPTS(t, registry, sj) @@ -463,7 +470,7 @@ func TestGCTenantJobWaitsForProtectedTimestamps(t *testing.T) { JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), }, } - srv, sqlDBRaw, kvDB := serverutils.StartServer(t, args) + srv, sqlDBRaw, _ := serverutils.StartServer(t, args) sqlDB := sqlutils.MakeSQLRunner(sqlDBRaw) sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'") @@ -472,13 +479,14 @@ func TestGCTenantJobWaitsForProtectedTimestamps(t *testing.T) { jobRegistry := execCfg.JobRegistry defer srv.Stopper().Stop(ctx) + insqlDB := execCfg.InternalDB ptp := execCfg.ProtectedTimestampProvider mkRecordAndProtect := func(ts hlc.Timestamp, target *ptpb.Target) *ptpb.Record { recordID := uuid.MakeV4() rec := jobsprotectedts.MakeRecord(recordID, int64(1), ts, nil, /* deprecatedSpans */ jobsprotectedts.Jobs, target) - require.NoError(t, execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return ptp.Protect(ctx, txn, rec) + require.NoError(t, insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return ptp.WithTxn(txn).Protect(ctx, rec) })) return rec } @@ -515,7 +523,10 @@ func TestGCTenantJobWaitsForProtectedTimestamps(t *testing.T) { job, err := jobRegistry.LoadJob(ctx, sj.ID()) require.NoError(t, err) require.Equal(t, jobs.StatusSucceeded, job.Status()) - _, err = sql.GetTenantRecordByID(ctx, &execCfg, nil /* txn */, tenID) + err = insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err = sql.GetTenantRecordByID(ctx, txn, tenID) + return err + }) require.EqualError(t, 
err, fmt.Sprintf(`tenant "%d" does not exist`, tenID.ToUint64())) progress := job.Progress() require.Equal(t, jobspb.SchemaChangeGCProgress_CLEARED, progress.GetSchemaChangeGC().Tenant.Status) @@ -538,14 +549,14 @@ func TestGCTenantJobWaitsForProtectedTimestamps(t *testing.T) { tenantTarget := ptpb.MakeTenantsTarget([]roachpb.TenantID{roachpb.MustMakeTenantID(tenID)}) rec := mkRecordAndProtect(hlc.Timestamp{WallTime: int64(dropTime - 1)}, tenantTarget) - sj, err := jobs.TestingCreateAndStartJob(ctx, jobRegistry, kvDB, record) + sj, err := jobs.TestingCreateAndStartJob(ctx, jobRegistry, insqlDB, record) require.NoError(t, err) checkGCBlockedByPTS(t, sj, tenID) // Release the record. - require.NoError(t, execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - require.NoError(t, ptp.Release(ctx, txn, rec.ID.GetUUID())) + require.NoError(t, insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + require.NoError(t, ptp.WithTxn(txn).Release(ctx, rec.ID.GetUUID())) return nil })) @@ -574,15 +585,16 @@ func TestGCTenantJobWaitsForProtectedTimestamps(t *testing.T) { tenantTarget := ptpb.MakeTenantsTarget([]roachpb.TenantID{roachpb.MustMakeTenantID(tenID)}) tenantRec := mkRecordAndProtect(hlc.Timestamp{WallTime: int64(dropTime)}, tenantTarget) - sj, err := jobs.TestingCreateAndStartJob(ctx, jobRegistry, kvDB, record) + sj, err := jobs.TestingCreateAndStartJob(ctx, jobRegistry, insqlDB, record) require.NoError(t, err) checkTenantGCed(t, sj, roachpb.MustMakeTenantID(tenID)) // Cleanup. 
- require.NoError(t, execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - require.NoError(t, ptp.Release(ctx, txn, clusterRec.ID.GetUUID())) - require.NoError(t, ptp.Release(ctx, txn, tenantRec.ID.GetUUID())) + require.NoError(t, insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + ptps := ptp.WithTxn(txn) + require.NoError(t, ptps.Release(ctx, clusterRec.ID.GetUUID())) + require.NoError(t, ptps.Release(ctx, tenantRec.ID.GetUUID())) return nil })) }) @@ -602,8 +614,9 @@ func TestGCTenantJobWaitsForProtectedTimestamps(t *testing.T) { rec := jobsprotectedts.MakeRecord(recordID, int64(1), hlc.Timestamp{WallTime: timeutil.Now().UnixNano()}, nil, /* deprecatedSpans */ jobsprotectedts.Jobs, clusterTarget) - require.NoError(t, execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return tenPtp.Protect(ctx, txn, rec) + tenInsqlDB := ten.ExecutorConfig().(sql.ExecutorConfig).InternalDB + require.NoError(t, tenInsqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return tenPtp.WithTxn(txn).Protect(ctx, rec) })) sqlDB.Exec(t, `DROP TENANT [$1]`, tenID.ToUint64()) @@ -613,7 +626,10 @@ func TestGCTenantJobWaitsForProtectedTimestamps(t *testing.T) { "SELECT status FROM [SHOW JOBS] WHERE description = 'GC for tenant 10'", [][]string{{"succeeded"}}, ) - _, err := sql.GetTenantRecordByID(ctx, &execCfg, nil /* txn */, tenID) + err := insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err := sql.GetTenantRecordByID(ctx, txn, tenID) + return err + }) require.EqualError(t, err, `tenant "10" does not exist`) // PTS record protecting system tenant cluster should block tenant GC. 
@@ -633,14 +649,14 @@ func TestGCTenantJobWaitsForProtectedTimestamps(t *testing.T) { clusterTarget := ptpb.MakeClusterTarget() rec := mkRecordAndProtect(hlc.Timestamp{WallTime: int64(dropTime - 1)}, clusterTarget) - sj, err := jobs.TestingCreateAndStartJob(ctx, jobRegistry, kvDB, record) + sj, err := jobs.TestingCreateAndStartJob(ctx, jobRegistry, insqlDB, record) require.NoError(t, err) checkGCBlockedByPTS(t, sj, tenID) // Release the record. - require.NoError(t, execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - require.NoError(t, ptp.Release(ctx, txn, rec.ID.GetUUID())) + require.NoError(t, insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + require.NoError(t, ptp.WithTxn(txn).Release(ctx, rec.ID.GetUUID())) return nil })) diff --git a/pkg/ccl/workloadccl/storage.go b/pkg/ccl/workloadccl/storage.go index 2c636335a54e..ca89503773e6 100644 --- a/pkg/ccl/workloadccl/storage.go +++ b/pkg/ccl/workloadccl/storage.go @@ -42,9 +42,7 @@ func GetStorage(ctx context.Context, cfg FixtureConfig) (cloud.ExternalStorage, clustersettings.MakeClusterSettings(), nil, /* blobClientFactory */ username.SQLUsername{}, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, /* metrics */ ) diff --git a/pkg/cli/BUILD.bazel b/pkg/cli/BUILD.bazel index e5a34c2d399b..20de29be69d2 100644 --- a/pkg/cli/BUILD.bazel +++ b/pkg/cli/BUILD.bazel @@ -376,6 +376,7 @@ go_test( "//pkg/settings/cluster", "//pkg/sql", "//pkg/sql/catalog/descpb", + "//pkg/sql/isql", "//pkg/sql/protoreflect", "//pkg/sql/tests", "//pkg/storage", diff --git a/pkg/cli/debug_job_trace_test.go b/pkg/cli/debug_job_trace_test.go index a644adaf9576..86cee3db0155 100644 --- a/pkg/cli/debug_job_trace_test.go +++ b/pkg/cli/debug_job_trace_test.go @@ -24,10 +24,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - 
"github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -103,7 +103,9 @@ func TestDebugJobTrace(t *testing.T) { // to inject our traceSpanResumer. var job *jobs.StartableJob id := registry.MakeJobID() - require.NoError(t, c.TestServer.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + require.NoError(t, c.TestServer.InternalDB().(isql.DB).Txn(ctx, func( + ctx context.Context, txn isql.Txn, + ) (err error) { err = registry.CreateStartableJobWithTxn(ctx, &job, id, txn, jobs.Record{ Username: username.RootUserName(), Details: jobspb.BackupDetails{}, diff --git a/pkg/cli/democluster/demo_cluster.go b/pkg/cli/democluster/demo_cluster.go index 6bc47eb13961..c29523f22ba9 100644 --- a/pkg/cli/democluster/demo_cluster.go +++ b/pkg/cli/democluster/demo_cluster.go @@ -493,7 +493,7 @@ func (c *transientCluster) Start(ctx context.Context) (err error) { // Also create the user/password for the secondary tenant. ts := c.tenantServers[0] tctx := ts.AnnotateCtx(ctx) - ieTenant := ts.DistSQLServer().(*distsql.ServerImpl).ServerConfig.Executor + ieTenant := ts.DistSQLServer().(*distsql.ServerImpl).ServerConfig.DB.Executor() _, err = ieTenant.Exec(tctx, "tenant-password", nil, fmt.Sprintf("CREATE USER %s WITH PASSWORD '%s'", demoUsername, demoPassword)) if err != nil { @@ -507,7 +507,7 @@ func (c *transientCluster) Start(ctx context.Context) (err error) { if c.demoCtx.Multitenant && !c.demoCtx.DisableServerController { // Select the default tenant. 
- ie := c.firstServer.DistSQLServer().(*distsql.ServerImpl).ServerConfig.Executor + ie := c.firstServer.DistSQLServer().(*distsql.ServerImpl).ServerConfig.DB.Executor() // Choose the tenant to use when no tenant is specified on a // connection or web URL. if _, err := ie.Exec(ctx, "default-tenant", nil, diff --git a/pkg/cloud/BUILD.bazel b/pkg/cloud/BUILD.bazel index 8f57f1f8691d..7b4f742b27e4 100644 --- a/pkg/cloud/BUILD.bazel +++ b/pkg/cloud/BUILD.bazel @@ -19,11 +19,10 @@ go_library( "//pkg/base", "//pkg/blobs", "//pkg/cloud/cloudpb", - "//pkg/kv", "//pkg/security/username", "//pkg/settings", "//pkg/settings/cluster", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/util/ctxgroup", "//pkg/util/ioctx", "//pkg/util/log", diff --git a/pkg/cloud/amazon/s3_storage_test.go b/pkg/cloud/amazon/s3_storage_test.go index 1a2be44d4bb9..e297d525f65f 100644 --- a/pkg/cloud/amazon/s3_storage_test.go +++ b/pkg/cloud/amazon/s3_storage_test.go @@ -47,9 +47,7 @@ func makeS3Storage( clientFactory := blobs.TestBlobServiceClient(testSettings.ExternalIODir) s, err := cloud.MakeExternalStorage(ctx, conf, base.ExternalIODirConfig{}, testSettings, clientFactory, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, ) @@ -113,26 +111,19 @@ func TestPutS3(t *testing.T) { bucket, "backup-test-default", cloud.AuthParam, cloud.AuthParamImplicit, ), false, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings) }) t.Run("auth-specified", func(t *testing.T) { uri := S3URI(bucket, "backup-test", &cloudpb.ExternalStorage_S3{AccessKey: creds.AccessKeyID, Secret: creds.SecretAccessKey, Region: "us-east-1"}, ) - cloudtestutils.CheckExportStore(t, uri, false, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ - testSettings, + cloudtestutils.CheckExportStore( + t, uri, false, user, nil /* db */, testSettings, + ) + cloudtestutils.CheckListFiles( + t, uri, user, nil /* db */, testSettings, ) - 
cloudtestutils.CheckListFiles(t, uri, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ - testSettings) }) // Tests that we can put an object with server side encryption specified. @@ -156,9 +147,7 @@ func TestPutS3(t *testing.T) { ), false, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) @@ -174,9 +163,7 @@ func TestPutS3(t *testing.T) { ), false, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings) }) @@ -246,17 +233,11 @@ func TestPutS3AssumeRole(t *testing.T) { uri := S3URI(bucket, "backup-test", &cloudpb.ExternalStorage_S3{Auth: cloud.AuthParamImplicit, RoleARN: roleArn, Region: "us-east-1"}, ) - cloudtestutils.CheckExportStore(t, uri, false, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ - testSettings, + cloudtestutils.CheckExportStore( + t, uri, false, user, nil /* db */, testSettings, ) - cloudtestutils.CheckListFiles(t, uri, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ - testSettings, + cloudtestutils.CheckListFiles( + t, uri, user, nil /* db */, testSettings, ) }) @@ -264,17 +245,11 @@ func TestPutS3AssumeRole(t *testing.T) { uri := S3URI(bucket, "backup-test", &cloudpb.ExternalStorage_S3{Auth: cloud.AuthParamSpecified, RoleARN: roleArn, AccessKey: creds.AccessKeyID, Secret: creds.SecretAccessKey, Region: "us-east-1"}, ) - cloudtestutils.CheckExportStore(t, uri, false, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ - testSettings, + cloudtestutils.CheckExportStore( + t, uri, false, user, nil /* db */, testSettings, ) - cloudtestutils.CheckListFiles(t, uri, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ - testSettings, + cloudtestutils.CheckListFiles( + t, uri, user, nil /* db */, testSettings, ) }) @@ -306,11 +281,8 @@ func TestPutS3AssumeRole(t *testing.T) { Region: "us-east-1", }, ) - cloudtestutils.CheckNoPermission(t, roleURI, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ - testSettings, + 
cloudtestutils.CheckNoPermission( + t, roleURI, user, nil /* db */, testSettings, ) } @@ -332,11 +304,8 @@ func TestPutS3AssumeRole(t *testing.T) { Region: "us-east-1", }, ) - cloudtestutils.CheckNoPermission(t, uri, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ - testSettings, + cloudtestutils.CheckNoPermission( + t, uri, user, nil /* db */, testSettings, ) // Finally, check that the chain of roles can be used to access the storage. @@ -351,11 +320,8 @@ func TestPutS3AssumeRole(t *testing.T) { }, ) - cloudtestutils.CheckExportStore(t, uri, false, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ - testSettings, + cloudtestutils.CheckExportStore( + t, uri, false, user, nil /* db */, testSettings, ) }) } @@ -394,11 +360,8 @@ func TestPutS3Endpoint(t *testing.T) { testSettings := cluster.MakeTestingClusterSettings() - cloudtestutils.CheckExportStore(t, u.String(), false, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ - testSettings, + cloudtestutils.CheckExportStore( + t, u.String(), false, user, nil /* db */, testSettings, ) } @@ -471,9 +434,7 @@ func TestS3BucketDoesNotExist(t *testing.T) { clientFactory := blobs.TestBlobServiceClient(testSettings.ExternalIODir) s, err := cloud.MakeExternalStorage(ctx, conf, base.ExternalIODirConfig{}, testSettings, clientFactory, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, ) diff --git a/pkg/cloud/azure/azure_storage_test.go b/pkg/cloud/azure/azure_storage_test.go index 722fd8542e2e..4ed048b27d8d 100644 --- a/pkg/cloud/azure/azure_storage_test.go +++ b/pkg/cloud/azure/azure_storage_test.go @@ -74,15 +74,12 @@ func TestAzure(t *testing.T) { testSettings := cluster.MakeTestingClusterSettings() cloudtestutils.CheckExportStore(t, cfg.filePath("backup-test"), false, username.RootUserName(), - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) - cloudtestutils.CheckListFiles(t, cfg.filePath("listing-test"), 
username.RootUserName(), - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + cloudtestutils.CheckListFiles(t, cfg.filePath("listing-test"), + username.RootUserName(), + nil, /* db */ testSettings, ) } diff --git a/pkg/cloud/cloudtestutils/BUILD.bazel b/pkg/cloud/cloudtestutils/BUILD.bazel index db24a23ab80b..615fac833bfd 100644 --- a/pkg/cloud/cloudtestutils/BUILD.bazel +++ b/pkg/cloud/cloudtestutils/BUILD.bazel @@ -11,10 +11,9 @@ go_library( "//pkg/blobs", "//pkg/cloud", "//pkg/cloud/cloudpb", - "//pkg/kv", "//pkg/security/username", "//pkg/settings/cluster", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/util/ioctx", "//pkg/util/randutil", "//pkg/util/sysutil", diff --git a/pkg/cloud/cloudtestutils/cloud_test_helpers.go b/pkg/cloud/cloudtestutils/cloud_test_helpers.go index 727c277d2382..89142c0e15dd 100644 --- a/pkg/cloud/cloudtestutils/cloud_test_helpers.go +++ b/pkg/cloud/cloudtestutils/cloud_test_helpers.go @@ -29,10 +29,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/blobs" "github.com/cockroachdb/cockroach/pkg/cloud" "github.com/cockroachdb/cockroach/pkg/cloud/cloudpb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/ioctx" "github.com/cockroachdb/cockroach/pkg/util/randutil" "github.com/cockroachdb/cockroach/pkg/util/sysutil" @@ -107,9 +106,7 @@ func storeFromURI( uri string, clientFactory blobs.BlobClientFactory, user username.SQLUsername, - ie sqlutil.InternalExecutor, - ief sqlutil.InternalExecutorFactory, - kvDB *kv.DB, + db isql.DB, testSettings *cluster.Settings, ) cloud.ExternalStorage { conf, err := cloud.ExternalStorageConfFromURI(uri, user) @@ -118,7 +115,7 @@ func storeFromURI( } // Setup a sink for the given args. 
s, err := cloud.MakeExternalStorage(ctx, conf, base.ExternalIODirConfig{}, testSettings, - clientFactory, ie, ief, kvDB, nil, cloud.NilMetrics) + clientFactory, db, nil, cloud.NilMetrics) if err != nil { t.Fatal(err) } @@ -131,9 +128,7 @@ func CheckExportStore( storeURI string, skipSingleFile bool, user username.SQLUsername, - ie sqlutil.InternalExecutor, - ief sqlutil.InternalExecutorFactory, - kvDB *kv.DB, + db isql.DB, testSettings *cluster.Settings, ) { ioConf := base.ExternalIODirConfig{} @@ -147,7 +142,7 @@ func CheckExportStore( // Setup a sink for the given args. clientFactory := blobs.TestBlobServiceClient(testSettings.ExternalIODir) s, err := cloud.MakeExternalStorage(ctx, conf, ioConf, testSettings, clientFactory, - ie, ief, kvDB, nil, cloud.NilMetrics) + db, nil, cloud.NilMetrics) if err != nil { t.Fatal(err) } @@ -255,7 +250,7 @@ func CheckExportStore( t.Fatal(err) } singleFile := storeFromURI(ctx, t, appendPath(t, storeURI, testingFilename), clientFactory, - user, ie, ief, kvDB, testSettings) + user, db, testSettings) defer singleFile.Close() res, err := singleFile.ReadFile(ctx, "") @@ -276,7 +271,7 @@ func CheckExportStore( t.Run("write-single-file-by-uri", func(t *testing.T) { const testingFilename = "B" singleFile := storeFromURI(ctx, t, appendPath(t, storeURI, testingFilename), clientFactory, - user, ie, ief, kvDB, testSettings) + user, db, testSettings) defer singleFile.Close() if err := cloud.WriteFile(ctx, singleFile, "", bytes.NewReader([]byte("bbb"))); err != nil { @@ -307,7 +302,7 @@ func CheckExportStore( if err := cloud.WriteFile(ctx, s, testingFilename, bytes.NewReader([]byte("aaa"))); err != nil { t.Fatal(err) } - singleFile := storeFromURI(ctx, t, storeURI, clientFactory, user, ie, ief, kvDB, testSettings) + singleFile := storeFromURI(ctx, t, storeURI, clientFactory, user, db, testSettings) defer singleFile.Close() // Read a valid file. 
@@ -348,12 +343,10 @@ func CheckListFiles( t *testing.T, storeURI string, user username.SQLUsername, - ie sqlutil.InternalExecutor, - ief sqlutil.InternalExecutorFactory, - kvDB *kv.DB, + db isql.DB, testSettings *cluster.Settings, ) { - CheckListFilesCanonical(t, storeURI, "", user, ie, ief, kvDB, testSettings) + CheckListFilesCanonical(t, storeURI, "", user, db, testSettings) } // CheckListFilesCanonical is like CheckListFiles but takes a canonical prefix @@ -364,9 +357,7 @@ func CheckListFilesCanonical( storeURI string, canonical string, user username.SQLUsername, - ie sqlutil.InternalExecutor, - ief sqlutil.InternalExecutorFactory, - kvDB *kv.DB, + db isql.DB, testSettings *cluster.Settings, ) { ctx := context.Background() @@ -379,7 +370,7 @@ func CheckListFilesCanonical( clientFactory := blobs.TestBlobServiceClient(testSettings.ExternalIODir) for _, fileName := range fileNames { - file := storeFromURI(ctx, t, storeURI, clientFactory, user, ie, ief, kvDB, testSettings) + file := storeFromURI(ctx, t, storeURI, clientFactory, user, db, testSettings) if err := cloud.WriteFile(ctx, file, fileName, bytes.NewReader([]byte("bbb"))); err != nil { t.Fatal(err) } @@ -467,7 +458,7 @@ func CheckListFilesCanonical( }, } { t.Run(tc.name, func(t *testing.T) { - s := storeFromURI(ctx, t, tc.uri, clientFactory, user, ie, ief, kvDB, testSettings) + s := storeFromURI(ctx, t, tc.uri, clientFactory, user, db, testSettings) var actual []string require.NoError(t, s.List(ctx, tc.prefix, tc.delimiter, func(f string) error { actual = append(actual, f) @@ -480,7 +471,7 @@ func CheckListFilesCanonical( }) for _, fileName := range fileNames { - file := storeFromURI(ctx, t, storeURI, clientFactory, user, ie, ief, kvDB, testSettings) + file := storeFromURI(ctx, t, storeURI, clientFactory, user, db, testSettings) if err := file.Delete(ctx, fileName); err != nil { t.Fatal(err) } @@ -500,9 +491,7 @@ func uploadData( s, err := cloud.MakeExternalStorage(ctx, dest, base.ExternalIODirConfig{}, 
testSettings, nil, /* blobClientFactory */ - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, ) @@ -546,9 +535,7 @@ func CheckAntagonisticRead( ctx := context.Background() s, err := cloud.MakeExternalStorage(ctx, conf, base.ExternalIODirConfig{}, testSettings, nil, /* blobClientFactory */ - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, ) @@ -569,9 +556,7 @@ func CheckNoPermission( t *testing.T, storeURI string, user username.SQLUsername, - ie sqlutil.InternalExecutor, - ief sqlutil.InternalExecutorFactory, - kvDB *kv.DB, + db isql.DB, testSettings *cluster.Settings, ) { ioConf := base.ExternalIODirConfig{} @@ -583,8 +568,9 @@ func CheckNoPermission( } clientFactory := blobs.TestBlobServiceClient(testSettings.ExternalIODir) - s, err := cloud.MakeExternalStorage(ctx, conf, ioConf, testSettings, clientFactory, ie, ief, kvDB, - nil, cloud.NilMetrics) + s, err := cloud.MakeExternalStorage( + ctx, conf, ioConf, testSettings, clientFactory, db, nil, cloud.NilMetrics, + ) if err != nil { t.Fatal(err) } diff --git a/pkg/cloud/external_storage.go b/pkg/cloud/external_storage.go index cd1feb3851f3..239799cb9bb0 100644 --- a/pkg/cloud/external_storage.go +++ b/pkg/cloud/external_storage.go @@ -19,10 +19,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/blobs" "github.com/cockroachdb/cockroach/pkg/cloud/cloudpb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/ioctx" "github.com/cockroachdb/errors" ) @@ -149,15 +148,13 @@ type ExternalStorageURIParser func(ExternalStorageURIContext, *url.URL) (cloudpb // ExternalStorageContext contains the dependencies passed to external 
storage // implementations during creation. type ExternalStorageContext struct { - IOConf base.ExternalIODirConfig - Settings *cluster.Settings - BlobClientFactory blobs.BlobClientFactory - InternalExecutor sqlutil.InternalExecutor - InternalExecutorFactory sqlutil.InternalExecutorFactory - DB *kv.DB - Options []ExternalStorageOption - Limiters Limiters - MetricsRecorder MetricsRecorder + IOConf base.ExternalIODirConfig + Settings *cluster.Settings + BlobClientFactory blobs.BlobClientFactory + DB isql.DB + Options []ExternalStorageOption + Limiters Limiters + MetricsRecorder MetricsRecorder } // ExternalStorageOptions holds dependencies and values that can be diff --git a/pkg/cloud/externalconn/BUILD.bazel b/pkg/cloud/externalconn/BUILD.bazel index 0ad707343f15..da501f530e04 100644 --- a/pkg/cloud/externalconn/BUILD.bazel +++ b/pkg/cloud/externalconn/BUILD.bazel @@ -17,14 +17,13 @@ go_library( "//pkg/cloud", "//pkg/cloud/cloudpb", "//pkg/cloud/externalconn/connectionpb", - "//pkg/kv", "//pkg/security/username", "//pkg/sql/catalog/colinfo", + "//pkg/sql/isql", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/util/protoutil", "@com_github_cockroachdb_errors//:errors", ], diff --git a/pkg/cloud/externalconn/connection_kms.go b/pkg/cloud/externalconn/connection_kms.go index fb6247fc83eb..bc9b12ad0b49 100644 --- a/pkg/cloud/externalconn/connection_kms.go +++ b/pkg/cloud/externalconn/connection_kms.go @@ -16,7 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/cloud" "github.com/cockroachdb/cockroach/pkg/cloud/externalconn/connectionpb" - "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/errors" ) @@ -38,9 +38,9 @@ func makeExternalConnectionKMS( // Retrieve the external connection object from the system table. 
var ec ExternalConnection - if err := env.DBHandle().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := env.DBHandle().Txn(ctx, func(ctx context.Context, txn isql.Txn) error { var err error - ec, err = LoadExternalConnection(ctx, externalConnectionName, env.InternalExecutor(), txn) + ec, err = LoadExternalConnection(ctx, externalConnectionName, txn) return err }); err != nil { return nil, errors.Wrap(err, "failed to load external connection object") diff --git a/pkg/cloud/externalconn/connection_storage.go b/pkg/cloud/externalconn/connection_storage.go index 6d08c0f6d404..d4a408e1d123 100644 --- a/pkg/cloud/externalconn/connection_storage.go +++ b/pkg/cloud/externalconn/connection_storage.go @@ -18,8 +18,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/cloud" "github.com/cockroachdb/cockroach/pkg/cloud/cloudpb" "github.com/cockroachdb/cockroach/pkg/cloud/externalconn/connectionpb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/errors" ) @@ -66,9 +66,9 @@ func makeExternalConnectionStorage( // Retrieve the external connection object from the system table. 
var ec ExternalConnection - if err := args.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := args.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { var err error - ec, err = LoadExternalConnection(ctx, cfg.Name, args.InternalExecutor, txn) + ec, err = LoadExternalConnection(ctx, cfg.Name, txn) return err }); err != nil { return nil, errors.Wrap(err, "failed to load external connection object") @@ -92,7 +92,6 @@ func makeExternalConnectionStorage( uri.Path = path.Join(uri.Path, cfg.Path) return cloud.ExternalStorageFromURI(ctx, uri.String(), args.IOConf, args.Settings, args.BlobClientFactory, username.MakeSQLUsernameFromPreNormalizedString(cfg.User), - args.InternalExecutor, args.InternalExecutorFactory, args.DB, args.Limiters, args.MetricsRecorder.Metrics(), args.Options...) default: return nil, errors.Newf("cannot connect to %T; unsupported resource for an ExternalStorage connection", d) diff --git a/pkg/cloud/externalconn/record.go b/pkg/cloud/externalconn/record.go index fc7d9e31f2b2..387095e56a30 100644 --- a/pkg/cloud/externalconn/record.go +++ b/pkg/cloud/externalconn/record.go @@ -17,14 +17,13 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/cloud/externalconn/connectionpb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" ) @@ -90,12 +89,12 @@ func (e *externalConnectionNotFoundError) Error() string { // `system.external_connections` table and returns the read-only interface for // interacting with it. 
func LoadExternalConnection( - ctx context.Context, name string, ex sqlutil.InternalExecutor, txn *kv.Txn, + ctx context.Context, name string, txn isql.Txn, ) (ExternalConnection, error) { // Loading an External Connection is only allowed for users with the `USAGE` // privilege. We run the query as `node` since the user might not have // `SELECT` on the system table. - row, cols, err := ex.QueryRowExWithCols(ctx, "lookup-schedule", txn, + row, cols, err := txn.QueryRowExWithCols(ctx, "lookup-schedule", txn.KV(), sessiondata.NodeUserSessionDataOverride, fmt.Sprintf("SELECT * FROM system.external_connections WHERE connection_name = '%s'", name)) @@ -280,7 +279,7 @@ func generatePlaceholders(n int) string { // table. If an error is returned, it is callers responsibility to handle it // (e.g. rollback transaction). func (e *MutableExternalConnection) Create( - ctx context.Context, ex sqlutil.InternalExecutor, user username.SQLUsername, txn *kv.Txn, + ctx context.Context, txn isql.Txn, user username.SQLUsername, ) error { cols, qargs, err := e.marshalChanges() if err != nil { @@ -291,7 +290,7 @@ func (e *MutableExternalConnection) Create( // `EXTERNALCONNECTION` system privilege. We run the query as `node` // since the user might not have `INSERT` on the system table. 
createQuery := "INSERT INTO system.external_connections (%s) VALUES (%s) RETURNING connection_name" - row, retCols, err := ex.QueryRowExWithCols(ctx, "ExternalConnection.Create", txn, + row, retCols, err := txn.QueryRowExWithCols(ctx, "ExternalConnection.Create", txn.KV(), sessiondata.NodeUserSessionDataOverride, fmt.Sprintf(createQuery, strings.Join(cols, ","), generatePlaceholders(len(qargs))), qargs..., diff --git a/pkg/cloud/externalconn/utils/BUILD.bazel b/pkg/cloud/externalconn/utils/BUILD.bazel index c37e47b7bfb1..b37afb9ae6e3 100644 --- a/pkg/cloud/externalconn/utils/BUILD.bazel +++ b/pkg/cloud/externalconn/utils/BUILD.bazel @@ -9,11 +9,10 @@ go_library( deps = [ "//pkg/base", "//pkg/cloud", - "//pkg/kv", "//pkg/security/username", "//pkg/settings/cluster", "//pkg/sql", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/util/ioctx", "//pkg/util/log", "@com_github_cockroachdb_errors//:errors", diff --git a/pkg/cloud/externalconn/utils/connection_utils.go b/pkg/cloud/externalconn/utils/connection_utils.go index b4c2f303cfb2..1bccbb9d078d 100644 --- a/pkg/cloud/externalconn/utils/connection_utils.go +++ b/pkg/cloud/externalconn/utils/connection_utils.go @@ -18,11 +18,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/cloud" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/ioctx" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" @@ -108,8 +107,8 @@ func (e *externalConnectionKMSEnv) KMSConfig() *base.ExternalIODirConfig { } // DBHandle implements the KMSEnv interface. 
-func (e *externalConnectionKMSEnv) DBHandle() *kv.DB { - return e.execCfg.DB +func (e *externalConnectionKMSEnv) DBHandle() isql.DB { + return e.execCfg.InternalDB } // User implements the KMSEnv interface. @@ -117,11 +116,6 @@ func (e *externalConnectionKMSEnv) User() username.SQLUsername { return e.user } -// InternalExecutor implements the KMSEnv interface. -func (e *externalConnectionKMSEnv) InternalExecutor() sqlutil.InternalExecutor { - return e.execCfg.InternalExecutor -} - var _ cloud.KMSEnv = &externalConnectionKMSEnv{} // CheckKMSConnection encrypts, decrypts and matches the contents of a sentinel diff --git a/pkg/cloud/gcp/gcs_storage_test.go b/pkg/cloud/gcp/gcs_storage_test.go index dcfda74d1570..08b45bfe657e 100644 --- a/pkg/cloud/gcp/gcs_storage_test.go +++ b/pkg/cloud/gcp/gcs_storage_test.go @@ -65,9 +65,7 @@ func TestPutGoogleCloud(t *testing.T) { uri, false, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) cloudtestutils.CheckListFiles(t, fmt.Sprintf("gs://%s/%s/%s?%s=%s&%s=%s", @@ -79,9 +77,7 @@ func TestPutGoogleCloud(t *testing.T) { CredentialsParam, url.QueryEscape(encoded), ), username.RootUserName(), - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) }) @@ -96,9 +92,7 @@ func TestPutGoogleCloud(t *testing.T) { cloud.AuthParam, cloud.AuthParamImplicit), false, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings) cloudtestutils.CheckListFiles(t, fmt.Sprintf("gs://%s/%s/%s?%s=%s", bucket, @@ -107,9 +101,7 @@ func TestPutGoogleCloud(t *testing.T) { cloud.AuthParam, cloud.AuthParamImplicit, ), username.RootUserName(), - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) }) @@ -140,9 +132,7 @@ func TestPutGoogleCloud(t *testing.T) { uri, false, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings) cloudtestutils.CheckListFiles(t, fmt.Sprintf("gs://%s/%s/%s?%s=%s&%s=%s", bucket, @@ 
-153,9 +143,7 @@ func TestPutGoogleCloud(t *testing.T) { BearerTokenParam, token.AccessToken, ), username.RootUserName(), - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) }) @@ -185,9 +173,7 @@ func TestGCSAssumeRole(t *testing.T) { // access to the bucket. cloudtestutils.CheckNoPermission(t, fmt.Sprintf("gs://%s/%s?%s=%s", limitedBucket, "backup-test-assume-role", CredentialsParam, url.QueryEscape(encoded)), user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) @@ -202,9 +188,7 @@ func TestGCSAssumeRole(t *testing.T) { assumedAccount, CredentialsParam, url.QueryEscape(encoded), ), false, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) cloudtestutils.CheckListFiles(t, fmt.Sprintf("gs://%s/%s/%s?%s=%s&%s=%s&%s=%s", @@ -218,9 +202,7 @@ func TestGCSAssumeRole(t *testing.T) { CredentialsParam, url.QueryEscape(encoded), ), username.RootUserName(), - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) }) @@ -234,17 +216,13 @@ func TestGCSAssumeRole(t *testing.T) { // access to the bucket. 
cloudtestutils.CheckNoPermission(t, fmt.Sprintf("gs://%s/%s?%s=%s", limitedBucket, "backup-test-assume-role", cloud.AuthParam, cloud.AuthParamImplicit), user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) cloudtestutils.CheckExportStore(t, fmt.Sprintf("gs://%s/%s?%s=%s&%s=%s", limitedBucket, "backup-test-assume-role", cloud.AuthParam, cloud.AuthParamImplicit, AssumeRoleParam, assumedAccount), false, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) cloudtestutils.CheckListFiles(t, fmt.Sprintf("gs://%s/%s/%s?%s=%s&%s=%s", @@ -256,9 +234,7 @@ func TestGCSAssumeRole(t *testing.T) { AssumeRoleParam, assumedAccount, ), username.RootUserName(), - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) }) @@ -300,9 +276,7 @@ func TestGCSAssumeRole(t *testing.T) { q.Encode(), ) cloudtestutils.CheckNoPermission(t, roleURI, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) } @@ -316,15 +290,11 @@ func TestGCSAssumeRole(t *testing.T) { q.Encode(), ) cloudtestutils.CheckExportStore(t, uri, false, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) cloudtestutils.CheckListFiles(t, uri, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) }) @@ -370,9 +340,7 @@ func TestFileDoesNotExist(t *testing.T) { s, err := cloud.MakeExternalStorage(context.Background(), conf, base.ExternalIODirConfig{}, testSettings, nil, /* blobClientFactory */ - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, ) @@ -390,9 +358,7 @@ func TestFileDoesNotExist(t *testing.T) { s, err := cloud.MakeExternalStorage(context.Background(), conf, base.ExternalIODirConfig{}, testSettings, nil, /* blobClientFactory */ - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, ) @@ -428,18 +394,14 @@ func 
TestCompressedGCS(t *testing.T) { s1, err := cloud.MakeExternalStorage(ctx, conf1, base.ExternalIODirConfig{}, testSettings, nil, /* blobClientFactory */ - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, ) require.NoError(t, err) s2, err := cloud.MakeExternalStorage(ctx, conf2, base.ExternalIODirConfig{}, testSettings, nil, /* blobClientFactory */ - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, ) diff --git a/pkg/cloud/httpsink/http_storage_test.go b/pkg/cloud/httpsink/http_storage_test.go index 9f9736bb004e..b9806a16fde5 100644 --- a/pkg/cloud/httpsink/http_storage_test.go +++ b/pkg/cloud/httpsink/http_storage_test.go @@ -122,9 +122,7 @@ func TestPutHttp(t *testing.T) { srv, files, cleanup := makeServer() defer cleanup() cloudtestutils.CheckExportStore(t, srv.String(), false, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) if expected, actual := 14, files(); expected != actual { @@ -144,9 +142,7 @@ func TestPutHttp(t *testing.T) { combined.Host = strings.Join([]string{srv1.Host, srv2.Host, srv3.Host}, ",") cloudtestutils.CheckExportStore(t, combined.String(), true, user, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ testSettings, ) if expected, actual := 3, files1(); expected != actual { @@ -172,9 +168,7 @@ func TestPutHttp(t *testing.T) { t.Fatal(err) } s, err := cloud.MakeExternalStorage(ctx, conf, base.ExternalIODirConfig{}, testSettings, blobs.TestEmptyBlobClientFactory, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, ) @@ -332,9 +326,7 @@ func TestCanDisableHttp(t *testing.T) { testSettings := cluster.MakeTestingClusterSettings() s, err := cloud.MakeExternalStorage(context.Background(), cloudpb.ExternalStorage{Provider: cloudpb.ExternalStorageProvider_http}, conf, testSettings, blobs.TestEmptyBlobClientFactory, - nil, /* ie */ - 
nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, ) @@ -356,9 +348,7 @@ func TestCanDisableOutbound(t *testing.T) { cloudpb.ExternalStorageProvider_nodelocal, } { s, err := cloud.MakeExternalStorage(context.Background(), cloudpb.ExternalStorage{Provider: provider}, conf, testSettings, blobs.TestEmptyBlobClientFactory, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, ) @@ -391,9 +381,7 @@ func TestExternalStorageCanUseHTTPProxy(t *testing.T) { conf, err := cloud.ExternalStorageConfFromURI("http://my-server", username.RootUserName()) require.NoError(t, err) s, err := cloud.MakeExternalStorage(context.Background(), conf, base.ExternalIODirConfig{}, testSettings, nil, - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, ) diff --git a/pkg/cloud/impl_registry.go b/pkg/cloud/impl_registry.go index 4c1f7c448778..973684c5dfe3 100644 --- a/pkg/cloud/impl_registry.go +++ b/pkg/cloud/impl_registry.go @@ -21,11 +21,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/blobs" "github.com/cockroachdb/cockroach/pkg/cloud/cloudpb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/ioctx" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/metric" @@ -151,9 +150,7 @@ func ExternalStorageFromURI( settings *cluster.Settings, blobClientFactory blobs.BlobClientFactory, user username.SQLUsername, - ie sqlutil.InternalExecutor, - ief sqlutil.InternalExecutorFactory, - kvDB *kv.DB, + db isql.DB, limiters Limiters, metrics metric.Struct, opts ...ExternalStorageOption, @@ 
-163,7 +160,7 @@ func ExternalStorageFromURI( return nil, err } return MakeExternalStorage(ctx, conf, externalConfig, settings, blobClientFactory, - ie, ief, kvDB, limiters, metrics, opts...) + db, limiters, metrics, opts...) } // SanitizeExternalStorageURI returns the external storage URI with with some @@ -207,9 +204,7 @@ func MakeExternalStorage( conf base.ExternalIODirConfig, settings *cluster.Settings, blobClientFactory blobs.BlobClientFactory, - ie sqlutil.InternalExecutor, - ief sqlutil.InternalExecutorFactory, - kvDB *kv.DB, + db isql.DB, limiters Limiters, metrics metric.Struct, opts ...ExternalStorageOption, @@ -220,15 +215,13 @@ func MakeExternalStorage( return nil, errors.Newf("invalid metrics type: %T", metrics) } args := ExternalStorageContext{ - IOConf: conf, - Settings: settings, - BlobClientFactory: blobClientFactory, - InternalExecutor: ie, - InternalExecutorFactory: ief, - DB: kvDB, - Options: opts, - Limiters: limiters, - MetricsRecorder: cloudMetrics, + IOConf: conf, + Settings: settings, + BlobClientFactory: blobClientFactory, + DB: db, + Options: opts, + Limiters: limiters, + MetricsRecorder: cloudMetrics, } if conf.DisableOutbound && dest.Provider != cloudpb.ExternalStorageProvider_userfile { return nil, errors.New("external network access is disabled") diff --git a/pkg/cloud/kms.go b/pkg/cloud/kms.go index 2bc92c5d7e76..f22ee08ed081 100644 --- a/pkg/cloud/kms.go +++ b/pkg/cloud/kms.go @@ -15,10 +15,9 @@ import ( "net/url" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/errors" ) @@ -42,9 +41,8 @@ type KMS interface { type KMSEnv interface { ClusterSettings() *cluster.Settings KMSConfig() *base.ExternalIODirConfig - DBHandle() *kv.DB + DBHandle() isql.DB User() 
username.SQLUsername - InternalExecutor() sqlutil.InternalExecutor } // KMSFromURIFactory describes a factory function for KMS given a URI. diff --git a/pkg/cloud/kms_test_utils.go b/pkg/cloud/kms_test_utils.go index cf43e4ead424..0674c0df7302 100644 --- a/pkg/cloud/kms_test_utils.go +++ b/pkg/cloud/kms_test_utils.go @@ -16,10 +16,9 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/stretchr/testify/require" ) @@ -27,9 +26,8 @@ import ( type TestKMSEnv struct { Settings *cluster.Settings ExternalIOConfig *base.ExternalIODirConfig - DB *kv.DB + DB isql.DB Username username.SQLUsername - InternalEx sqlutil.InternalExecutor } var _ KMSEnv = &TestKMSEnv{} @@ -45,7 +43,7 @@ func (e *TestKMSEnv) KMSConfig() *base.ExternalIODirConfig { } // DBHandle returns the database handle associated with the KMSEnv. -func (e *TestKMSEnv) DBHandle() *kv.DB { +func (e *TestKMSEnv) DBHandle() isql.DB { return e.DB } @@ -54,11 +52,6 @@ func (e *TestKMSEnv) User() username.SQLUsername { return e.Username } -// InternalExecutor returns the internal executor associated with the KMSEnv. 
-func (e *TestKMSEnv) InternalExecutor() sqlutil.InternalExecutor { - return e.InternalEx -} - // KMSEncryptDecrypt is the method used to test if the given KMS can // correctly encrypt and decrypt a string func KMSEncryptDecrypt(t *testing.T, kmsURI string, env KMSEnv) { diff --git a/pkg/cloud/nodelocal/nodelocal_storage_test.go b/pkg/cloud/nodelocal/nodelocal_storage_test.go index cfc62c526c3f..1a43d2c2f797 100644 --- a/pkg/cloud/nodelocal/nodelocal_storage_test.go +++ b/pkg/cloud/nodelocal/nodelocal_storage_test.go @@ -30,11 +30,10 @@ func TestPutLocal(t *testing.T) { testSettings.ExternalIODir = p dest := MakeLocalStorageURI(p) - cloudtestutils.CheckExportStore(t, dest, false, username.RootUserName(), nil, nil, nil, testSettings) - cloudtestutils.CheckListFiles(t, "nodelocal://0/listing-test/basepath", username.RootUserName(), - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ - testSettings, + cloudtestutils.CheckExportStore( + t, dest, false, username.RootUserName(), nil /* db */, testSettings) + url := "nodelocal://0/listing-test/basepath" + cloudtestutils.CheckListFiles( + t, url, username.RootUserName(), nil /*db */, testSettings, ) } diff --git a/pkg/cloud/nullsink/nullsink_storage_test.go b/pkg/cloud/nullsink/nullsink_storage_test.go index 6ac900c1f765..d6b3b7d6363c 100644 --- a/pkg/cloud/nullsink/nullsink_storage_test.go +++ b/pkg/cloud/nullsink/nullsink_storage_test.go @@ -39,9 +39,7 @@ func TestNullSinkReadAndWrite(t *testing.T) { s, err := cloud.MakeExternalStorage(ctx, conf, base.ExternalIODirConfig{}, nil, /* Cluster Settings */ nil, /* blobClientFactory */ - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, ) diff --git a/pkg/cloud/userfile/BUILD.bazel b/pkg/cloud/userfile/BUILD.bazel index 90aaadafadfa..ec5912d68ec2 100644 --- a/pkg/cloud/userfile/BUILD.bazel +++ b/pkg/cloud/userfile/BUILD.bazel @@ -17,11 +17,9 @@ go_library( "//pkg/cloud/externalconn/connectionpb", 
"//pkg/cloud/externalconn/utils", "//pkg/cloud/userfile/filetable", - "//pkg/kv", "//pkg/security/username", "//pkg/server/telemetry", "//pkg/settings/cluster", - "//pkg/sql/sqlutil", "//pkg/util/ioctx", "@com_github_cockroachdb_errors//:errors", "@com_github_cockroachdb_errors//oserror", @@ -46,8 +44,8 @@ go_test( "//pkg/security/username", "//pkg/server", "//pkg/settings/cluster", + "//pkg/sql/isql", "//pkg/sql/sem/tree", - "//pkg/sql/sqlutil", "//pkg/sql/tests", "//pkg/testutils", "//pkg/testutils/serverutils", diff --git a/pkg/cloud/userfile/file_table_storage.go b/pkg/cloud/userfile/file_table_storage.go index 96dbcd8d98f2..c4414423df7a 100644 --- a/pkg/cloud/userfile/file_table_storage.go +++ b/pkg/cloud/userfile/file_table_storage.go @@ -23,11 +23,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/cloud" "github.com/cockroachdb/cockroach/pkg/cloud/cloudpb" "github.com/cockroachdb/cockroach/pkg/cloud/userfile/filetable" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/ioctx" "github.com/cockroachdb/errors" "github.com/cockroachdb/errors/oserror" @@ -82,8 +80,6 @@ type fileTableStorage struct { fs *filetable.FileToTableSystem cfg cloudpb.ExternalStorage_FileTable ioConf base.ExternalIODirConfig - db *kv.DB - ie sqlutil.InternalExecutor prefix string // relative filepath settings *cluster.Settings } @@ -122,7 +118,7 @@ func makeFileTableStorage( // cfg.User is already a normalized SQL username. 
user := username.MakeSQLUsernameFromPreNormalizedString(cfg.User) - executor := filetable.MakeInternalFileToTableExecutor(args.InternalExecutor, args.InternalExecutorFactory, args.DB) + executor := filetable.MakeInternalFileToTableExecutor(args.DB) fileToTableSystem, err := filetable.NewFileToTableSystem(ctx, cfg.QualifiedTableName, executor, user) @@ -133,8 +129,6 @@ func makeFileTableStorage( fs: fileToTableSystem, cfg: cfg, ioConf: args.IOConf, - db: args.DB, - ie: args.InternalExecutor, prefix: cfg.Path, settings: args.Settings, }, nil @@ -253,12 +247,6 @@ func (f *fileTableStorage) Writer(ctx context.Context, basename string) (io.Writ return nil, err } - // This is only possible if the method is invoked by a SQLConnFileTableStorage - // which should never be the case. - if f.ie == nil { - return nil, errors.New("cannot Write without a configured internal executor") - } - return f.fs.NewFileWriter(ctx, filepath, filetable.ChunkDefaultSize) } diff --git a/pkg/cloud/userfile/file_table_storage_test.go b/pkg/cloud/userfile/file_table_storage_test.go index b0ee86e9f6d3..f84bb17ca9e2 100644 --- a/pkg/cloud/userfile/file_table_storage_test.go +++ b/pkg/cloud/userfile/file_table_storage_test.go @@ -23,8 +23,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/cloud/cloudtestutils" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -44,26 +44,24 @@ func TestPutUserFileTable(t *testing.T) { ctx := context.Background() params, _ := tests.CreateTestServerParams() - s, _, kvDB := serverutils.StartServer(t, params) + s, _, _ := serverutils.StartServer(t, params) defer s.Stopper().Stop(ctx) dest := 
MakeUserFileStorageURI(qualifiedTableName, filename) - ie := s.InternalExecutor().(sqlutil.InternalExecutor) - ief := s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory) - cloudtestutils.CheckExportStore(t, dest, false, username.RootUserName(), ie, ief, kvDB, testSettings) + db := s.InternalDB().(isql.DB) + cloudtestutils.CheckExportStore(t, dest, false, username.RootUserName(), db, testSettings) cloudtestutils.CheckListFiles(t, "userfile://defaultdb.public.file_list_table/listing-test/basepath", - username.RootUserName(), ie, ief, kvDB, testSettings) + username.RootUserName(), db, testSettings) t.Run("empty-qualified-table-name", func(t *testing.T) { dest := MakeUserFileStorageURI("", filename) - ie := s.InternalExecutor().(sqlutil.InternalExecutor) - cloudtestutils.CheckExportStore(t, dest, false, username.RootUserName(), ie, ief, kvDB, testSettings) + cloudtestutils.CheckExportStore(t, dest, false, username.RootUserName(), db, testSettings) cloudtestutils.CheckListFilesCanonical(t, "userfile:///listing-test/basepath", "userfile://defaultdb.public.userfiles_root/listing-test/basepath", - username.RootUserName(), ie, ief, kvDB, testSettings) + username.RootUserName(), db, testSettings) }) t.Run("reject-normalized-basename", func(t *testing.T) { @@ -72,7 +70,7 @@ func TestPutUserFileTable(t *testing.T) { store, err := cloud.ExternalStorageFromURI(ctx, userfileURL.String()+"/", base.ExternalIODirConfig{}, cluster.NoSettings, blobs.TestEmptyBlobClientFactory, - username.RootUserName(), ie, ief, kvDB, nil, cloud.NilMetrics) + username.RootUserName(), db, nil, cloud.NilMetrics) require.NoError(t, err) defer store.Close() @@ -106,12 +104,11 @@ func TestUserScoping(t *testing.T) { ctx := context.Background() params, _ := tests.CreateTestServerParams() - s, sqlDB, kvDB := serverutils.StartServer(t, params) + s, sqlDB, _ := serverutils.StartServer(t, params) defer s.Stopper().Stop(ctx) dest := MakeUserFileStorageURI(qualifiedTableName, "") - ie := 
s.InternalExecutor().(sqlutil.InternalExecutor) - ief := s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory) + db := s.InternalDB().(isql.DB) // Create two users and grant them all privileges on defaultdb. user1 := username.MakeSQLUsernameFromPreNormalizedString("foo") @@ -121,14 +118,14 @@ func TestUserScoping(t *testing.T) { // Write file as user1. fileTableSystem1, err := cloud.ExternalStorageFromURI(ctx, dest, base.ExternalIODirConfig{}, - cluster.NoSettings, blobs.TestEmptyBlobClientFactory, user1, ie, ief, kvDB, nil, + cluster.NoSettings, blobs.TestEmptyBlobClientFactory, user1, db, nil, cloud.NilMetrics) require.NoError(t, err) require.NoError(t, cloud.WriteFile(ctx, fileTableSystem1, filename, bytes.NewReader([]byte("aaa")))) // Attempt to read/write file as user2 and expect to fail. fileTableSystem2, err := cloud.ExternalStorageFromURI(ctx, dest, base.ExternalIODirConfig{}, - cluster.NoSettings, blobs.TestEmptyBlobClientFactory, user2, ie, ief, kvDB, nil, + cluster.NoSettings, blobs.TestEmptyBlobClientFactory, user2, db, nil, cloud.NilMetrics) require.NoError(t, err) _, err = fileTableSystem2.ReadFile(ctx, filename) @@ -137,7 +134,7 @@ func TestUserScoping(t *testing.T) { // Read file as root and expect to succeed. 
fileTableSystem3, err := cloud.ExternalStorageFromURI(ctx, dest, base.ExternalIODirConfig{}, - cluster.NoSettings, blobs.TestEmptyBlobClientFactory, username.RootUserName(), ie, ief, kvDB, + cluster.NoSettings, blobs.TestEmptyBlobClientFactory, username.RootUserName(), db, nil, cloud.NilMetrics) require.NoError(t, err) _, err = fileTableSystem3.ReadFile(ctx, filename) diff --git a/pkg/cloud/userfile/filetable/BUILD.bazel b/pkg/cloud/userfile/filetable/BUILD.bazel index 5ca59c1ae3d4..b1dabe478f41 100644 --- a/pkg/cloud/userfile/filetable/BUILD.bazel +++ b/pkg/cloud/userfile/filetable/BUILD.bazel @@ -8,13 +8,11 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/cloud", - "//pkg/kv", "//pkg/security/username", - "//pkg/sql/catalog/descs", + "//pkg/sql/isql", "//pkg/sql/parser", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/util/ioctx", "//pkg/util/log", "@com_github_cockroachdb_errors//:errors", diff --git a/pkg/cloud/userfile/filetable/file_table_read_writer.go b/pkg/cloud/userfile/filetable/file_table_read_writer.go index c935c0fd075f..51533f14eb8e 100644 --- a/pkg/cloud/userfile/filetable/file_table_read_writer.go +++ b/pkg/cloud/userfile/filetable/file_table_read_writer.go @@ -20,13 +20,11 @@ import ( "os" "github.com/cockroachdb/cockroach/pkg/cloud" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/ioctx" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" @@ -42,7 +40,7 @@ var payloadTableNameSuffix = "_upload_payload" // FileToTableExecutorRows encompasses the two formats in which 
the // InternalFileToTableExecutor and SQLConnFileToTableExecutor output their rows. type FileToTableExecutorRows struct { - internalExecResultsIterator sqlutil.InternalRows + internalExecResultsIterator isql.Rows sqlConnExecResults driver.Rows } @@ -60,19 +58,16 @@ type FileToTableSystemExecutor interface { // InternalFileToTableExecutor is the SQL query executor which uses an internal // SQL connection to interact with the database. type InternalFileToTableExecutor struct { - ie sqlutil.InternalExecutor - ief sqlutil.InternalExecutorFactory - db *kv.DB + ie isql.Executor + db isql.DB } var _ FileToTableSystemExecutor = &InternalFileToTableExecutor{} // MakeInternalFileToTableExecutor returns an instance of a // InternalFileToTableExecutor. -func MakeInternalFileToTableExecutor( - ie sqlutil.InternalExecutor, ief sqlutil.InternalExecutorFactory, db *kv.DB, -) *InternalFileToTableExecutor { - return &InternalFileToTableExecutor{ie, ief, db} +func MakeInternalFileToTableExecutor(db isql.DB) *InternalFileToTableExecutor { + return &InternalFileToTableExecutor{ie: db.Executor(), db: db} } // Query implements the FileToTableSystemExecutor interface. @@ -245,28 +240,28 @@ func NewFileToTableSystem( if err != nil { return nil, err } - if err := e.ief.(descs.TxnManager).DescsTxnWithExecutor( - ctx, e.db, nil /* SessionData */, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor, + if err := e.db.Txn( + ctx, func( + ctx context.Context, txn isql.Txn, ) error { // TODO(adityamaru): Handle scenario where the user has already created // tables with the same names not via the FileToTableSystem // object. Not sure if we want to error out or work around it. 
- tablesExist, err := f.checkIfFileAndPayloadTableExist(ctx, txn, ie) + tablesExist, err := f.checkIfFileAndPayloadTableExist(ctx, txn) if err != nil { return err } if !tablesExist { - if err := f.createFileAndPayloadTables(ctx, txn, ie); err != nil { + if err := f.createFileAndPayloadTables(ctx, txn); err != nil { return err } - if err := f.grantCurrentUserTablePrivileges(ctx, txn, ie); err != nil { + if err := f.grantCurrentUserTablePrivileges(ctx, txn); err != nil { return err } - if err := f.revokeOtherUserTablePrivileges(ctx, txn, ie); err != nil { + if err := f.revokeOtherUserTablePrivileges(ctx, txn); err != nil { return err } } @@ -367,12 +362,10 @@ func DestroyUserFileSystem(ctx context.Context, f *FileToTableSystem) error { return err } - if err := e.ief.(descs.TxnManager).DescsTxnWithExecutor( - ctx, e.db, nil /* sd */, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor, - ) error { + if err := e.db.Txn( + ctx, func(ctx context.Context, txn isql.Txn) error { dropPayloadTableQuery := fmt.Sprintf(`DROP TABLE %s`, f.GetFQPayloadTableName()) - _, err := ie.ExecEx(ctx, "drop-payload-table", txn, + _, err := txn.ExecEx(ctx, "drop-payload-table", txn.KV(), sessiondata.InternalExecutorOverride{User: f.username}, dropPayloadTableQuery) if err != nil { @@ -380,7 +373,7 @@ func DestroyUserFileSystem(ctx context.Context, f *FileToTableSystem) error { } dropFileTableQuery := fmt.Sprintf(`DROP TABLE %s CASCADE`, f.GetFQFileTableName()) - _, err = ie.ExecEx(ctx, "drop-file-table", txn, + _, err = txn.ExecEx(ctx, "drop-file-table", txn.KV(), sessiondata.InternalExecutorOverride{User: f.username}, dropFileTableQuery) if err != nil { @@ -412,7 +405,7 @@ SELECT file_id FROM %s WHERE filename=$1)` // already open explicit txn to provide transactional guarantees. This is used // by WriteFile to allow for overwriting of an existing file with the same name. 
func (f *FileToTableSystem) deleteFileWithoutTxn( - ctx context.Context, filename string, ie sqlutil.InternalExecutor, + ctx context.Context, filename string, ie isql.Executor, ) error { execSessionDataOverride := sessiondata.InternalExecutorOverride{User: f.username} _, err := ie.ExecEx(ctx, "delete-payload-table", @@ -462,8 +455,7 @@ func (f *FileToTableSystem) DeleteFile(ctx context.Context, filename string) err // Payload table. type payloadWriter struct { fileID tree.Datum - ief sqlutil.InternalExecutorFactory - db *kv.DB + ief isql.DB ctx context.Context byteOffset int execSessionDataOverride sessiondata.InternalExecutorOverride @@ -474,11 +466,9 @@ type payloadWriter struct { // WriteChunk inserts a single row into the Payload table as an operation in the // transaction txn. // TODO(janexing): can the insert happen with a nil txn? -func (p *payloadWriter) WriteChunk( - buf []byte, txn *kv.Txn, ie sqlutil.InternalExecutor, -) (int, error) { +func (p *payloadWriter) WriteChunk(buf []byte, txn isql.Txn) (int, error) { insertChunkQuery := fmt.Sprintf(`INSERT INTO %s VALUES ($1, $2, $3)`, p.payloadTableName) - _, err := ie.ExecEx(p.ctx, "insert-file-chunk", txn, p.execSessionDataOverride, + _, err := txn.ExecEx(p.ctx, "insert-file-chunk", txn.KV(), p.execSessionDataOverride, insertChunkQuery, p.fileID, p.byteOffset, buf) if err != nil { return 0, err @@ -512,9 +502,7 @@ func newChunkWriter( filename string, user username.SQLUsername, fileTableName, payloadTableName string, - ief sqlutil.InternalExecutorFactory, - ie sqlutil.InternalExecutor, - db *kv.DB, + db isql.DB, ) (*chunkWriter, error) { execSessionDataOverride := sessiondata.InternalExecutorOverride{User: user} @@ -524,6 +512,7 @@ func newChunkWriter( fileNameQuery := fmt.Sprintf(`INSERT INTO %s VALUES ($1, DEFAULT, $2, $3) RETURNING file_id`, fileTableName) + ie := db.Executor() res, err := ie.QueryRowEx(ctx, "insert-file-name", nil /* txn */, execSessionDataOverride, fileNameQuery, filename, 0, 
execSessionDataOverride.User) @@ -535,9 +524,13 @@ func newChunkWriter( } pw := &payloadWriter{ - res[0], ief, db, ctx, 0, - execSessionDataOverride, fileTableName, - payloadTableName} + ctx: ctx, + fileID: res[0], + ief: db, + byteOffset: 0, + fileTableName: fileTableName, + payloadTableName: payloadTableName, + } bytesBuffer := bytes.NewBuffer(make([]byte, 0, chunkSize)) return &chunkWriter{ bytesBuffer, pw, execSessionDataOverride, @@ -582,10 +575,10 @@ func (w *chunkWriter) Write(buf []byte) (int, error) { // retry loop. if w.buf.Len() == w.buf.Cap() { // TODO(janexing): Is it necessary to run the following within a txn? - if err := w.pw.ief.TxnWithExecutor(w.pw.ctx, w.pw.db, nil /* sessionData */, func( - ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, + if err := w.pw.ief.Txn(w.pw.ctx, func( + ctx context.Context, txn isql.Txn, ) error { - if n, err := w.pw.WriteChunk(w.buf.Bytes(), txn, ie); err != nil { + if n, err := w.pw.WriteChunk(w.buf.Bytes(), txn); err != nil { return err } else if n != w.buf.Len() { return errors.Wrap(io.ErrShortWrite, "error when writing in chunkWriter") @@ -613,11 +606,10 @@ func (w *chunkWriter) Close() error { // payloadWriter Write() method, then the txn is aborted and the error is // propagated here. if w.buf.Len() > 0 { - // TODO(janexing): Is it necessary to run the following within a txn? - if err := w.pw.ief.TxnWithExecutor(w.pw.ctx, w.pw.db, nil /* sessionData */, func( - ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, + if err := w.pw.ief.Txn(w.pw.ctx, func( + ctx context.Context, txn isql.Txn, ) error { - if n, err := w.pw.WriteChunk(w.buf.Bytes(), txn, ie); err != nil { + if n, err := w.pw.WriteChunk(w.buf.Bytes(), txn); err != nil { return err } else if n != w.buf.Len() { return errors.Wrap(io.ErrShortWrite, "error when closing chunkWriter") @@ -632,7 +624,7 @@ func (w *chunkWriter) Close() error { // were actually written to the payload table. 
updateFileSizeQuery := fmt.Sprintf(`UPDATE %s SET file_size=$1 WHERE filename=$2`, w.fileTableName) - ie := w.pw.ief.MakeInternalExecutorWithoutTxn() + ie := w.pw.ief.Executor() _, err := ie.ExecEx(w.pw.ctx, "update-file-size", nil /* txn */, w.execSessionDataOverride, updateFileSizeQuery, w.pw.byteOffset, w.filename) @@ -795,7 +787,7 @@ func (f *FileToTableSystem) ReadFile( } func (f *FileToTableSystem) checkIfFileAndPayloadTableExist( - ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, ) (bool, error) { tablePrefix, err := f.GetTableName() if err != nil { @@ -824,7 +816,7 @@ func (f *FileToTableSystem) checkIfFileAndPayloadTableExist( tableExistenceQuery := fmt.Sprintf( `SELECT table_name FROM [SHOW TABLES FROM %s] WHERE table_name=$1 OR table_name=$2`, databaseSchema) - numRows, err := ie.ExecEx(ctx, "tables-exist", txn, + numRows, err := txn.ExecEx(ctx, "tables-exist", txn.KV(), sessiondata.RootUserSessionDataOverride, tableExistenceQuery, fileTableName, payloadTableName) if err != nil { @@ -838,12 +830,10 @@ func (f *FileToTableSystem) checkIfFileAndPayloadTableExist( return numRows == 2, nil } -func (f *FileToTableSystem) createFileAndPayloadTables( - ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, -) error { +func (f *FileToTableSystem) createFileAndPayloadTables(ctx context.Context, txn isql.Txn) error { // Create the File and Payload tables to hold the file chunks. 
fileTableCreateQuery := fmt.Sprintf(fileTableSchema, f.GetFQFileTableName()) - _, err := ie.ExecEx(ctx, "create-file-table", txn, + _, err := txn.ExecEx(ctx, "create-file-table", txn.KV(), sessiondata.InternalExecutorOverride{User: f.username}, fileTableCreateQuery) if err != nil { @@ -851,7 +841,7 @@ func (f *FileToTableSystem) createFileAndPayloadTables( } payloadTableCreateQuery := fmt.Sprintf(payloadTableSchema, f.GetFQPayloadTableName()) - _, err = ie.ExecEx(ctx, "create-payload-table", txn, + _, err = txn.ExecEx(ctx, "create-payload-table", txn.KV(), sessiondata.InternalExecutorOverride{User: f.username}, payloadTableCreateQuery) if err != nil { @@ -860,7 +850,7 @@ func (f *FileToTableSystem) createFileAndPayloadTables( addFKQuery := fmt.Sprintf(`ALTER TABLE %s ADD CONSTRAINT file_id_fk FOREIGN KEY ( file_id) REFERENCES %s (file_id)`, f.GetFQPayloadTableName(), f.GetFQFileTableName()) - _, err = ie.ExecEx(ctx, "create-payload-table", txn, + _, err = txn.ExecEx(ctx, "create-payload-table", txn.KV(), sessiondata.InternalExecutorOverride{User: f.username}, addFKQuery) if err != nil { @@ -873,11 +863,11 @@ file_id) REFERENCES %s (file_id)`, f.GetFQPayloadTableName(), f.GetFQFileTableNa // Grant the current user all read/edit privileges for the file and payload // tables. 
func (f *FileToTableSystem) grantCurrentUserTablePrivileges( - ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, ) error { grantQuery := fmt.Sprintf(`GRANT SELECT, INSERT, DROP, DELETE ON TABLE %s, %s TO %s`, f.GetFQFileTableName(), f.GetFQPayloadTableName(), f.username.SQLIdentifier()) - _, err := ie.ExecEx(ctx, "grant-user-file-payload-table-access", txn, + _, err := txn.ExecEx(ctx, "grant-user-file-payload-table-access", txn.KV(), sessiondata.RootUserSessionDataOverride, grantQuery) if err != nil { @@ -890,12 +880,12 @@ func (f *FileToTableSystem) grantCurrentUserTablePrivileges( // Revoke all privileges from every user and role except root/admin and the // current user. func (f *FileToTableSystem) revokeOtherUserTablePrivileges( - ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, ) error { getUsersQuery := `SELECT username FROM system. users WHERE NOT "username" = 'root' AND NOT "username" = 'admin' AND NOT "username" = $1` - it, err := ie.QueryIteratorEx( - ctx, "get-users", txn, + it, err := txn.QueryIteratorEx( + ctx, "get-users", txn.KV(), sessiondata.RootUserSessionDataOverride, getUsersQuery, f.username, ) @@ -917,7 +907,7 @@ users WHERE NOT "username" = 'root' AND NOT "username" = 'admin' AND NOT "userna for _, user := range users { revokeQuery := fmt.Sprintf(`REVOKE ALL ON TABLE %s, %s FROM %s`, f.GetFQFileTableName(), f.GetFQPayloadTableName(), user.SQLIdentifier()) - _, err = ie.ExecEx(ctx, "revoke-user-privileges", txn, + _, err = txn.ExecEx(ctx, "revoke-user-privileges", txn.KV(), sessiondata.RootUserSessionDataOverride, revokeQuery) if err != nil { @@ -954,5 +944,5 @@ func (f *FileToTableSystem) NewFileWriter( } return newChunkWriter(ctx, chunkSize, filename, f.username, f.GetFQFileTableName(), - f.GetFQPayloadTableName(), e.ief, e.ie, e.db) + f.GetFQPayloadTableName(), e.db) } diff --git a/pkg/cloud/userfile/filetable/filetabletest/BUILD.bazel 
b/pkg/cloud/userfile/filetable/filetabletest/BUILD.bazel index 81d959c53cec..422ea496e8b2 100644 --- a/pkg/cloud/userfile/filetable/filetabletest/BUILD.bazel +++ b/pkg/cloud/userfile/filetable/filetabletest/BUILD.bazel @@ -16,8 +16,7 @@ go_test( "//pkg/security/securitytest", "//pkg/security/username", "//pkg/server", - "//pkg/sql", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/sql/tests", "//pkg/testutils", "//pkg/testutils/serverutils", diff --git a/pkg/cloud/userfile/filetable/filetabletest/file_table_read_writer_test.go b/pkg/cloud/userfile/filetable/filetabletest/file_table_read_writer_test.go index 08cad907c65c..7ba49cb05ad1 100644 --- a/pkg/cloud/userfile/filetable/filetabletest/file_table_read_writer_test.go +++ b/pkg/cloud/userfile/filetable/filetabletest/file_table_read_writer_test.go @@ -21,8 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/cloud/userfile/filetable" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/sql" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -110,9 +109,7 @@ func TestListAndDeleteFiles(t *testing.T) { defer s.Stopper().Stop(ctx) executor := filetable.MakeInternalFileToTableExecutor( - s.InternalExecutor().(*sql.InternalExecutor), - s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory), - kvDB, + s.InternalDB().(isql.DB), ) fileTableReadWriter, err := filetable.NewFileToTableSystem(ctx, qualifiedTableName, executor, username.RootUserName()) @@ -165,9 +162,7 @@ func TestReadWriteFile(t *testing.T) { defer s.Stopper().Stop(ctx) executor := filetable.MakeInternalFileToTableExecutor( - s.InternalExecutor().(*sql.InternalExecutor), - s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory), - kvDB, + 
s.InternalDB().(isql.DB), ) fileTableReadWriter, err := filetable.NewFileToTableSystem(ctx, qualifiedTableName, executor, username.RootUserName()) @@ -352,9 +347,7 @@ func TestUserGrants(t *testing.T) { // Operate under non-admin user. executor := filetable.MakeInternalFileToTableExecutor( - s.InternalExecutor().(*sql.InternalExecutor), - s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory), - kvDB, + s.InternalDB().(isql.DB), ) johnUser := username.MakeSQLUsernameFromPreNormalizedString("john") fileTableReadWriter, err := filetable.NewFileToTableSystem(ctx, qualifiedTableName, @@ -440,9 +433,7 @@ func TestDifferentUserDisallowed(t *testing.T) { // Operate under non-admin user john. executor := filetable.MakeInternalFileToTableExecutor( - s.InternalExecutor().(*sql.InternalExecutor), - s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory), - kvDB, + s.InternalDB().(isql.DB), ) johnUser := username.MakeSQLUsernameFromPreNormalizedString("john") fileTableReadWriter, err := filetable.NewFileToTableSystem(ctx, qualifiedTableName, @@ -502,9 +493,7 @@ func TestDifferentRoleDisallowed(t *testing.T) { // Operate under non-admin user john. 
executor := filetable.MakeInternalFileToTableExecutor( - s.InternalExecutor().(*sql.InternalExecutor), - s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory), - kvDB, + s.InternalDB().(isql.DB), ) johnUser := username.MakeSQLUsernameFromPreNormalizedString("john") fileTableReadWriter, err := filetable.NewFileToTableSystem(ctx, qualifiedTableName, @@ -541,9 +530,7 @@ func TestDatabaseScope(t *testing.T) { defer s.Stopper().Stop(ctx) executor := filetable.MakeInternalFileToTableExecutor( - s.InternalExecutor().(*sql.InternalExecutor), - s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory), - kvDB, + s.InternalDB().(isql.DB), ) fileTableReadWriter, err := filetable.NewFileToTableSystem(ctx, qualifiedTableName, executor, username.RootUserName()) diff --git a/pkg/jobs/BUILD.bazel b/pkg/jobs/BUILD.bazel index 98be8e7ae7e2..9217d9e939df 100644 --- a/pkg/jobs/BUILD.bazel +++ b/pkg/jobs/BUILD.bazel @@ -44,12 +44,13 @@ go_library( "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/descs", + "//pkg/sql/isql", "//pkg/sql/protoreflect", "//pkg/sql/sem/builtins", + "//pkg/sql/sem/catconstants", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", "//pkg/sql/sqlliveness", - "//pkg/sql/sqlutil", "//pkg/sql/types", "//pkg/util", "//pkg/util/contextutil", @@ -119,13 +120,12 @@ go_test( "//pkg/sql/catalog/catalogkeys", "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/descpb", - "//pkg/sql/catalog/descs", "//pkg/sql/catalog/desctestutils", "//pkg/sql/catalog/tabledesc", + "//pkg/sql/isql", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", "//pkg/sql/sqlliveness", - "//pkg/sql/sqlutil", "//pkg/sql/tests", "//pkg/testutils", "//pkg/testutils/serverutils", diff --git a/pkg/jobs/adopt.go b/pkg/jobs/adopt.go index f2fbfa334384..4b3ebd86c327 100644 --- a/pkg/jobs/adopt.go +++ b/pkg/jobs/adopt.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/multitenant" 
"github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" @@ -71,7 +72,7 @@ func (r *Registry) maybeDumpTrace( // could have been canceled at this point. dumpCtx, _ := r.makeCtx() - ieNotBoundToTxn := r.internalExecutorFactory.MakeInternalExecutorWithoutTxn() + ieNotBoundToTxn := r.internalDB.Executor() // If the job has failed, and the dump mode is set to anything // except noDump, then we should dump the trace. @@ -92,14 +93,14 @@ func (r *Registry) maybeDumpTrace( // claimJobs places a claim with the given SessionID to job rows that are // available. func (r *Registry) claimJobs(ctx context.Context, s sqlliveness.Session) error { - return r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return r.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { // Run the claim transaction at low priority to ensure that it does not // contend with foreground reads. 
- if err := txn.SetUserPriority(roachpb.MinUserPriority); err != nil { + if err := txn.KV().SetUserPriority(roachpb.MinUserPriority); err != nil { return errors.WithAssertionFailure(err) } - numRows, err := r.ex.Exec( - ctx, "claim-jobs", txn, claimQuery, + numRows, err := txn.Exec( + ctx, "claim-jobs", txn.KV(), claimQuery, s.ID().UnsafeBytes(), r.ID(), maxAdoptionsPerLoop) if err != nil { return errors.Wrap(err, "could not query jobs table") @@ -166,7 +167,7 @@ func getProcessQuery( func (r *Registry) processClaimedJobs(ctx context.Context, s sqlliveness.Session) error { query, args := getProcessQuery(ctx, s, r) - it, err := r.ex.QueryIteratorEx( + it, err := r.db.Executor().QueryIteratorEx( ctx, "select-running/get-claimed-jobs", nil, sessiondata.NodeUserSessionDataOverride, query, args..., ) @@ -240,12 +241,14 @@ func (r *Registry) filterAlreadyRunningAndCancelFromPreviousSessions( } // resumeJob resumes a claimed job. -func (r *Registry) resumeJob(ctx context.Context, jobID jobspb.JobID, s sqlliveness.Session) error { +func (r *Registry) resumeJob( + ctx context.Context, jobID jobspb.JobID, s sqlliveness.Session, +) (retErr error) { log.Infof(ctx, "job %d: resuming execution", jobID) resumeQuery := resumeQueryWithBackoff args := []interface{}{jobID, s.ID().UnsafeBytes(), r.clock.Now().GoTime(), r.RetryInitialDelay(), r.RetryMaxDelay()} - row, err := r.ex.QueryRowEx( + row, err := r.db.Executor().QueryRowEx( ctx, "get-job-row", nil, sessiondata.NodeUserSessionDataOverride, resumeQuery, args..., ) @@ -319,7 +322,6 @@ func (r *Registry) resumeJob(ctx context.Context, jobID jobspb.JobID, s sqlliven if opts, ok := options[payload.Type()]; ok && opts.disableTenantCostControl { resumeCtx = multitenant.WithTenantCostControlExemption(resumeCtx) } - if alreadyAdopted := r.addAdoptedJob(jobID, s, cancel); alreadyAdopted { return nil } @@ -396,7 +398,7 @@ func (r *Registry) runJob( span.SetTag("job-id", attribute.Int64Value(int64(job.ID()))) defer span.Finish() if 
span.TraceID() != 0 { - if err := job.Update(ctx, nil /* txn */, func(txn *kv.Txn, md JobMetadata, + if err := job.NoTxn().Update(ctx, func(txn isql.Txn, md JobMetadata, ju *JobUpdater) error { progress := *md.Progress progress.TraceID = span.TraceID() @@ -441,14 +443,14 @@ func (r *Registry) maybeClearLease(job *Job, jobErr error) { if jobErr == nil { return } - r.clearLeaseForJobID(job.ID(), nil /* txn */) + r.clearLeaseForJobID(job.ID(), r.db.Executor(), nil /* txn */) } -func (r *Registry) clearLeaseForJobID(jobID jobspb.JobID, txn *kv.Txn) { +func (r *Registry) clearLeaseForJobID(jobID jobspb.JobID, ex isql.Executor, txn *kv.Txn) { // We use the serverCtx here rather than the context from the // caller since the caller's context may have been canceled. r.withSession(r.serverCtx, func(ctx context.Context, s sqlliveness.Session) { - n, err := r.ex.ExecEx(ctx, "clear-job-claim", txn, + n, err := ex.ExecEx(ctx, "clear-job-claim", txn, sessiondata.NodeUserSessionDataOverride, clearClaimQuery, jobID, s.ID().UnsafeBytes(), r.ID()) if err != nil { @@ -475,18 +477,18 @@ RETURNING id, status ` func (r *Registry) servePauseAndCancelRequests(ctx context.Context, s sqlliveness.Session) error { - return r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return r.internalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { // Run the claim transaction at low priority to ensure that it does not // contend with foreground reads. - if err := txn.SetUserPriority(roachpb.MinUserPriority); err != nil { + if err := txn.KV().SetUserPriority(roachpb.MinUserPriority); err != nil { return errors.WithAssertionFailure(err) } // Note that we have to buffer all rows first - before processing each // job - because we have to make sure that the query executes without an // error (otherwise, the system.jobs table might diverge from the jobs // registry). 
- rows, err := r.ex.QueryBufferedEx( - ctx, "cancel/pause-requested", txn, sessiondata.NodeUserSessionDataOverride, + rows, err := txn.QueryBufferedEx( + ctx, "cancel/pause-requested", txn.KV(), sessiondata.NodeUserSessionDataOverride, pauseAndCancelUpdate, s.ID().UnsafeBytes(), r.ID(), ) if err != nil { @@ -502,11 +504,13 @@ func (r *Registry) servePauseAndCancelRequests(ctx context.Context, s sqllivenes // If we didn't already have a running job for this lease, // clear out the lease here since it won't be cleared be // cleared out on Resume exit. - r.clearLeaseForJobID(id, txn) + r.clearLeaseForJobID(id, txn, txn.KV()) } log.Infof(ctx, "job %d, session %s: paused", id, s.ID()) case StatusReverting: - if err := job.Update(ctx, txn, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { + if err := job.WithTxn(txn).Update(ctx, func( + txn isql.Txn, md JobMetadata, ju *JobUpdater, + ) error { if !r.cancelRegisteredJobContext(id) { // If we didn't already have a running job for this // lease, clear out the lease here since it won't be @@ -516,7 +520,7 @@ func (r *Registry) servePauseAndCancelRequests(ctx context.Context, s sqllivenes // the fact that the job struct does not have a // claim set and thus won't validate the claim on // update. 
- r.clearLeaseForJobID(id, txn) + r.clearLeaseForJobID(id, txn, txn.KV()) } md.Payload.Error = errJobCanceled.Error() encodedErr := errors.EncodeError(ctx, errJobCanceled) diff --git a/pkg/jobs/delegate_control_test.go b/pkg/jobs/delegate_control_test.go index 2a310f127d3b..7f5021c71545 100644 --- a/pkg/jobs/delegate_control_test.go +++ b/pkg/jobs/delegate_control_test.go @@ -55,12 +55,14 @@ func TestScheduleControl(t *testing.T) { var recurringNever string + schedules := ScheduledJobDB(th.cfg.DB) makeSchedule := func(name string, cron string) int64 { schedule := th.newScheduledJob(t, name, "sql") if cron != "" { require.NoError(t, schedule.SetSchedule(cron)) } - require.NoError(t, schedule.Create(ctx, th.cfg.InternalExecutor, nil)) + + require.NoError(t, schedules.Create(ctx, schedule)) return schedule.ScheduleID() } @@ -83,7 +85,7 @@ func TestScheduleControl(t *testing.T) { ms := time.Microsecond firstRunTime := timeutil.Now().Add(10 * time.Second).Truncate(ms) schedule.SetNextRun(firstRunTime) - require.NoError(t, schedule.Create(ctx, th.cfg.InternalExecutor, nil)) + require.NoError(t, schedules.Create(ctx, schedule)) scheduleID := schedule.ScheduleID() require.Equal(t, schedule.NextRun(), firstRunTime) th.sqlDB.Exec(t, "RESUME SCHEDULE $1", scheduleID) @@ -95,7 +97,7 @@ func TestScheduleControl(t *testing.T) { t.Run("cannot-resume-one-off-schedule", func(t *testing.T) { schedule := th.newScheduledJob(t, "test schedule", "select 42") - require.NoError(t, schedule.Create(ctx, th.cfg.InternalExecutor, nil)) + require.NoError(t, schedules.Create(ctx, schedule)) th.sqlDB.ExpectErr(t, "cannot set next run for schedule", "RESUME SCHEDULE $1", schedule.ScheduleID()) @@ -233,7 +235,7 @@ func TestJobsControlForSchedules(t *testing.T) { t.Run(jobControl, func(t *testing.T) { // Go through internal executor to execute job control command. // This correctly reports the number of effected rows. 
- numEffected, err := th.cfg.InternalExecutor.ExecEx( + numEffected, err := th.cfg.DB.Executor().ExecEx( context.Background(), "test-num-effected", nil, @@ -315,7 +317,7 @@ func TestFilterJobsControlForSchedules(t *testing.T) { // correctly reports the number of effected rows which should only be // equal to the number of validStartingStates as all the other states are // invalid/no-ops. - numEffected, err := th.cfg.InternalExecutor.ExecEx( + numEffected, err := th.cfg.DB.Executor().ExecEx( context.Background(), "test-num-effected", nil, @@ -350,7 +352,7 @@ func TestJobControlByType(t *testing.T) { t.Run("Errors if invalid type is specified", func(t *testing.T) { invalidTypeQuery := "PAUSE ALL blah JOBS" - _, err := th.cfg.InternalExecutor.ExecEx( + _, err := th.cfg.DB.Executor().ExecEx( context.Background(), "test-invalid-type", nil, @@ -430,7 +432,7 @@ func TestJobControlByType(t *testing.T) { jobIdsClause := fmt.Sprint(strings.Join(jobIDStrings, ", ")) // Execute the command and verify its executed on the expected number of rows - numEffected, err := th.cfg.InternalExecutor.ExecEx( + numEffected, err := th.cfg.DB.Executor().ExecEx( context.Background(), "test-num-effected", nil, diff --git a/pkg/jobs/executor_impl.go b/pkg/jobs/executor_impl.go index 20e734648892..2e2cd19188bf 100644 --- a/pkg/jobs/executor_impl.go +++ b/pkg/jobs/executor_impl.go @@ -15,11 +15,9 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/errors" "github.com/gogo/protobuf/types" @@ -41,10 +39,10 @@ const retryFailedJobAfter = time.Minute // ExecuteJob implements ScheduledJobExecutor 
interface. func (e *inlineScheduledJobExecutor) ExecuteJob( ctx context.Context, + txn isql.Txn, cfg *scheduledjobs.JobExecutionConfig, - _ scheduledjobs.JobSchedulerEnv, + env scheduledjobs.JobSchedulerEnv, schedule *ScheduledJob, - txn *kv.Txn, ) error { sqlArgs := &jobspb.SqlStatementExecutionArg{} @@ -56,7 +54,7 @@ func (e *inlineScheduledJobExecutor) ExecuteJob( // to capture execution traces, or some similar debug information and save that. // Also, performing this under the same transaction as the scan loop is not ideal // since a single failure would result in rollback for all of the changes. - _, err := cfg.InternalExecutor.ExecEx(ctx, "inline-exec", txn, + _, err := txn.ExecEx(ctx, "inline-exec", txn.KV(), sessiondata.RootUserSessionDataOverride, sqlArgs.Statement, ) @@ -72,13 +70,12 @@ func (e *inlineScheduledJobExecutor) ExecuteJob( // NotifyJobTermination implements ScheduledJobExecutor interface. func (e *inlineScheduledJobExecutor) NotifyJobTermination( ctx context.Context, + txn isql.Txn, jobID jobspb.JobID, jobStatus Status, - _ jobspb.Details, + details jobspb.Details, env scheduledjobs.JobSchedulerEnv, schedule *ScheduledJob, - ex sqlutil.InternalExecutor, - txn *kv.Txn, ) error { // For now, only interested in failed status. 
if jobStatus == StatusFailed { @@ -93,12 +90,7 @@ func (e *inlineScheduledJobExecutor) Metrics() metric.Struct { } func (e *inlineScheduledJobExecutor) GetCreateScheduleStatement( - ctx context.Context, - env scheduledjobs.JobSchedulerEnv, - txn *kv.Txn, - descsCol *descs.Collection, - sj *ScheduledJob, - ex sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, env scheduledjobs.JobSchedulerEnv, sj *ScheduledJob, ) (string, error) { return "", errors.AssertionFailedf("unimplemented method: 'GetCreateScheduleStatement'") } diff --git a/pkg/jobs/executor_impl_test.go b/pkg/jobs/executor_impl_test.go index b092b4bbd54b..55faf23cbff0 100644 --- a/pkg/jobs/executor_impl_test.go +++ b/pkg/jobs/executor_impl_test.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/stretchr/testify/require" @@ -60,12 +61,14 @@ func TestInlineExecutorFailedJobsHandling(t *testing.T) { j.SetScheduleDetails(jobspb.ScheduleDetails{OnError: test.onError}) ctx := context.Background() - require.NoError(t, j.Create(ctx, h.cfg.InternalExecutor, nil)) + require.NoError(t, ScheduledJobDB(h.cfg.DB).Create(ctx, j)) // Pretend we failed running; we expect job to be rescheduled. 
- require.NoError(t, NotifyJobTermination( - ctx, h.env, 123, StatusFailed, nil, j.ScheduleID(), h.cfg.InternalExecutor, nil)) - + require.NoError(t, h.cfg.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return NotifyJobTermination( + ctx, txn, h.env, 123, StatusFailed, nil, j.ScheduleID(), + ) + })) // Verify nextRun updated loaded := h.loadSchedule(t, j.ScheduleID()) require.Equal(t, test.expectedNextRun, loaded.NextRun()) diff --git a/pkg/jobs/helpers_test.go b/pkg/jobs/helpers_test.go index 13c6c4ea6452..db478d35c817 100644 --- a/pkg/jobs/helpers_test.go +++ b/pkg/jobs/helpers_test.go @@ -14,7 +14,7 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/errors" ) @@ -59,7 +59,7 @@ type OnPauseRequestFunc = onPauseRequestFunc var _ PauseRequester = FakeResumer{} func (d FakeResumer) OnPauseRequest( - ctx context.Context, execCtx interface{}, txn *kv.Txn, details *jobspb.Progress, + ctx context.Context, execCtx interface{}, txn isql.Txn, details *jobspb.Progress, ) error { if d.PauseRequest == nil { return nil @@ -67,54 +67,53 @@ func (d FakeResumer) OnPauseRequest( return d.PauseRequest(ctx, execCtx, txn, details) } +func (r *Registry) CancelRequested(ctx context.Context, txn isql.Txn, id jobspb.JobID) error { + return r.cancelRequested(ctx, txn, id) +} + // Started is a wrapper around the internal function that moves a job to the // started state. func (j *Job) Started(ctx context.Context) error { - return j.started(ctx, nil /* txn */) + return j.NoTxn().started(ctx) } // Reverted is a wrapper around the internal function that moves a job to the // reverting state. 
func (j *Job) Reverted(ctx context.Context, err error) error { - return j.reverted(ctx, nil /* txn */, err, nil) + return j.NoTxn().reverted(ctx, err, nil) } // Paused is a wrapper around the internal function that moves a job to the // paused state. func (j *Job) Paused(ctx context.Context) error { - return j.paused(ctx, nil /* txn */, nil /* fn */) + return j.NoTxn().paused(ctx, nil /* fn */) } // Failed is a wrapper around the internal function that moves a job to the // failed state. func (j *Job) Failed(ctx context.Context, causingErr error) error { - return j.failed(ctx, nil /* txn */, causingErr, nil /* fn */) + return j.NoTxn().failed(ctx, causingErr) } // Succeeded is a wrapper around the internal function that moves a job to the // succeeded state. func (j *Job) Succeeded(ctx context.Context) error { - return j.succeeded(ctx, nil /* txn */, nil /* fn */) + return j.NoTxn().succeeded(ctx, nil /* fn */) } // TestingCurrentStatus returns the current job status from the jobs table or error. 
-func (j *Job) TestingCurrentStatus(ctx context.Context, txn *kv.Txn) (Status, error) { +func (j *Job) TestingCurrentStatus(ctx context.Context) (Status, error) { var statusString tree.DString - if err := j.runInTxn(ctx, txn, func(ctx context.Context, txn *kv.Txn) error { - const selectStmt = "SELECT status FROM system.jobs WHERE id = $1" - row, err := j.registry.ex.QueryRow(ctx, "job-status", txn, selectStmt, j.ID()) - if err != nil { - return errors.Wrapf(err, "job %d: can't query system.jobs", j.ID()) - } - if row == nil { - return errors.Errorf("job %d: not found in system.jobs", j.ID()) - } - - statusString = tree.MustBeDString(row[0]) - return nil - }); err != nil { - return "", err + const selectStmt = "SELECT status FROM system.jobs WHERE id = $1" + row, err := j.registry.db.Executor().QueryRow(ctx, "job-status", nil, selectStmt, j.ID()) + if err != nil { + return "", errors.Wrapf(err, "job %d: can't query system.jobs", j.ID()) } + if row == nil { + return "", errors.Errorf("job %d: not found in system.jobs", j.ID()) + } + + statusString = tree.MustBeDString(row[0]) return Status(statusString), nil } diff --git a/pkg/jobs/job_scheduler.go b/pkg/jobs/job_scheduler.go index 3f6c7df9f4f5..d308a66e99d0 100644 --- a/pkg/jobs/job_scheduler.go +++ b/pkg/jobs/job_scheduler.go @@ -23,9 +23,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/settings" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/contextutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/metric" @@ -75,19 +75,15 @@ var errScheduleNotRunnable = errors.New("schedule not runnable") // loadCandidateScheduleForExecution looks up and locks candidate schedule 
for execution. // The schedule is locked via FOR UPDATE clause to ensure that only this scheduler can modify it. // If schedule cannot execute, a errScheduleNotRunnable error is returned. -func loadCandidateScheduleForExecution( - ctx context.Context, - scheduleID int64, - env scheduledjobs.JobSchedulerEnv, - ie sqlutil.InternalExecutor, - txn *kv.Txn, +func (s scheduledJobStorageTxn) loadCandidateScheduleForExecution( + ctx context.Context, scheduleID int64, env scheduledjobs.JobSchedulerEnv, ) (*ScheduledJob, error) { lookupStmt := fmt.Sprintf( "SELECT * FROM %s WHERE schedule_id=%d AND next_run < %s FOR UPDATE", env.ScheduledJobsTableName(), scheduleID, env.NowExpr()) - row, cols, err := ie.QueryRowExWithCols( + row, cols, err := s.txn.QueryRowExWithCols( ctx, "find-scheduled-jobs-exec", - txn, + s.txn.KV(), sessiondata.RootUserSessionDataOverride, lookupStmt) if err != nil { @@ -107,17 +103,14 @@ func loadCandidateScheduleForExecution( // lookupNumRunningJobs returns the number of running jobs for the specified schedule. 
func lookupNumRunningJobs( - ctx context.Context, - scheduleID int64, - env scheduledjobs.JobSchedulerEnv, - ie sqlutil.InternalExecutor, + ctx context.Context, scheduleID int64, env scheduledjobs.JobSchedulerEnv, txn isql.Txn, ) (int64, error) { lookupStmt := fmt.Sprintf( "SELECT count(*) FROM %s WHERE created_by_type = '%s' AND created_by_id = %d AND status IN %s", env.SystemJobsTableName(), CreatedByScheduledJobs, scheduleID, NonTerminalStatusTupleString) - row, err := ie.QueryRowEx( + row, err := txn.QueryRowEx( ctx, "lookup-num-running", - /*txn=*/ nil, + txn.KV(), sessiondata.RootUserSessionDataOverride, lookupStmt) if err != nil { @@ -129,8 +122,9 @@ func lookupNumRunningJobs( const recheckRunningAfter = 1 * time.Minute func (s *jobScheduler) processSchedule( - ctx context.Context, schedule *ScheduledJob, numRunning int64, txn *kv.Txn, + ctx context.Context, schedule *ScheduledJob, numRunning int64, txn isql.Txn, ) error { + scheduleStorage := ScheduledJobTxn(txn) if numRunning > 0 { switch schedule.ScheduleDetails().Wait { case jobspb.ScheduleDetails_WAIT: @@ -140,14 +134,14 @@ func (s *jobScheduler) processSchedule( schedule.SetNextRun(s.env.Now().Add(recheckRunningAfter)) schedule.SetScheduleStatus("delayed due to %d already running", numRunning) s.metrics.RescheduleWait.Inc(1) - return schedule.Update(ctx, s.InternalExecutor, txn) + return scheduleStorage.Update(ctx, schedule) case jobspb.ScheduleDetails_SKIP: if err := schedule.ScheduleNextRun(); err != nil { return err } schedule.SetScheduleStatus("rescheduled due to %d already running", numRunning) s.metrics.RescheduleSkip.Inc(1) - return schedule.Update(ctx, s.InternalExecutor, txn) + return scheduleStorage.Update(ctx, schedule) } } @@ -165,7 +159,7 @@ func (s *jobScheduler) processSchedule( schedule.SetNextRun(time.Time{}) } - if err := schedule.Update(ctx, s.InternalExecutor, txn); err != nil { + if err := scheduleStorage.Update(ctx, schedule); err != nil { return err } @@ -181,14 +175,14 @@ func 
(s *jobScheduler) processSchedule( schedule.ScheduledRunTime(), schedule.NextRun()) execCtx := logtags.AddTag(ctx, "schedule", schedule.ScheduleID()) - if err := executor.ExecuteJob(execCtx, s.JobExecutionConfig, s.env, schedule, txn); err != nil { + if err := executor.ExecuteJob(execCtx, txn, s.JobExecutionConfig, s.env, schedule); err != nil { return errors.Wrapf(err, "executing schedule %d", schedule.ScheduleID()) } s.metrics.NumStarted.Inc(1) // Persist any mutations to the underlying schedule. - return schedule.Update(ctx, s.InternalExecutor, txn) + return scheduleStorage.Update(ctx, schedule) } type savePointError struct { @@ -235,9 +229,12 @@ func withSavePoint(ctx context.Context, txn *kv.Txn, fn func() error) error { // executeCandidateSchedule attempts to execute schedule. // The schedule is executed only if it's running. func (s *jobScheduler) executeCandidateSchedule( - ctx context.Context, candidate int64, txn *kv.Txn, + ctx context.Context, candidate int64, txn isql.Txn, ) error { - schedule, err := loadCandidateScheduleForExecution(ctx, candidate, s.env, s.InternalExecutor, txn) + sj := scheduledJobStorageTxn{txn} + schedule, err := sj.loadCandidateScheduleForExecution( + ctx, candidate, s.env, + ) if err != nil { if errors.Is(err, errScheduleNotRunnable) { return nil @@ -253,13 +250,15 @@ func (s *jobScheduler) executeCandidateSchedule( return nil } - numRunning, err := lookupNumRunningJobs(ctx, schedule.ScheduleID(), s.env, s.InternalExecutor) + numRunning, err := lookupNumRunningJobs( + ctx, schedule.ScheduleID(), s.env, txn, + ) if err != nil { return err } timeout := schedulerScheduleExecutionTimeout.Get(&s.Settings.SV) - if processErr := withSavePoint(ctx, txn, func() error { + if processErr := withSavePoint(ctx, txn.KV(), func() error { if timeout > 0 { return contextutil.RunWithTimeout( ctx, fmt.Sprintf("process-schedule-%d", schedule.ScheduleID()), timeout, @@ -279,7 +278,7 @@ func (s *jobScheduler) executeCandidateSchedule( "error 
processing schedule %d: %+v", schedule.ScheduleID(), processErr) // Try updating schedule record to indicate schedule execution error. - if err := withSavePoint(ctx, txn, func() error { + if err := withSavePoint(ctx, txn.KV(), func() error { // Discard changes already made to the schedule, and treat schedule // execution failure the same way we treat job failure. schedule.ClearDirty() @@ -296,7 +295,7 @@ func (s *jobScheduler) executeCandidateSchedule( return err } } - return schedule.Update(ctx, s.InternalExecutor, txn) + return sj.Update(ctx, schedule) }); err != nil { if errors.HasType(err, (*savePointError)(nil)) { return errors.Wrapf(err, @@ -321,7 +320,7 @@ func (s *jobScheduler) executeSchedules(ctx context.Context, maxSchedules int64) findSchedulesStmt := fmt.Sprintf( `SELECT schedule_id FROM %s WHERE next_run < %s ORDER BY random() %s`, s.env.ScheduledJobsTableName(), s.env.NowExpr(), limitClause) - it, err := s.InternalExecutor.QueryIteratorEx( + it, err := s.DB.Executor().QueryIteratorEx( ctx, "find-scheduled-jobs", /*txn=*/ nil, sessiondata.RootUserSessionDataOverride, @@ -341,7 +340,7 @@ func (s *jobScheduler) executeSchedules(ctx context.Context, maxSchedules int64) for ok, err = it.Next(ctx); ok; ok, err = it.Next(ctx) { row := it.Cur() candidateID := int64(tree.MustBeDInt(row[0])) - if err := s.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := s.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { return s.executeCandidateSchedule(ctx, candidateID, txn) }); err != nil { log.Errorf(ctx, "error executing candidate schedule %d: %s", candidateID, err) diff --git a/pkg/jobs/job_scheduler_test.go b/pkg/jobs/job_scheduler_test.go index e3dfb2291d59..c14c7aac0562 100644 --- a/pkg/jobs/job_scheduler_test.go +++ b/pkg/jobs/job_scheduler_test.go @@ -21,13 +21,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" 
"github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -70,8 +68,9 @@ func TestJobSchedulerReschedulesRunning(t *testing.T) { require.NoError(t, j.SetSchedule("@hourly")) require.NoError(t, - h.cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - require.NoError(t, j.Create(ctx, h.cfg.InternalExecutor, txn)) + h.cfg.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + schedules := ScheduledJobTxn(txn) + require.NoError(t, schedules.Create(ctx, j)) // Lets add few fake runs for this schedule, including terminal and // non terminal states. @@ -126,8 +125,9 @@ func TestJobSchedulerExecutesAfterTerminal(t *testing.T) { require.NoError(t, j.SetSchedule("@hourly")) require.NoError(t, - h.cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - require.NoError(t, j.Create(ctx, h.cfg.InternalExecutor, txn)) + h.cfg.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + schedules := ScheduledJobTxn(txn) + require.NoError(t, schedules.Create(ctx, j)) // Let's add few fake runs for this schedule which are in every // terminal state. 
@@ -169,8 +169,9 @@ func TestJobSchedulerExecutesAndSchedulesNextRun(t *testing.T) { require.NoError(t, j.SetSchedule("@hourly")) require.NoError(t, - h.cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - require.NoError(t, j.Create(ctx, h.cfg.InternalExecutor, txn)) + h.cfg.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + schedules := ScheduledJobTxn(txn) + require.NoError(t, schedules.Create(ctx, j)) return nil })) @@ -237,11 +238,11 @@ type recordScheduleExecutor struct { } func (n *recordScheduleExecutor) ExecuteJob( - _ context.Context, - _ *scheduledjobs.JobExecutionConfig, - _ scheduledjobs.JobSchedulerEnv, + ctx context.Context, + txn isql.Txn, + cfg *scheduledjobs.JobExecutionConfig, + env scheduledjobs.JobSchedulerEnv, schedule *ScheduledJob, - _ *kv.Txn, ) error { n.executed = append(n.executed, schedule.ScheduleID()) return nil @@ -249,13 +250,12 @@ func (n *recordScheduleExecutor) ExecuteJob( func (n *recordScheduleExecutor) NotifyJobTermination( ctx context.Context, + txn isql.Txn, jobID jobspb.JobID, jobStatus Status, - _ jobspb.Details, + details jobspb.Details, env scheduledjobs.JobSchedulerEnv, schedule *ScheduledJob, - ex sqlutil.InternalExecutor, - txn *kv.Txn, ) error { return nil } @@ -265,12 +265,7 @@ func (n *recordScheduleExecutor) Metrics() metric.Struct { } func (n *recordScheduleExecutor) GetCreateScheduleStatement( - ctx context.Context, - env scheduledjobs.JobSchedulerEnv, - txn *kv.Txn, - descsCol *descs.Collection, - sj *ScheduledJob, - ex sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, env scheduledjobs.JobSchedulerEnv, sj *ScheduledJob, ) (string, error) { return "", errors.AssertionFailedf("unimplemented method: 'GetCreateScheduleStatement'") } @@ -309,7 +304,8 @@ func TestJobSchedulerCanBeDisabledWhileSleeping(t *testing.T) { // to verify this. 
schedule := h.newScheduledJobForExecutor("test_job", executorName, nil) schedule.SetNextRun(h.env.Now()) - require.NoError(t, schedule.Create(ctx, h.cfg.InternalExecutor, nil)) + schedules := ScheduledJobDB(h.cfg.DB) + require.NoError(t, schedules.Create(ctx, schedule)) // Advance time so that daemon picks up test_job. h.env.AdvanceTime(time.Second) @@ -380,10 +376,11 @@ func TestJobSchedulerDaemonProcessesJobs(t *testing.T) { const numJobs = 5 scheduleRunTime := h.env.Now().Add(time.Hour) var scheduleIDs []int64 + schedules := ScheduledJobDB(h.cfg.DB) for i := 0; i < numJobs; i++ { schedule := h.newScheduledJob(t, "test_job", "SELECT 42") schedule.SetNextRun(scheduleRunTime) - require.NoError(t, schedule.Create(ctx, h.cfg.InternalExecutor, nil)) + require.NoError(t, schedules.Create(ctx, schedule)) scheduleIDs = append(scheduleIDs, schedule.ScheduleID()) } @@ -423,10 +420,11 @@ func TestJobSchedulerDaemonHonorsMaxJobsLimit(t *testing.T) { const numJobs = 5 scheduleRunTime := h.env.Now().Add(time.Hour) var scheduleIDs []int64 + schedules := ScheduledJobDB(h.cfg.DB) for i := 0; i < numJobs; i++ { schedule := h.newScheduledJob(t, "test_job", "SELECT 42") schedule.SetNextRun(scheduleRunTime) - require.NoError(t, schedule.Create(ctx, h.cfg.InternalExecutor, nil)) + require.NoError(t, schedules.Create(ctx, schedule)) scheduleIDs = append(scheduleIDs, schedule.ScheduleID()) } @@ -467,25 +465,24 @@ type returnErrorExecutor struct { } func (e *returnErrorExecutor) ExecuteJob( - _ context.Context, - _ *scheduledjobs.JobExecutionConfig, - _ scheduledjobs.JobSchedulerEnv, + ctx context.Context, + txn isql.Txn, + cfg *scheduledjobs.JobExecutionConfig, + env scheduledjobs.JobSchedulerEnv, schedule *ScheduledJob, - _ *kv.Txn, ) error { e.numCalls++ return errors.Newf("error for schedule %d", schedule.ScheduleID()) } func (e *returnErrorExecutor) NotifyJobTermination( - _ context.Context, - _ jobspb.JobID, - _ Status, - _ jobspb.Details, - _ scheduledjobs.JobSchedulerEnv, - _ 
*ScheduledJob, - _ sqlutil.InternalExecutor, - _ *kv.Txn, + ctx context.Context, + txn isql.Txn, + jobID jobspb.JobID, + jobStatus Status, + details jobspb.Details, + env scheduledjobs.JobSchedulerEnv, + schedule *ScheduledJob, ) error { return nil } @@ -495,12 +492,7 @@ func (e *returnErrorExecutor) Metrics() metric.Struct { } func (e *returnErrorExecutor) GetCreateScheduleStatement( - ctx context.Context, - env scheduledjobs.JobSchedulerEnv, - txn *kv.Txn, - descsCol *descs.Collection, - sj *ScheduledJob, - ex sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, env scheduledjobs.JobSchedulerEnv, sj *ScheduledJob, ) (string, error) { return "", errors.AssertionFailedf("unimplemented method: 'GetCreateScheduleStatement'") } @@ -522,10 +514,11 @@ func TestJobSchedulerToleratesBadSchedules(t *testing.T) { // Create few one-off schedules. const numJobs = 5 scheduleRunTime := h.env.Now().Add(time.Hour) + schedules := ScheduledJobDB(h.cfg.DB) for i := 0; i < numJobs; i++ { s := h.newScheduledJobForExecutor("schedule", executorName, nil) s.SetNextRun(scheduleRunTime) - require.NoError(t, s.Create(ctx, h.cfg.InternalExecutor, nil)) + require.NoError(t, schedules.Create(ctx, s)) } h.env.SetTime(scheduleRunTime.Add(time.Second)) daemon := newJobScheduler(h.cfg, h.env, metric.NewRegistry()) @@ -549,7 +542,8 @@ func TestJobSchedulerRetriesFailed(t *testing.T) { daemon := newJobScheduler(h.cfg, h.env, metric.NewRegistry()) schedule := h.newScheduledJobForExecutor("schedule", executorName, nil) - require.NoError(t, schedule.Create(ctx, h.cfg.InternalExecutor, nil)) + schedules := ScheduledJobDB(h.cfg.DB) + require.NoError(t, schedules.Create(ctx, schedule)) startTime := h.env.Now() execTime := startTime.Add(time.Hour).Add(time.Second) @@ -568,7 +562,7 @@ func TestJobSchedulerRetriesFailed(t *testing.T) { h.env.SetTime(startTime) schedule.SetScheduleDetails(jobspb.ScheduleDetails{OnError: tc.onError}) require.NoError(t, schedule.SetSchedule("@hourly")) - 
require.NoError(t, schedule.Update(ctx, h.cfg.InternalExecutor, nil)) + require.NoError(t, schedules.Update(ctx, schedule)) h.env.SetTime(execTime) require.NoError(t, daemon.executeSchedules(ctx, 1)) @@ -596,7 +590,7 @@ func TestJobSchedulerDaemonUsesSystemTables(t *testing.T) { Knobs: base.TestingKnobs{JobsTestingKnobs: knobs}, }) defer s.Stopper().Stop(ctx) - + schedules := ScheduledJobDB(s.InternalDB().(isql.DB)) runner := sqlutils.MakeSQLRunner(db) runner.Exec(t, "CREATE TABLE defaultdb.foo(a int)") @@ -609,8 +603,7 @@ func TestJobSchedulerDaemonUsesSystemTables(t *testing.T) { &jobspb.SqlStatementExecutionArg{Statement: "INSERT INTO defaultdb.foo VALUES (1), (2), (3)"}) require.NoError(t, err) schedule.SetExecutionDetails(InlineExecutorName, jobspb.ExecutionArguments{Args: any}) - require.NoError(t, schedule.Create( - ctx, s.InternalExecutor().(sqlutil.InternalExecutor), nil)) + require.NoError(t, schedules.Create(ctx, schedule)) // Verify the schedule ran. testutils.SucceedsSoon(t, func() error { @@ -629,15 +622,15 @@ type txnConflictExecutor struct { func (e *txnConflictExecutor) ExecuteJob( ctx context.Context, + txn isql.Txn, cfg *scheduledjobs.JobExecutionConfig, env scheduledjobs.JobSchedulerEnv, schedule *ScheduledJob, - txn *kv.Txn, ) error { // Read number of rows -- this count will be used when updating // a single row in the table. - row, err := cfg.InternalExecutor.QueryRow( - ctx, "txn-executor", txn, "SELECT count(*) FROM defaultdb.foo") + row, err := txn.QueryRow( + ctx, "txn-executor", txn.KV(), "SELECT count(*) FROM defaultdb.foo") if err != nil { return err } @@ -651,20 +644,19 @@ func (e *txnConflictExecutor) ExecuteJob( } // Try updating. 
- _, err = cfg.InternalExecutor.Exec( - ctx, "txn-executor", txn, "UPDATE defaultdb.foo SET b=b+$1 WHERE a=1", cnt) + _, err = txn.Exec( + ctx, "txn-executor", txn.KV(), "UPDATE defaultdb.foo SET b=b+$1 WHERE a=1", cnt) return err } func (e *txnConflictExecutor) NotifyJobTermination( ctx context.Context, + txn isql.Txn, jobID jobspb.JobID, jobStatus Status, details jobspb.Details, env scheduledjobs.JobSchedulerEnv, schedule *ScheduledJob, - ex sqlutil.InternalExecutor, - txn *kv.Txn, ) error { return nil } @@ -674,12 +666,7 @@ func (e *txnConflictExecutor) Metrics() metric.Struct { } func (e *txnConflictExecutor) GetCreateScheduleStatement( - ctx context.Context, - env scheduledjobs.JobSchedulerEnv, - txn *kv.Txn, - descsCol *descs.Collection, - sj *ScheduledJob, - ex sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, env scheduledjobs.JobSchedulerEnv, sj *ScheduledJob, ) (string, error) { return "", errors.AssertionFailedf("unimplemented method: 'GetCreateScheduleStatement'") } @@ -706,6 +693,7 @@ INSERT INTO defaultdb.foo VALUES(1, 1) } defer registerScopedScheduledJobExecutor(execName, ex)() + schedules := ScheduledJobDB(h.cfg.DB) // Setup schedule with our test executor. schedule := NewScheduledJob(h.env) schedule.SetScheduleLabel("test schedule") @@ -713,8 +701,7 @@ INSERT INTO defaultdb.foo VALUES(1, 1) nextRun := h.env.Now().Add(time.Hour) schedule.SetNextRun(nextRun) schedule.SetExecutionDetails(execName, jobspb.ExecutionArguments{}) - require.NoError(t, schedule.Create( - ctx, h.cfg.InternalExecutor, nil)) + require.NoError(t, schedules.Create(ctx, schedule)) // Execute schedule on another thread. 
g := ctxgroup.WithContext(context.Background()) @@ -725,7 +712,7 @@ INSERT INTO defaultdb.foo VALUES(1, 1) }) require.NoError(t, - h.cfg.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { + h.cfg.DB.Txn(context.Background(), func(ctx context.Context, txn isql.Txn) error { // Let schedule start running, and wait for it to be ready to update. h.env.SetTime(nextRun.Add(time.Second)) close(ready) @@ -734,11 +721,11 @@ INSERT INTO defaultdb.foo VALUES(1, 1) // Before we let schedule proceed, update the number of rows in the table. // This should cause transaction in schedule to restart, but we don't // expect to see any errors in the schedule status. - if _, err := h.cfg.InternalExecutor.Exec(ctx, "update-a", txn, + if _, err := txn.Exec(ctx, "update-a", txn.KV(), `UPDATE defaultdb.foo SET b=3 WHERE a=1`); err != nil { return err } - if _, err := h.cfg.InternalExecutor.Exec(ctx, "add-row", txn, + if _, err := txn.Exec(ctx, "add-row", txn.KV(), `INSERT INTO defaultdb.foo VALUES (123, 123)`); err != nil { return err } @@ -762,10 +749,10 @@ var _ ScheduledJobExecutor = (*blockUntilCancelledExecutor)(nil) func (e *blockUntilCancelledExecutor) ExecuteJob( ctx context.Context, + txn isql.Txn, cfg *scheduledjobs.JobExecutionConfig, env scheduledjobs.JobSchedulerEnv, schedule *ScheduledJob, - txn *kv.Txn, ) error { done := func() {} e.once.Do(func() { @@ -779,13 +766,12 @@ func (e *blockUntilCancelledExecutor) ExecuteJob( func (e *blockUntilCancelledExecutor) NotifyJobTermination( ctx context.Context, + txn isql.Txn, jobID jobspb.JobID, jobStatus Status, details jobspb.Details, env scheduledjobs.JobSchedulerEnv, schedule *ScheduledJob, - ex sqlutil.InternalExecutor, - txn *kv.Txn, ) error { return nil } @@ -795,12 +781,7 @@ func (e *blockUntilCancelledExecutor) Metrics() metric.Struct { } func (e *blockUntilCancelledExecutor) GetCreateScheduleStatement( - ctx context.Context, - env scheduledjobs.JobSchedulerEnv, - txn *kv.Txn, - descsCol 
*descs.Collection, - sj *ScheduledJob, - ex sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, env scheduledjobs.JobSchedulerEnv, sj *ScheduledJob, ) (string, error) { return "", errors.AssertionFailedf("unexpected GetCreateScheduleStatement call") } @@ -831,6 +812,8 @@ func TestDisablingSchedulerCancelsSchedules(t *testing.T) { ts, _, _ := serverutils.StartServer(t, base.TestServerArgs{Knobs: knobs}) defer ts.Stopper().Stop(context.Background()) + schedules := ScheduledJobDB(ts.InternalDB().(isql.DB)) + // Create schedule which blocks until its context cancelled due to disabled scheduler. // We only need to create one schedule. This is because // scheduler executes its batch of schedules sequentially, and so, creating more @@ -840,8 +823,7 @@ func TestDisablingSchedulerCancelsSchedules(t *testing.T) { schedule.SetOwner(username.TestUserName()) schedule.SetNextRun(timeutil.Now()) schedule.SetExecutionDetails(executorName, jobspb.ExecutionArguments{}) - require.NoError(t, schedule.Create( - context.Background(), ts.InternalExecutor().(sqlutil.InternalExecutor), nil)) + require.NoError(t, schedules.Create(context.Background(), schedule)) readWithTimeout(t, ex.started) // Disable scheduler and verify all running schedules were cancelled. @@ -865,6 +847,7 @@ func TestSchedulePlanningRespectsTimeout(t *testing.T) { } ts, _, _ := serverutils.StartServer(t, base.TestServerArgs{Knobs: knobs}) defer ts.Stopper().Stop(context.Background()) + schedules := ScheduledJobDB(ts.InternalDB().(isql.DB)) // timeout must be long enough to work when running under stress. 
schedulerScheduleExecutionTimeout.Override( @@ -878,8 +861,7 @@ func TestSchedulePlanningRespectsTimeout(t *testing.T) { schedule.SetOwner(username.TestUserName()) schedule.SetNextRun(timeutil.Now()) schedule.SetExecutionDetails(executorName, jobspb.ExecutionArguments{}) - require.NoError(t, schedule.Create( - context.Background(), ts.InternalExecutor().(sqlutil.InternalExecutor), nil)) + require.NoError(t, schedules.Create(context.Background(), schedule)) readWithTimeout(t, ex.started) readWithTimeout(t, ex.done) diff --git a/pkg/jobs/jobs.go b/pkg/jobs/jobs.go index cf1b86a4ceb7..daa860c426ff 100644 --- a/pkg/jobs/jobs.go +++ b/pkg/jobs/jobs.go @@ -23,11 +23,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/protoreflect" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/json" @@ -256,14 +256,14 @@ func (j *Job) taskName() string { // Started marks the tracked job as started by updating status to running in // jobs table. 
-func (j *Job) started(ctx context.Context, txn *kv.Txn) error { - return j.Update(ctx, txn, func(_ *kv.Txn, md JobMetadata, ju *JobUpdater) error { +func (u Updater) started(ctx context.Context) error { + return u.Update(ctx, func(_ isql.Txn, md JobMetadata, ju *JobUpdater) error { if md.Status != StatusPending && md.Status != StatusRunning { return errors.Errorf("job with status %s cannot be marked started", md.Status) } if md.Payload.StartedMicros == 0 { ju.UpdateStatus(StatusRunning) - md.Payload.StartedMicros = timeutil.ToUnixMicros(j.registry.clock.Now().GoTime()) + md.Payload.StartedMicros = timeutil.ToUnixMicros(u.now()) ju.UpdatePayload(md.Payload) } // md.RunStats can be nil because of the timing of version-update when exponential-backoff @@ -274,7 +274,7 @@ func (j *Job) started(ctx context.Context, txn *kv.Txn) error { // // TODO (sajjad): Update this comment after version 22.2 has been released. if md.RunStats != nil { - ju.UpdateRunStats(md.RunStats.NumRuns+1, j.registry.clock.Now().GoTime()) + ju.UpdateRunStats(md.RunStats.NumRuns+1, u.now()) } return nil }) @@ -282,15 +282,15 @@ func (j *Job) started(ctx context.Context, txn *kv.Txn) error { // CheckStatus verifies the status of the job and returns an error if the job's // status isn't Running or Reverting. -func (j *Job) CheckStatus(ctx context.Context, txn *kv.Txn) error { - return j.Update(ctx, txn, func(_ *kv.Txn, md JobMetadata, _ *JobUpdater) error { +func (u Updater) CheckStatus(ctx context.Context) error { + return u.Update(ctx, func(_ isql.Txn, md JobMetadata, _ *JobUpdater) error { return md.CheckRunningOrReverting() }) } // CheckTerminalStatus returns true if the job is in a terminal status. 
-func (j *Job) CheckTerminalStatus(ctx context.Context, txn *kv.Txn) bool { - err := j.Update(ctx, txn, func(_ *kv.Txn, md JobMetadata, _ *JobUpdater) error { +func (u Updater) CheckTerminalStatus(ctx context.Context) bool { + err := u.Update(ctx, func(_ isql.Txn, md JobMetadata, _ *JobUpdater) error { if !md.Status.Terminal() { return &InvalidStatusError{md.ID, md.Status, "checking that job status is success", md.Payload.Error} } @@ -303,10 +303,8 @@ func (j *Job) CheckTerminalStatus(ctx context.Context, txn *kv.Txn) bool { // RunningStatus updates the detailed status of a job currently in progress. // It sets the job's RunningStatus field to the value returned by runningStatusFn // and persists runningStatusFn's modifications to the job's details, if any. -func (j *Job) RunningStatus( - ctx context.Context, txn *kv.Txn, runningStatusFn RunningStatusFn, -) error { - return j.Update(ctx, txn, func(_ *kv.Txn, md JobMetadata, ju *JobUpdater) error { +func (u Updater) RunningStatus(ctx context.Context, runningStatusFn RunningStatusFn) error { + return u.Update(ctx, func(_ isql.Txn, md JobMetadata, ju *JobUpdater) error { if err := md.CheckRunningOrReverting(); err != nil { return err } @@ -347,10 +345,8 @@ func FractionUpdater(f float32) FractionProgressedFn { // // Jobs for which progress computations do not depend on their details can // use the FractionUpdater helper to construct a ProgressedFn. 
-func (j *Job) FractionProgressed( - ctx context.Context, txn *kv.Txn, progressedFn FractionProgressedFn, -) error { - return j.Update(ctx, txn, func(_ *kv.Txn, md JobMetadata, ju *JobUpdater) error { +func (u Updater) FractionProgressed(ctx context.Context, progressedFn FractionProgressedFn) error { + return u.Update(ctx, func(_ isql.Txn, md JobMetadata, ju *JobUpdater) error { if err := md.CheckRunningOrReverting(); err != nil { return err } @@ -362,7 +358,7 @@ func (j *Job) FractionProgressed( if fractionCompleted < 0.0 || fractionCompleted > 1.0 { return errors.Errorf( "job %d: fractionCompleted %f is outside allowable range [0.0, 1.0]", - j.ID(), fractionCompleted, + u.j.ID(), fractionCompleted, ) } md.Progress.Progress = &jobspb.Progress_FractionCompleted{ @@ -376,10 +372,8 @@ func (j *Job) FractionProgressed( // paused sets the status of the tracked job to paused. It is called by the // registry adoption loop by the node currently running a job to move it from // PauseRequested to paused. -func (j *Job) paused( - ctx context.Context, txn *kv.Txn, fn func(context.Context, *kv.Txn) error, -) error { - return j.Update(ctx, txn, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { +func (u Updater) paused(ctx context.Context, fn func(context.Context, isql.Txn) error) error { + return u.Update(ctx, func(txn isql.Txn, md JobMetadata, ju *JobUpdater) error { if md.Status == StatusPaused { // Already paused - do nothing. return nil @@ -397,12 +391,12 @@ func (j *Job) paused( }) } -// unpaused sets the status of the tracked job to running or reverting iff the +// Unpaused sets the status of the tracked job to running or reverting iff the // job is currently paused. It does not directly resume the job; rather, it // expires the job's lease so that a Registry adoption loop detects it and // resumes it. 
-func (j *Job) unpaused(ctx context.Context, txn *kv.Txn) error { - return j.Update(ctx, txn, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { +func (u Updater) Unpaused(ctx context.Context) error { + return u.Update(ctx, func(txn isql.Txn, md JobMetadata, ju *JobUpdater) error { if md.Status == StatusRunning || md.Status == StatusReverting { // Already resumed - do nothing. return nil @@ -422,18 +416,16 @@ func (j *Job) unpaused(ctx context.Context, txn *kv.Txn) error { }) } -// cancelRequested sets the status of the tracked job to cancel-requested. It +// CancelRequested sets the status of the tracked job to cancel-requested. It // does not directly cancel the job; like job.Paused, it expects the job to call // job.Progressed soon, observe a "job is cancel-requested" error, and abort. // Further the node the runs the job will actively cancel it when it notices // that it is in state StatusCancelRequested and will move it to state // StatusReverting. -func (j *Job) cancelRequested( - ctx context.Context, txn *kv.Txn, fn func(context.Context, *kv.Txn) error, -) error { - return j.Update(ctx, txn, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { +func (u Updater) CancelRequested(ctx context.Context) error { + return u.Update(ctx, func(txn isql.Txn, md JobMetadata, ju *JobUpdater) error { if md.Payload.Noncancelable { - return errors.Newf("job %d: not cancelable", j.ID()) + return errors.Newf("job %d: not cancelable", md.ID) } if md.Status == StatusCancelRequested || md.Status == StatusCanceled { return nil @@ -444,12 +436,7 @@ func (j *Job) cancelRequested( if md.Status == StatusPaused && md.Payload.FinalResumeError != nil { decodedErr := errors.DecodeError(ctx, *md.Payload.FinalResumeError) return errors.Wrapf(decodedErr, "job %d is paused and has non-nil FinalResumeError "+ - "hence cannot be canceled and should be reverted", j.ID()) - } - if fn != nil { - if err := fn(ctx, txn); err != nil { - return err - } + "hence cannot be canceled and 
should be reverted", md.ID) } ju.UpdateStatus(StatusCancelRequested) return nil @@ -459,17 +446,19 @@ func (j *Job) cancelRequested( // onPauseRequestFunc is a function used to perform action on behalf of a job // implementation when a pause is requested. type onPauseRequestFunc func( - ctx context.Context, planHookState interface{}, txn *kv.Txn, progress *jobspb.Progress, + ctx context.Context, planHookState interface{}, txn isql.Txn, progress *jobspb.Progress, ) error -// PauseRequested sets the status of the tracked job to pause-requested. It does -// not directly pause the job; it expects the node that runs the job will +// PauseRequestedWithFunc sets the status of the tracked job to pause-requested. +// It does not directly pause the job; it expects the node that runs the job will // actively cancel it when it notices that it is in state StatusPauseRequested -// and will move it to state StatusPaused. -func (j *Job) PauseRequested( - ctx context.Context, txn *kv.Txn, fn onPauseRequestFunc, reason string, +// and will move it to state StatusPaused. If a function is passed, it will be +// used to update the job state. If the job has builtin logic to run upon +// pausing, it will be ignored; use PauseRequested if you want that logic to run. 
+func (u Updater) PauseRequestedWithFunc( + ctx context.Context, fn onPauseRequestFunc, reason string, ) error { - return j.Update(ctx, txn, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { + return u.Update(ctx, func(txn isql.Txn, md JobMetadata, ju *JobUpdater) error { if md.Status == StatusPauseRequested || md.Status == StatusPaused { return nil } @@ -477,7 +466,7 @@ func (j *Job) PauseRequested( return fmt.Errorf("job with status %s cannot be requested to be paused", md.Status) } if fn != nil { - execCtx, cleanup := j.registry.execCtx("pause request", j.Payload().UsernameProto.Decode()) + execCtx, cleanup := u.j.registry.execCtx("pause request", md.Payload.UsernameProto.Decode()) defer cleanup() if err := fn(ctx, execCtx, txn, md.Progress); err != nil { return err @@ -487,16 +476,31 @@ func (j *Job) PauseRequested( ju.UpdateStatus(StatusPauseRequested) md.Payload.PauseReason = reason ju.UpdatePayload(md.Payload) - log.Infof(ctx, "job %d: pause requested recorded with reason %s", j.ID(), reason) + log.Infof(ctx, "job %d: pause requested recorded with reason %s", md.ID, reason) return nil }) } +// PauseRequested is like PauseRequestedWithFunc but uses the default +// implementation of OnPauseRequested if the underlying job is a +// PauseRequester. +func (u Updater) PauseRequested(ctx context.Context, reason string) error { + resumer, err := u.j.registry.createResumer(u.j, u.j.registry.settings) + if err != nil { + return err + } + var fn onPauseRequestFunc + if pr, ok := resumer.(PauseRequester); ok { + fn = pr.OnPauseRequest + } + return u.PauseRequestedWithFunc(ctx, fn, reason) +} + // reverted sets the status of the tracked job to reverted. 
-func (j *Job) reverted( - ctx context.Context, txn *kv.Txn, err error, fn func(context.Context, *kv.Txn) error, +func (u Updater) reverted( + ctx context.Context, err error, fn func(context.Context, isql.Txn) error, ) error { - return j.Update(ctx, txn, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { + return u.Update(ctx, func(txn isql.Txn, md JobMetadata, ju *JobUpdater) error { if md.Status != StatusReverting && md.Status != StatusCancelRequested && md.Status != StatusRunning && @@ -537,50 +541,37 @@ func (j *Job) reverted( // Reset the number of runs to speed up reverting. numRuns = 1 } - ju.UpdateRunStats(numRuns, j.registry.clock.Now().GoTime()) + ju.UpdateRunStats(numRuns, u.now()) } return nil }) } // Canceled sets the status of the tracked job to cancel. -func (j *Job) canceled( - ctx context.Context, txn *kv.Txn, fn func(context.Context, *kv.Txn) error, -) error { - return j.Update(ctx, txn, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { +func (u Updater) canceled(ctx context.Context) error { + return u.Update(ctx, func(txn isql.Txn, md JobMetadata, ju *JobUpdater) error { if md.Status == StatusCanceled { return nil } if md.Status != StatusReverting { return fmt.Errorf("job with status %s cannot be requested to be canceled", md.Status) } - if fn != nil { - if err := fn(ctx, txn); err != nil { - return err - } - } ju.UpdateStatus(StatusCanceled) - md.Payload.FinishedMicros = timeutil.ToUnixMicros(j.registry.clock.Now().GoTime()) + md.Payload.FinishedMicros = timeutil.ToUnixMicros(u.j.registry.clock.Now().GoTime()) ju.UpdatePayload(md.Payload) return nil }) } // Failed marks the tracked job as having failed with the given error. 
-func (j *Job) failed( - ctx context.Context, txn *kv.Txn, err error, fn func(context.Context, *kv.Txn) error, -) error { - return j.Update(ctx, txn, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { +func (u Updater) failed(ctx context.Context, err error) error { + return u.Update(ctx, func(txn isql.Txn, md JobMetadata, ju *JobUpdater) error { // TODO(spaskob): should we fail if the terminal state is not StatusFailed? if md.Status.Terminal() { // Already done - do nothing. return nil } - if fn != nil { - if err := fn(ctx, txn); err != nil { - return err - } - } + // TODO (sajjad): We don't have any checks for state transitions here. Consequently, // a pause-requested job can transition to failed, which may or may not be // acceptable depending on the job. @@ -598,7 +589,7 @@ func (j *Job) failed( } md.Payload.Error = errStr - md.Payload.FinishedMicros = timeutil.ToUnixMicros(j.registry.clock.Now().GoTime()) + md.Payload.FinishedMicros = timeutil.ToUnixMicros(u.now()) ju.UpdatePayload(md.Payload) return nil }) @@ -606,10 +597,10 @@ func (j *Job) failed( // RevertFailed marks the tracked job as having failed during revert with the // given error. Manual cleanup is required when the job is in this state. 
-func (j *Job) revertFailed( - ctx context.Context, txn *kv.Txn, err error, fn func(context.Context, *kv.Txn) error, +func (u Updater) revertFailed( + ctx context.Context, err error, fn func(context.Context, isql.Txn) error, ) error { - return j.Update(ctx, txn, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { + return u.Update(ctx, func(txn isql.Txn, md JobMetadata, ju *JobUpdater) error { if md.Status != StatusReverting { return fmt.Errorf("job with status %s cannot fail during a revert", md.Status) } @@ -619,7 +610,7 @@ func (j *Job) revertFailed( } } ju.UpdateStatus(StatusRevertFailed) - md.Payload.FinishedMicros = timeutil.ToUnixMicros(j.registry.clock.Now().GoTime()) + md.Payload.FinishedMicros = timeutil.ToUnixMicros(u.j.registry.clock.Now().GoTime()) md.Payload.Error = err.Error() ju.UpdatePayload(md.Payload) return nil @@ -628,10 +619,8 @@ func (j *Job) revertFailed( // succeeded marks the tracked job as having succeeded and sets its fraction // completed to 1.0. -func (j *Job) succeeded( - ctx context.Context, txn *kv.Txn, fn func(context.Context, *kv.Txn) error, -) error { - return j.Update(ctx, txn, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { +func (u Updater) succeeded(ctx context.Context, fn func(context.Context, isql.Txn) error) error { + return u.Update(ctx, func(txn isql.Txn, md JobMetadata, ju *JobUpdater) error { if md.Status == StatusSucceeded { return nil } @@ -644,7 +633,7 @@ func (j *Job) succeeded( } } ju.UpdateStatus(StatusSucceeded) - md.Payload.FinishedMicros = timeutil.ToUnixMicros(j.registry.clock.Now().GoTime()) + md.Payload.FinishedMicros = timeutil.ToUnixMicros(u.j.registry.clock.Now().GoTime()) ju.UpdatePayload(md.Payload) md.Progress.Progress = &jobspb.Progress_FractionCompleted{ FractionCompleted: 1.0, @@ -655,8 +644,8 @@ func (j *Job) succeeded( } // SetDetails sets the details field of the currently running tracked job. 
-func (j *Job) SetDetails(ctx context.Context, txn *kv.Txn, details interface{}) error { - return j.Update(ctx, txn, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { +func (u Updater) SetDetails(ctx context.Context, details interface{}) error { + return u.Update(ctx, func(txn isql.Txn, md JobMetadata, ju *JobUpdater) error { if err := md.CheckRunningOrReverting(); err != nil { return err } @@ -667,8 +656,8 @@ func (j *Job) SetDetails(ctx context.Context, txn *kv.Txn, details interface{}) } // SetProgress sets the details field of the currently running tracked job. -func (j *Job) SetProgress(ctx context.Context, txn *kv.Txn, details interface{}) error { - return j.Update(ctx, txn, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { +func (u Updater) SetProgress(ctx context.Context, details interface{}) error { + return u.Update(ctx, func(txn isql.Txn, md JobMetadata, ju *JobUpdater) error { if err := md.CheckRunningOrReverting(); err != nil { return err } @@ -713,19 +702,9 @@ func (j *Job) FractionCompleted() float32 { return progress.GetFractionCompleted() } -// MakeSessionBoundInternalExecutor makes an internal executor, for use in a job -// resumer, and sets it with the provided session data. See the comment on -// sessionBoundInternalExecutorFactory for a more detailed explanation of why -// this exists. -func (j *Job) MakeSessionBoundInternalExecutor( - sd *sessiondata.SessionData, -) sqlutil.InternalExecutor { - return j.registry.internalExecutorFactory.NewInternalExecutor(sd) -} - -// GetInternalExecutorFactory returns the internal executor factory. -func (j *Job) GetInternalExecutorFactory() sqlutil.InternalExecutorFactory { - return j.registry.internalExecutorFactory +// GetInternalDB returns the registry's internal isql.DB. +func (j *Job) GetInternalDB() isql.DB { + return j.registry.internalDB } // MarkIdle marks the job as Idle. 
Idleness should not be toggled frequently @@ -734,17 +713,6 @@ func (j *Job) MarkIdle(isIdle bool) { j.registry.MarkIdle(j, isIdle) } -func (j *Job) runInTxn( - ctx context.Context, txn *kv.Txn, fn func(context.Context, *kv.Txn) error, -) error { - if txn != nil { - // Don't run fn in a retry loop because we need retryable errors to - // propagate up to the transaction's properly-scoped retry loop. - return fn(ctx, txn) - } - return j.registry.db.Txn(ctx, fn) -} - // JobNotFoundError is returned from load when the job does not exist. type JobNotFoundError struct { jobID jobspb.JobID @@ -764,7 +732,15 @@ func HasJobNotFoundError(err error) bool { return errors.HasType(err, (*JobNotFoundError)(nil)) } -func (j *Job) load(ctx context.Context, txn *kv.Txn) error { +func (u Updater) load(ctx context.Context) (retErr error) { + if u.txn == nil { + return u.j.registry.internalDB.Txn(ctx, func( + ctx context.Context, txn isql.Txn, + ) error { + u.txn = txn + return u.load(ctx) + }) + } ctx, sp := tracing.ChildSpan(ctx, "load-job") defer sp.Finish() @@ -772,51 +748,54 @@ func (j *Job) load(ctx context.Context, txn *kv.Txn) error { var progress *jobspb.Progress var createdBy *CreatedByInfo var status Status - - if err := j.runInTxn(ctx, txn, func(ctx context.Context, txn *kv.Txn) error { - const ( - queryNoSessionID = "SELECT payload, progress, created_by_type, created_by_id, status FROM system.jobs WHERE id = $1" - queryWithSessionID = queryNoSessionID + " AND claim_session_id = $2" - ) - sess := sessiondata.RootUserSessionDataOverride - - var err error - var row tree.Datums - if j.session == nil { - row, err = j.registry.ex.QueryRowEx(ctx, "load-job-query", txn, sess, - queryNoSessionID, j.ID()) - } else { - row, err = j.registry.ex.QueryRowEx(ctx, "load-job-query", txn, sess, - queryWithSessionID, j.ID(), j.session.ID().UnsafeBytes()) - } - if err != nil { - return err - } - if row == nil { - return &JobNotFoundError{jobID: j.ID()} - } - payload, err = 
UnmarshalPayload(row[0]) - if err != nil { - return err - } - progress, err = UnmarshalProgress(row[1]) - if err != nil { - return err - } - createdBy, err = unmarshalCreatedBy(row[2], row[3]) - if err != nil { - return err + j := u.j + defer func() { + if retErr != nil { + return } - status, err = unmarshalStatus(row[4]) + j.mu.Lock() + defer j.mu.Unlock() + j.mu.payload = *payload + j.mu.progress = *progress + j.mu.status = status + j.createdBy = createdBy + }() + + const ( + queryNoSessionID = "SELECT payload, progress, created_by_type, created_by_id, status FROM system.jobs WHERE id = $1" + queryWithSessionID = queryNoSessionID + " AND claim_session_id = $2" + ) + sess := sessiondata.RootUserSessionDataOverride + + var err error + var row tree.Datums + if j.session == nil { + row, err = u.txn.QueryRowEx(ctx, "load-job-query", u.txn.KV(), sess, + queryNoSessionID, j.ID()) + } else { + row, err = u.txn.QueryRowEx(ctx, "load-job-query", u.txn.KV(), sess, + queryWithSessionID, j.ID(), j.session.ID().UnsafeBytes()) + } + if err != nil { return err - }); err != nil { + } + if row == nil { + return &JobNotFoundError{jobID: j.ID()} + } + payload, err = UnmarshalPayload(row[0]) + if err != nil { return err } - j.mu.payload = *payload - j.mu.progress = *progress - j.mu.status = status - j.createdBy = createdBy - return nil + progress, err = UnmarshalProgress(row[1]) + if err != nil { + return err + } + createdBy, err = unmarshalCreatedBy(row[2], row[3]) + if err != nil { + return err + } + status, err = unmarshalStatus(row[4]) + return err } // UnmarshalPayload unmarshals and returns the Payload encoded in the input @@ -992,7 +971,7 @@ func (sj *StartableJob) Cancel(ctx context.Context) error { sj.registry.unregister(sj.ID()) } }() - return sj.registry.CancelRequested(ctx, nil, sj.ID()) + return sj.Job.NoTxn().CancelRequested(ctx) } func (sj *StartableJob) recordStart() (alreadyStarted bool) { diff --git a/pkg/jobs/jobs_test.go b/pkg/jobs/jobs_test.go index 
d74acd80aa7d..d961206a0058 100644 --- a/pkg/jobs/jobs_test.go +++ b/pkg/jobs/jobs_test.go @@ -32,7 +32,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" @@ -42,9 +41,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -200,7 +199,7 @@ type registryTestSuite struct { } func noopPauseRequestFunc( - ctx context.Context, planHookState interface{}, txn *kv.Txn, progress *jobspb.Progress, + ctx context.Context, planHookState interface{}, txn isql.Txn, progress *jobspb.Progress, ) error { return nil } @@ -289,7 +288,7 @@ func (rts *registryTestSuite) setUp(t *testing.T) { case err := <-rts.resumeCh: return err case <-rts.progressCh: - err := job.FractionProgressed(rts.ctx, nil /* txn */, jobs.FractionUpdater(0)) + err := job.NoTxn().FractionProgressed(rts.ctx, jobs.FractionUpdater(0)) if err != nil { return err } @@ -332,7 +331,7 @@ func (rts *registryTestSuite) setUp(t *testing.T) { rts.mu.a.Success = true return rts.successErr }, - PauseRequest: func(ctx context.Context, execCfg interface{}, txn *kv.Txn, progress *jobspb.Progress) error { + PauseRequest: func(ctx context.Context, execCfg interface{}, txn isql.Txn, progress *jobspb.Progress) error { return rts.onPauseRequest(ctx, 
execCfg, txn, progress) }, } @@ -362,7 +361,7 @@ func (rts *registryTestSuite) check(t *testing.T, expectedStatus jobs.Status) { if expectedStatus == "" { return nil } - st, err := rts.job.TestingCurrentStatus(rts.ctx, nil /* txn */) + st, err := rts.job.TestingCurrentStatus(rts.ctx) if err != nil { return err } @@ -373,6 +372,10 @@ func (rts *registryTestSuite) check(t *testing.T, expectedStatus jobs.Status) { }) } +func (rts *registryTestSuite) idb() isql.DB { + return rts.s.InternalDB().(isql.DB) +} + func TestRegistryLifecycle(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -382,7 +385,7 @@ func TestRegistryLifecycle(t *testing.T) { rts.setUp(t) defer rts.tearDown() - j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -404,7 +407,7 @@ func TestRegistryLifecycle(t *testing.T) { rts.setUp(t) defer rts.tearDown() - j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -425,7 +428,7 @@ func TestRegistryLifecycle(t *testing.T) { rts.setUp(t) defer rts.tearDown() - j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -459,7 +462,7 @@ func TestRegistryLifecycle(t *testing.T) { rts.setUp(t) defer rts.tearDown() - j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -497,7 +500,7 @@ func TestRegistryLifecycle(t *testing.T) { rts := registryTestSuite{} rts.setUp(t) defer rts.tearDown() - j, err := 
jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -524,7 +527,7 @@ func TestRegistryLifecycle(t *testing.T) { rts := registryTestSuite{} rts.setUp(t) defer rts.tearDown() - j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -550,7 +553,7 @@ func TestRegistryLifecycle(t *testing.T) { rts.setUp(t) defer rts.tearDown() - j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -580,7 +583,7 @@ func TestRegistryLifecycle(t *testing.T) { rts.setUp(t) defer rts.tearDown() - j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -619,7 +622,7 @@ func TestRegistryLifecycle(t *testing.T) { rts := registryTestSuite{} rts.setUp(t) defer rts.tearDown() - job, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + job, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -770,7 +773,7 @@ func TestRegistryLifecycle(t *testing.T) { rts.setUp(t) defer rts.tearDown() - j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -808,7 +811,7 @@ func TestRegistryLifecycle(t *testing.T) { rts.setUp(t) defer rts.tearDown() - j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + j, err := 
jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -878,7 +881,7 @@ func TestRegistryLifecycle(t *testing.T) { return nil } - j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -923,7 +926,7 @@ func TestRegistryLifecycle(t *testing.T) { defer rts.tearDown() // Make marking success fail. - j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -962,12 +965,12 @@ func TestRegistryLifecycle(t *testing.T) { madeUpSpans := []roachpb.Span{ {Key: roachpb.Key("foo")}, } - rts.onPauseRequest = func(ctx context.Context, planHookState interface{}, txn *kv.Txn, progress *jobspb.Progress) error { + rts.onPauseRequest = func(ctx context.Context, planHookState interface{}, txn isql.Txn, progress *jobspb.Progress) error { progress.GetImport().SpanProgress = madeUpSpans return nil } - job, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + job, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) require.NoError(t, err) rts.job = job @@ -998,11 +1001,11 @@ func TestRegistryLifecycle(t *testing.T) { rts.setUp(t) defer rts.tearDown() - rts.onPauseRequest = func(ctx context.Context, planHookState interface{}, txn *kv.Txn, progress *jobspb.Progress) error { + rts.onPauseRequest = func(ctx context.Context, planHookState interface{}, txn isql.Txn, progress *jobspb.Progress) error { return errors.New("boom") } - job, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + job, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) require.NoError(t, err) rts.job = job @@ -1047,7 +1050,7 @@ func 
TestRegistryLifecycle(t *testing.T) { return nil } - j, err := jobs.TestingCreateAndStartJob(context.Background(), rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(context.Background(), rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -1078,7 +1081,7 @@ func TestRegistryLifecycle(t *testing.T) { defer rts.tearDown() pauseUnpauseJob := func(expectedNumFiles int) { - j, err := jobs.TestingCreateAndStartJob(context.Background(), rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(context.Background(), rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -1127,7 +1130,7 @@ func TestRegistryLifecycle(t *testing.T) { defer rts.tearDown() runJobAndFail := func(expectedNumFiles int) { - j, err := jobs.TestingCreateAndStartJob(context.Background(), rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(context.Background(), rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -1169,7 +1172,7 @@ func TestRegistryLifecycle(t *testing.T) { rts.setUp(t) defer rts.tearDown() rts.sqlDB.Exec(t, `SET CLUSTER SETTING jobs.trace.force_dump_mode='onStop'`) - j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -1336,7 +1339,7 @@ func TestJobLifecycle(t *testing.T) { {0.0, 0.0}, {0.5, 0.5}, {0.5, 0.5}, {0.4, 0.4}, {0.8, 0.8}, {1.0, 1.0}, } for _, f := range progresses { - if err := woodyJob.FractionProgressed(ctx, nil /* txn */, jobs.FractionUpdater(f.actual)); err != nil { + if err := woodyJob.NoTxn().FractionProgressed(ctx, jobs.FractionUpdater(f.actual)); err != nil { t.Fatal(err) } woodyExp.FractionCompleted = f.expected @@ -1346,7 +1349,7 @@ func TestJobLifecycle(t *testing.T) { } // Test Progressed callbacks. 
- if err := woodyJob.FractionProgressed(ctx, nil /* txn */, func(_ context.Context, details jobspb.ProgressDetails) float32 { + if err := woodyJob.NoTxn().FractionProgressed(ctx, func(_ context.Context, details jobspb.ProgressDetails) float32 { details.(*jobspb.Progress_Restore).Restore.HighWater = roachpb.Key("mariana") return 1.0 }); err != nil { @@ -1393,7 +1396,7 @@ func TestJobLifecycle(t *testing.T) { t.Fatal(err) } - if err := buzzJob.FractionProgressed(ctx, nil /* txn */, jobs.FractionUpdater(.42)); err != nil { + if err := buzzJob.NoTxn().FractionProgressed(ctx, jobs.FractionUpdater(.42)); err != nil { t.Fatal(err) } buzzExp.FractionCompleted = .42 @@ -1545,7 +1548,7 @@ func TestJobLifecycle(t *testing.T) { t.Run("cancelable jobs can be canceled until finished", func(t *testing.T) { { job, exp := createDefaultJob() - if err := registry.CancelRequested(ctx, nil, job.ID()); err != nil { + if err := job.NoTxn().CancelRequested(ctx); err != nil { t.Fatal(err) } if err := exp.verify(job.ID(), jobs.StatusCancelRequested); err != nil { @@ -1594,7 +1597,7 @@ func TestJobLifecycle(t *testing.T) { } }) - t.Run("unpaused jobs cannot be resumed", func(t *testing.T) { + t.Run("Unpaused jobs cannot be resumed", func(t *testing.T) { { job, _ := createDefaultJob() if err := registry.CancelRequested(ctx, nil, job.ID()); err != nil { @@ -1634,7 +1637,7 @@ func TestJobLifecycle(t *testing.T) { t.Run("update before create fails", func(t *testing.T) { // Attempt to create the job but abort the transaction. 
var job *jobs.Job - require.Regexp(t, "boom", s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.Regexp(t, "boom", s.InternalDB().(isql.DB).Txn(ctx, func(ctx context.Context, txn isql.Txn) error { job, _ = registry.CreateAdoptableJobWithTxn(ctx, jobs.Record{ Details: jobspb.RestoreDetails{}, Progress: jobspb.RestoreProgress{}, @@ -1673,7 +1676,7 @@ func TestJobLifecycle(t *testing.T) { {WallTime: 2, Logical: 0}, } for _, ts := range highWaters { - require.NoError(t, job.Update(ctx, nil, func(_ *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + require.NoError(t, job.NoTxn().Update(ctx, func(_ isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { return jobs.UpdateHighwaterProgressed(ts, md, ju) })) p := job.Progress() @@ -1688,13 +1691,13 @@ func TestJobLifecycle(t *testing.T) { if err := job.Started(ctx); err != nil { t.Fatal(err) } - if err := job.FractionProgressed(ctx, nil /* txn */, jobs.FractionUpdater(-0.1)); !testutils.IsError(err, "outside allowable range") { + if err := job.NoTxn().FractionProgressed(ctx, jobs.FractionUpdater(-0.1)); !testutils.IsError(err, "outside allowable range") { t.Fatalf("expected 'outside allowable range' error, but got %v", err) } - if err := job.FractionProgressed(ctx, nil /* txn */, jobs.FractionUpdater(1.1)); !testutils.IsError(err, "outside allowable range") { + if err := job.NoTxn().FractionProgressed(ctx, jobs.FractionUpdater(1.1)); !testutils.IsError(err, "outside allowable range") { t.Fatalf("expected 'outside allowable range' error, but got %v", err) } - if err := job.Update(ctx, nil, func(_ *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + if err := job.NoTxn().Update(ctx, func(_ isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { return jobs.UpdateHighwaterProgressed(hlc.Timestamp{WallTime: -1}, md, ju) }); !testutils.IsError(err, "outside allowable range") { t.Fatalf("expected 'outside allowable range' error, but got %v", err) @@ -1706,7 +1709,7 @@ func 
TestJobLifecycle(t *testing.T) { if err := job.Started(ctx); err != nil { t.Fatal(err) } - if err := job.Update(ctx, nil, func(_ *kv.Txn, _ jobs.JobMetadata, ju *jobs.JobUpdater) error { + if err := job.NoTxn().Update(ctx, func(_ isql.Txn, _ jobs.JobMetadata, ju *jobs.JobUpdater) error { return errors.Errorf("boom") }); !testutils.IsError(err, "boom") { t.Fatalf("expected 'boom' error, but got %v", err) @@ -1721,7 +1724,7 @@ func TestJobLifecycle(t *testing.T) { if err := job.Succeeded(ctx); err != nil { t.Fatal(err) } - if err := job.FractionProgressed(ctx, nil /* txn */, jobs.FractionUpdater(0.5)); !testutils.IsError( + if err := job.NoTxn().FractionProgressed(ctx, jobs.FractionUpdater(0.5)); !testutils.IsError( err, `cannot update progress on succeeded job \(id \d+\)`, ) { t.Fatalf("expected 'cannot update progress' error, but got %v", err) @@ -1733,7 +1736,7 @@ func TestJobLifecycle(t *testing.T) { if err := registry.PauseRequested(ctx, nil, job.ID(), ""); err != nil { t.Fatal(err) } - if err := job.FractionProgressed(ctx, nil /* txn */, jobs.FractionUpdater(0.5)); !testutils.IsError( + if err := job.NoTxn().FractionProgressed(ctx, jobs.FractionUpdater(0.5)); !testutils.IsError( err, `cannot update progress on pause-requested job`, ) { t.Fatalf("expected progress error, but got %v", err) @@ -1745,7 +1748,7 @@ func TestJobLifecycle(t *testing.T) { if err := registry.CancelRequested(ctx, nil, job.ID()); err != nil { t.Fatal(err) } - if err := job.FractionProgressed(ctx, nil /* txn */, jobs.FractionUpdater(0.5)); !testutils.IsError( + if err := job.NoTxn().FractionProgressed(ctx, jobs.FractionUpdater(0.5)); !testutils.IsError( err, `cannot update progress on cancel-requested job \(id \d+\)`, ) { t.Fatalf("expected progress error, but got %v", err) @@ -1757,7 +1760,7 @@ func TestJobLifecycle(t *testing.T) { if err := job.Started(ctx); err != nil { t.Fatal(err) } - if err := job.FractionProgressed(ctx, nil /* txn */, jobs.FractionUpdater(0.2)); err != nil { + if err 
:= job.NoTxn().FractionProgressed(ctx, jobs.FractionUpdater(0.2)); err != nil { t.Fatal(err) } if err := job.Succeeded(ctx); err != nil { @@ -1777,14 +1780,14 @@ func TestJobLifecycle(t *testing.T) { require.NoError(t, exp.verify(job.ID(), jobs.StatusRunning)) newDetails := jobspb.ImportDetails{URIs: []string{"new"}} exp.Record.Details = newDetails - require.NoError(t, job.SetDetails(ctx, nil /* txn */, newDetails)) + require.NoError(t, job.NoTxn().SetDetails(ctx, newDetails)) require.NoError(t, exp.verify(job.ID(), jobs.StatusRunning)) - require.NoError(t, job.SetDetails(ctx, nil /* txn */, newDetails)) + require.NoError(t, job.NoTxn().SetDetails(ctx, newDetails)) // Now change job's session id and check that updates are rejected. _, err := exp.DB.Exec(updateClaimStmt, "!@#!@$!$@#", job.ID()) require.NoError(t, err) - require.Error(t, job.SetDetails(ctx, nil /* txn */, newDetails)) + require.Error(t, job.NoTxn().SetDetails(ctx, newDetails)) require.NoError(t, exp.verify(job.ID(), jobs.StatusRunning)) }) @@ -1793,7 +1796,7 @@ func TestJobLifecycle(t *testing.T) { require.NoError(t, exp.verify(job.ID(), jobs.StatusRunning)) _, err := exp.DB.Exec(updateStatusStmt, jobs.StatusCancelRequested, job.ID()) require.NoError(t, err) - require.Error(t, job.SetDetails(ctx, nil /* txn */, jobspb.ImportDetails{URIs: []string{"new"}})) + require.Error(t, job.NoTxn().SetDetails(ctx, jobspb.ImportDetails{URIs: []string{"new"}})) require.NoError(t, exp.verify(job.ID(), jobs.StatusCancelRequested)) }) @@ -1802,13 +1805,13 @@ func TestJobLifecycle(t *testing.T) { require.NoError(t, exp.verify(job.ID(), jobs.StatusRunning)) newProgress := jobspb.ImportProgress{ResumePos: []int64{42}} exp.Record.Progress = newProgress - require.NoError(t, job.SetProgress(ctx, nil /* txn */, newProgress)) + require.NoError(t, job.NoTxn().SetProgress(ctx, newProgress)) require.NoError(t, exp.verify(job.ID(), jobs.StatusRunning)) // Now change job's session id and check that updates are rejected. 
_, err := exp.DB.Exec(updateClaimStmt, "!@#!@$!$@#", job.ID()) require.NoError(t, err) - require.Error(t, job.SetDetails(ctx, nil /* txn */, newProgress)) + require.Error(t, job.NoTxn().SetDetails(ctx, newProgress)) require.NoError(t, exp.verify(job.ID(), jobs.StatusRunning)) }) @@ -1817,7 +1820,7 @@ func TestJobLifecycle(t *testing.T) { require.NoError(t, exp.verify(job.ID(), jobs.StatusRunning)) _, err := exp.DB.Exec(updateStatusStmt, jobs.StatusPauseRequested, job.ID()) require.NoError(t, err) - require.Error(t, job.SetProgress(ctx, nil /* txn */, jobspb.ImportProgress{ResumePos: []int64{42}})) + require.Error(t, job.NoTxn().SetProgress(ctx, jobspb.ImportProgress{ResumePos: []int64{42}})) require.NoError(t, exp.verify(job.ID(), jobs.StatusPauseRequested)) }) } @@ -2201,11 +2204,11 @@ func TestShowJobWhenComplete(t *testing.T) { status string } var out row - + insqlDB := s.InternalDB().(isql.DB) t.Run("show job", func(t *testing.T) { // Start a job and cancel it so it is in state finished and then query it with // SHOW JOB WHEN COMPLETE. - job, err := jobs.TestingCreateAndStartJob(ctx, registry, s.DB(), mockJob) + job, err := jobs.TestingCreateAndStartJob(ctx, registry, insqlDB, mockJob) if err != nil { t.Fatal(err) } @@ -2243,7 +2246,7 @@ func TestShowJobWhenComplete(t *testing.T) { // query still blocks until the second job is also canceled. 
var jobsToStart [2]*jobs.StartableJob for i := range jobsToStart { - job, err := jobs.TestingCreateAndStartJob(ctx, registry, s.DB(), mockJob) + job, err := jobs.TestingCreateAndStartJob(ctx, registry, insqlDB, mockJob) if err != nil { t.Fatal(err) } @@ -2330,7 +2333,7 @@ func TestJobInTxn(t *testing.T) { } fn := func(ctx context.Context, _ []sql.PlanNode, _ chan<- tree.Datums) error { var err error - job, err = execCtx.ExtendedEvalContext().QueueJob(ctx, execCtx.Txn(), jobs.Record{ + job, err = execCtx.ExtendedEvalContext().QueueJob(ctx, execCtx.InternalSQLTxn(), jobs.Record{ Description: st.String(), Details: jobspb.BackupDetails{}, Progress: jobspb.BackupProgress{}, @@ -2370,7 +2373,7 @@ func TestJobInTxn(t *testing.T) { } fn := func(ctx context.Context, _ []sql.PlanNode, _ chan<- tree.Datums) error { var err error - job, err = execCtx.ExtendedEvalContext().QueueJob(ctx, execCtx.Txn(), jobs.Record{ + job, err = execCtx.ExtendedEvalContext().QueueJob(ctx, execCtx.InternalSQLTxn(), jobs.Record{ Description: "RESTORE", Details: jobspb.RestoreDetails{}, Progress: jobspb.RestoreProgress{}, @@ -2415,7 +2418,6 @@ func TestJobInTxn(t *testing.T) { sqlRunner.Exec(t, "SHOW JOB WHEN COMPLETE $1", job.ID()) require.Equal(t, int32(0), atomic.LoadInt32(&hasRun), "job has run in transaction before txn commit") - require.True(t, timeutil.Since(start) < jobs.DefaultAdoptInterval, "job should have been adopted immediately") }) @@ -2471,7 +2473,7 @@ func TestStartableJobMixedVersion(t *testing.T) { clusterversion.TestingBinaryMinSupportedVersion, false, /* initializeVersion */ ) - s, sqlDB, db := serverutils.StartServer(t, base.TestServerArgs{ + s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{ Settings: st, Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ @@ -2490,7 +2492,8 @@ func TestStartableJobMixedVersion(t *testing.T) { }, jobs.UsesTenantCostControl) var j *jobs.StartableJob jobID := jr.MakeJobID() - require.NoError(t, db.Txn(ctx, func(ctx 
context.Context, txn *kv.Txn) (err error) { + insqlDB := s.InternalDB().(isql.DB) + require.NoError(t, insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { err = jr.CreateStartableJobWithTxn(ctx, &j, jobID, txn, jobs.Record{ Details: jobspb.ImportDetails{}, Progress: jobspb.ImportProgress{}, @@ -2513,7 +2516,7 @@ func TestStartableJob(t *testing.T) { defer jobs.ResetConstructors()() ctx := context.Background() - s, _, db := serverutils.StartServer(t, base.TestServerArgs{}) + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) jr := s.JobRegistry().(*jobs.Registry) var resumeFunc atomic.Value @@ -2540,9 +2543,10 @@ func TestStartableJob(t *testing.T) { Details: jobspb.RestoreDetails{}, Progress: jobspb.RestoreProgress{}, } + insqlDB := s.InternalDB().(isql.DB) createStartableJob := func(t *testing.T) (sj *jobs.StartableJob) { jobID := jr.MakeJobID() - require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + require.NoError(t, insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { return jr.CreateStartableJobWithTxn(ctx, &sj, jobID, txn, rec) })) return sj @@ -2556,35 +2560,36 @@ func TestStartableJob(t *testing.T) { require.NoError(t, sj.AwaitCompletion(ctx)) }) t.Run("Start called with active txn", func(t *testing.T) { - txn := db.NewTxn(ctx, "test") - defer func() { - require.NoError(t, txn.Rollback(ctx)) - }() - var sj *jobs.StartableJob - err := jr.CreateStartableJobWithTxn(ctx, &sj, jr.MakeJobID(), txn, rec) - require.NoError(t, err) - err = sj.Start(ctx) - require.Regexp(t, `cannot resume .* job which is not committed`, err) + require.Regexp(t, "boom", insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + var sj *jobs.StartableJob + err := jr.CreateStartableJobWithTxn(ctx, &sj, jr.MakeJobID(), txn, rec) + require.NoError(t, err) + err = sj.Start(ctx) + require.Regexp(t, `cannot resume .* job which is not committed`, err) + return errors.New("boom") 
+ })) }) t.Run("Start called with aborted txn", func(t *testing.T) { - txn := db.NewTxn(ctx, "test") var sj *jobs.StartableJob - err := jr.CreateStartableJobWithTxn(ctx, &sj, jr.MakeJobID(), txn, rec) - require.NoError(t, err) - require.NoError(t, txn.Rollback(ctx)) - err = sj.Start(ctx) + _ = insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + err := jr.CreateStartableJobWithTxn(ctx, &sj, jr.MakeJobID(), txn, rec) + if err != nil { + return err + } + return txn.KV().Rollback(ctx) + }) + err := sj.Start(ctx) require.Regexp(t, `cannot resume .* job which is not committed`, err) }) t.Run("CleanupOnRollback called with active txn", func(t *testing.T) { - txn := db.NewTxn(ctx, "test") - defer func() { - require.NoError(t, txn.Rollback(ctx)) - }() - var sj *jobs.StartableJob - err := jr.CreateStartableJobWithTxn(ctx, &sj, jr.MakeJobID(), txn, rec) - require.NoError(t, err) - err = sj.CleanupOnRollback(ctx) - require.Regexp(t, `cannot call CleanupOnRollback for a StartableJob with a non-finalized transaction`, err) + require.Regexp(t, "boom", insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + var sj *jobs.StartableJob + err := jr.CreateStartableJobWithTxn(ctx, &sj, jr.MakeJobID(), txn, rec) + require.NoError(t, err) + err = sj.CleanupOnRollback(ctx) + require.Regexp(t, `cannot call CleanupOnRollback for a StartableJob with a non-finalized transaction`, err) + return errors.New("boom") + })) }) t.Run("CleanupOnRollback called with committed txn", func(t *testing.T) { sj := createStartableJob(t) @@ -2592,24 +2597,21 @@ func TestStartableJob(t *testing.T) { require.Regexp(t, `cannot call CleanupOnRollback for a StartableJob created by a committed transaction`, err) }) t.Run("CleanupOnRollback positive case", func(t *testing.T) { - txn := db.NewTxn(ctx, "test") var sj *jobs.StartableJob - err := jr.CreateStartableJobWithTxn(ctx, &sj, jr.MakeJobID(), txn, rec) - require.NoError(t, err) - require.NoError(t, txn.Rollback(ctx)) + require.Regexp(t, 
"boom", insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + err := jr.CreateStartableJobWithTxn(ctx, &sj, jr.MakeJobID(), txn, rec) + require.NoError(t, err) + return errors.New("boom") + })) require.NoError(t, sj.CleanupOnRollback(ctx)) for _, id := range jr.CurrentlyRunningJobs() { require.NotEqual(t, id, sj.ID()) } }) t.Run("Cancel", func(t *testing.T) { - txn := db.NewTxn(ctx, "test") - var sj *jobs.StartableJob - err := jr.CreateStartableJobWithTxn(ctx, &sj, jr.MakeJobID(), txn, rec) - require.NoError(t, err) - require.NoError(t, txn.Commit(ctx)) + sj := createStartableJob(t) require.NoError(t, sj.Cancel(ctx)) - status, err := sj.TestingCurrentStatus(ctx, nil /* txn */) + status, err := sj.TestingCurrentStatus(ctx) require.NoError(t, err) require.Equal(t, jobs.StatusCancelRequested, status) // Start should fail since we have already called cancel on the job. @@ -2652,7 +2654,7 @@ func TestStartableJob(t *testing.T) { }) clientResults := make(chan tree.Datums) jobID := jr.MakeJobID() - require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + require.NoError(t, insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { return jr.CreateStartableJobWithTxn(ctx, &sj, jobID, txn, rec) })) return sj, clientResults, blockResume, cleanup @@ -2683,7 +2685,7 @@ func TestStartableJob(t *testing.T) { testutils.SucceedsSoon(t, func() error { loaded, err := jr.LoadJob(ctx, sj.ID()) require.NoError(t, err) - st, err := loaded.TestingCurrentStatus(ctx, nil /* txn */) + st, err := loaded.TestingCurrentStatus(ctx) require.NoError(t, err) if st != jobs.StatusSucceeded { return errors.Errorf("expected %s, got %s", jobs.StatusSucceeded, st) @@ -2721,7 +2723,7 @@ func TestStartableJobTxnRetry(t *testing.T) { return nil }, } - s, _, db := serverutils.StartServer(t, params) + s, _, _ := serverutils.StartServer(t, params) defer s.Stopper().Stop(ctx) jr := s.JobRegistry().(*jobs.Registry) 
jobs.RegisterConstructor(jobspb.TypeRestore, func(job *jobs.Job, settings *cluster.Settings) jobs.Resumer { @@ -2733,10 +2735,11 @@ func TestStartableJobTxnRetry(t *testing.T) { Username: username.TestUserName(), } + db := s.InternalDB().(isql.DB) jobID := jr.MakeJobID() var sj *jobs.StartableJob - require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - txn.SetDebugName(txnName) + require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + txn.KV().SetDebugName(txnName) return jr.CreateStartableJobWithTxn(ctx, &sj, jobID, txn, rec) })) require.True(t, haveInjectedRetry) @@ -3055,12 +3058,12 @@ func TestLoseLeaseDuringExecution(t *testing.T) { return jobs.FakeResumer{ OnResume: func(ctx context.Context) error { defer close(resumed) - _, err := s.InternalExecutor().(sqlutil.InternalExecutor).Exec( + _, err := s.InternalExecutor().(isql.Executor).Exec( ctx, "set-claim-null", nil, /* txn */ `UPDATE system.jobs SET claim_session_id = NULL WHERE id = $1`, j.ID()) assert.NoError(t, err) - err = j.Update(ctx, nil /* txn */, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + err = j.NoTxn().Update(ctx, func(txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { return nil }) resumed <- err @@ -3257,7 +3260,7 @@ func TestJobsRetry(t *testing.T) { rts.mockJob.SetNonCancelable(rts.ctx, func(ctx context.Context, nonCancelable bool) bool { return true }) - j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -3298,7 +3301,7 @@ func TestJobsRetry(t *testing.T) { rts := registryTestSuite{} rts.setUp(t) defer rts.tearDown() - j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -3346,7 +3349,7 @@ 
func TestJobsRetry(t *testing.T) { rts.mockJob.SetNonCancelable(rts.ctx, func(ctx context.Context, nonCancelable bool) bool { return true }) - j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.s.DB(), rts.mockJob) + j, err := jobs.TestingCreateAndStartJob(rts.ctx, rts.registry, rts.idb(), rts.mockJob) if err != nil { t.Fatal(err) } @@ -3387,14 +3390,14 @@ func TestPausepoints(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - s, sqlDB, db := serverutils.StartServer(t, base.TestServerArgs{ + s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), }, }) registry := s.JobRegistry().(*jobs.Registry) defer s.Stopper().Stop(ctx) - + idb := s.InternalDB().(isql.DB) jobs.RegisterConstructor(jobspb.TypeImport, func(job *jobs.Job, settings *cluster.Settings) jobs.Resumer { return jobs.FakeResumer{ OnResume: func(ctx context.Context) error { @@ -3429,7 +3432,7 @@ func TestPausepoints(t *testing.T) { jobID := registry.MakeJobID() var sj *jobs.StartableJob - require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + require.NoError(t, idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { return registry.CreateStartableJobWithTxn(ctx, &sj, jobID, txn, rec) })) require.NoError(t, sj.Start(ctx)) @@ -3438,7 +3441,7 @@ func TestPausepoints(t *testing.T) { } else { require.Error(t, sj.AwaitCompletion(ctx)) } - status, err := sj.TestingCurrentStatus(ctx, nil) + status, err := sj.TestingCurrentStatus(ctx) // Map pause-requested to paused to avoid races. 
if status == jobs.StatusPauseRequested { status = jobs.StatusPaused diff --git a/pkg/jobs/jobsprotectedts/BUILD.bazel b/pkg/jobs/jobsprotectedts/BUILD.bazel index 30d0c39f57d2..1105b751bc20 100644 --- a/pkg/jobs/jobsprotectedts/BUILD.bazel +++ b/pkg/jobs/jobsprotectedts/BUILD.bazel @@ -14,7 +14,6 @@ go_library( "//pkg/jobs", "//pkg/jobs/jobspb", "//pkg/keys", - "//pkg/kv", "//pkg/kv/kvserver/protectedts", "//pkg/kv/kvserver/protectedts/ptpb", "//pkg/kv/kvserver/protectedts/ptreconcile", @@ -22,7 +21,7 @@ go_library( "//pkg/scheduledjobs", "//pkg/sql/catalog", "//pkg/sql/catalog/descpb", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/util/ctxgroup", "//pkg/util/hlc", "//pkg/util/uuid", diff --git a/pkg/jobs/jobsprotectedts/jobs_protected_ts.go b/pkg/jobs/jobsprotectedts/jobs_protected_ts.go index 5da0493f0651..5192b18a35f5 100644 --- a/pkg/jobs/jobsprotectedts/jobs_protected_ts.go +++ b/pkg/jobs/jobsprotectedts/jobs_protected_ts.go @@ -16,12 +16,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptreconcile" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" @@ -52,12 +51,10 @@ func GetMetaType(metaType MetaType) string { // MakeStatusFunc returns a function which determines whether the job or // schedule implied with this value of meta should be removed by the reconciler. 
-func MakeStatusFunc( - jr *jobs.Registry, ie sqlutil.InternalExecutor, metaType MetaType, -) ptreconcile.StatusFunc { +func MakeStatusFunc(jr *jobs.Registry, metaType MetaType) ptreconcile.StatusFunc { switch metaType { case Jobs: - return func(ctx context.Context, txn *kv.Txn, meta []byte) (shouldRemove bool, _ error) { + return func(ctx context.Context, txn isql.Txn, meta []byte) (shouldRemove bool, _ error) { jobID, err := decodeID(meta) if err != nil { return false, err @@ -69,16 +66,17 @@ func MakeStatusFunc( if err != nil { return false, err } - isTerminal := j.CheckTerminalStatus(ctx, txn) + isTerminal := j.WithTxn(txn).CheckTerminalStatus(ctx) return isTerminal, nil } case Schedules: - return func(ctx context.Context, txn *kv.Txn, meta []byte) (shouldRemove bool, _ error) { + return func(ctx context.Context, txn isql.Txn, meta []byte) (shouldRemove bool, _ error) { scheduleID, err := decodeID(meta) if err != nil { return false, err } - _, err = jobs.LoadScheduledJob(ctx, scheduledjobs.ProdJobSchedulerEnv, scheduleID, ie, txn) + _, err = jobs.ScheduledJobTxn(txn). 
+ Load(ctx, scheduledjobs.ProdJobSchedulerEnv, scheduleID) if jobs.HasScheduledJobNotFoundError(err) { return true, nil } diff --git a/pkg/jobs/jobsprotectedts/jobs_protected_ts_manager.go b/pkg/jobs/jobsprotectedts/jobs_protected_ts_manager.go index 4f34819c2a1a..2de7fff06254 100644 --- a/pkg/jobs/jobsprotectedts/jobs_protected_ts_manager.go +++ b/pkg/jobs/jobsprotectedts/jobs_protected_ts_manager.go @@ -18,11 +18,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/uuid" @@ -37,9 +37,9 @@ const timedProtectTimeStampGCPct = 0.8 // install protected timestamps after a certain percentage of the GC interval // is hit. type Manager struct { - db *kv.DB + db isql.DB codec keys.SQLCodec - protectedTSProvider protectedts.Provider + protectedTSProvider protectedts.Manager systemConfig config.SystemConfigProvider jr *jobs.Registry } @@ -80,9 +80,9 @@ func getProtectedTSOnJob(details jobspb.Details) *uuid.UUID { // NewManager creates a new protected timestamp manager // for jobs. func NewManager( - db *kv.DB, + db isql.DB, codec keys.SQLCodec, - protectedTSProvider protectedts.Provider, + protectedTSProvider protectedts.Manager, systemConfig config.SystemConfigProvider, jr *jobs.Registry, ) *Manager { @@ -170,10 +170,11 @@ func (p *Manager) Protect( return nil, nil } // Set up a new protected timestamp ID and install it on the job. 
- err := job.Update(ctx, nil, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + err := job.NoTxn().Update(ctx, func(txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { // Check if the protected timestamp is visible in the txn. protectedtsID := getProtectedTSOnJob(md.Payload.UnwrapDetails()) // If it's been removed lets create a new one. + pts := p.protectedTSProvider.WithTxn(txn) if protectedtsID == nil { newID := uuid.MakeV4() protectedtsID = &newID @@ -182,10 +183,10 @@ func (p *Manager) Protect( ju.UpdatePayload(md.Payload) rec := MakeRecord(*protectedtsID, int64(job.ID()), readAsOf, nil, Jobs, target) - return p.protectedTSProvider.Protect(ctx, txn, rec) + return pts.Protect(ctx, rec) } // Refresh the existing timestamp, otherwise. - return p.protectedTSProvider.UpdateTimestamp(ctx, txn, *protectedtsID, readAsOf) + return pts.UpdateTimestamp(ctx, *protectedtsID, readAsOf) }) if err != nil { return nil, err @@ -207,7 +208,7 @@ func (p *Manager) Unprotect(ctx context.Context, job *jobs.Job) error { } // If we do find one then we need to clean up the protected timestamp, // and remove it from the job. - return job.Update(ctx, nil, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + return job.NoTxn().Update(ctx, func(txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { // The job will get refreshed, so check one more time the protected // timestamp still exists. The callback returned from Protect works // on a previously cached copy. 
@@ -218,6 +219,6 @@ func (p *Manager) Unprotect(ctx context.Context, job *jobs.Job) error { updatedDetails := setProtectedTSOnJob(md.Payload.UnwrapDetails(), nil) md.Payload.Details = jobspb.WrapPayloadDetails(updatedDetails) ju.UpdatePayload(md.Payload) - return p.protectedTSProvider.Release(ctx, txn, *protectedtsID) + return p.protectedTSProvider.WithTxn(txn).Release(ctx, *protectedtsID) }) } diff --git a/pkg/jobs/progress.go b/pkg/jobs/progress.go index eda09c5c8639..d27d7af5ba86 100644 --- a/pkg/jobs/progress.go +++ b/pkg/jobs/progress.go @@ -76,7 +76,7 @@ func NewChunkProgressLogger( completed: startFraction, reported: startFraction, Report: func(ctx context.Context, pct float32) error { - return j.FractionProgressed(ctx, nil /* txn */, func(ctx context.Context, details jobspb.ProgressDetails) float32 { + return j.NoTxn().FractionProgressed(ctx, func(ctx context.Context, details jobspb.ProgressDetails) float32 { if progressedFn != nil { progressedFn(ctx, details) } diff --git a/pkg/jobs/registry.go b/pkg/jobs/registry.go index efa765c2d2cc..4eee9ed4f5c6 100644 --- a/pkg/jobs/registry.go +++ b/pkg/jobs/registry.go @@ -22,18 +22,18 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/multitenant" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/server/tracedumper" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/builtins" + "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" 
"github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/envutil" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -96,9 +96,7 @@ type Registry struct { ac log.AmbientContext stopper *stop.Stopper - db *kv.DB - ex sqlutil.InternalExecutor - ief sqlutil.InternalExecutorFactory + db isql.DB clock *hlc.Clock clusterID *base.ClusterIDContainer nodeID *base.SQLIDContainer @@ -132,7 +130,7 @@ type Registry struct { // field. Modifying the TableCollection is basically a per-query operation // and should be a per-query setting. #34304 is the issue for creating/ // improving this API. - internalExecutorFactory sqlutil.InternalExecutorFactory + internalDB isql.DB // if non-empty, indicates path to file that prevents any job adoptions. preventAdoptionFile string @@ -162,6 +160,16 @@ type Registry struct { TestingResumerCreationKnobs map[jobspb.Type]func(Resumer) Resumer } +func (r *Registry) UpdateJobWithTxn( + ctx context.Context, jobID jobspb.JobID, txn isql.Txn, useReadLock bool, updateFunc UpdateFn, +) error { + job, err := r.LoadJobWithTxn(ctx, jobID, txn) + if err != nil { + return err + } + return job.WithTxn(txn).Update(ctx, updateFunc) +} + // jobExecCtxMaker is a wrapper around sql.NewInternalPlanner. It returns an // *sql.planner as an interface{} due to package dependency cycles. It should // be cast to that type in the sql package when it is used. 
Returns a cleanup @@ -188,9 +196,7 @@ func MakeRegistry( ac log.AmbientContext, stopper *stop.Stopper, clock *hlc.Clock, - db *kv.DB, - ex sqlutil.InternalExecutor, - ief sqlutil.InternalExecutorFactory, + db isql.DB, clusterID *base.ClusterIDContainer, nodeID *base.SQLIDContainer, sqlInstance sqlliveness.Instance, @@ -207,8 +213,6 @@ func MakeRegistry( stopper: stopper, clock: clock, db: db, - ex: ex, - ief: ief, clusterID: clusterID, nodeID: nodeID, sqlInstance: sqlInstance, @@ -235,12 +239,11 @@ func MakeRegistry( return r } -// SetInternalExecutorFactory sets the -// InternalExecutorFactory that will be used by the job registry +// SetInternalDB sets the DB that will be used by the job registry // executor. We expose this separately from the constructor to avoid a circular // dependency. -func (r *Registry) SetInternalExecutorFactory(factory sqlutil.InternalExecutorFactory) { - r.internalExecutorFactory = factory +func (r *Registry) SetInternalDB(db isql.DB) { + r.internalDB = db } // MetricsStruct returns the metrics for production monitoring of each job type. @@ -333,7 +336,7 @@ func (r *Registry) makeProgress(record *Record) jobspb.Progress { // one job to create, otherwise the function returns an error. The function // returns the IDs of the jobs created. 
func (r *Registry) CreateJobsWithTxn( - ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, records []*Record, + ctx context.Context, txn isql.Txn, records []*Record, ) ([]jobspb.JobID, error) { created := make([]jobspb.JobID, 0, len(records)) for toCreate := records; len(toCreate) > 0; { @@ -342,7 +345,7 @@ func (r *Registry) CreateJobsWithTxn( if batchSize > maxBatchSize { batchSize = maxBatchSize } - createdInBatch, err := r.createJobsInBatchWithTxn(ctx, txn, ie, toCreate[:batchSize]) + createdInBatch, err := createJobsInBatchWithTxn(ctx, r, txn, toCreate[:batchSize]) if err != nil { return nil, err } @@ -354,24 +357,23 @@ func (r *Registry) CreateJobsWithTxn( // createJobsInBatchWithTxn creates a batch of jobs from given records in a // transaction. -func (r *Registry) createJobsInBatchWithTxn( - ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, records []*Record, +func createJobsInBatchWithTxn( + ctx context.Context, r *Registry, txn isql.Txn, records []*Record, ) ([]jobspb.JobID, error) { s, err := r.sqlInstance.Session(ctx) if err != nil { return nil, errors.Wrap(err, "error getting live session") } - start := timeutil.Now() - if txn != nil { - start = txn.ReadTimestamp().GoTime() - } + start := txn.KV().ReadTimestamp().GoTime() modifiedMicros := timeutil.ToUnixMicros(start) - stmt, args, jobIDs, err := r.batchJobInsertStmt(ctx, s.ID(), records, modifiedMicros) + stmt, args, jobIDs, err := batchJobInsertStmt(ctx, r, s.ID(), records, modifiedMicros) if err != nil { return nil, err } - _, err = ie.ExecEx( - ctx, "job-rows-batch-insert", txn, sessiondata.RootUserSessionDataOverride, stmt, args..., + _, err = txn.ExecEx( + ctx, "job-rows-batch-insert", txn.KV(), + sessiondata.RootUserSessionDataOverride, + stmt, args..., ) if err != nil { return nil, err @@ -382,8 +384,12 @@ func (r *Registry) createJobsInBatchWithTxn( // batchJobInsertStmt creates an INSERT statement and its corresponding arguments // for batched jobs creation. 
-func (r *Registry) batchJobInsertStmt( - ctx context.Context, sessionID sqlliveness.SessionID, records []*Record, modifiedMicros int64, +func batchJobInsertStmt( + ctx context.Context, + r *Registry, + sessionID sqlliveness.SessionID, + records []*Record, + modifiedMicros int64, ) (string, []interface{}, []jobspb.JobID, error) { instanceID := r.ID() columns := []string{`id`, `created`, `status`, `payload`, `progress`, `claim_session_id`, `claim_instance_id`, `job_type`} @@ -484,7 +490,7 @@ func (r *Registry) batchJobInsertStmt( // the job in the jobs table, marks it pending and gives the current node a // lease. func (r *Registry) CreateJobWithTxn( - ctx context.Context, record Record, jobID jobspb.JobID, txn *kv.Txn, + ctx context.Context, record Record, jobID jobspb.JobID, txn isql.Txn, ) (*Job, error) { // TODO(sajjad): Clean up the interface - remove jobID from the params as // Record now has JobID field. @@ -493,50 +499,76 @@ func (r *Registry) CreateJobWithTxn( if err != nil { return nil, err } + do := func(ctx context.Context, txn isql.Txn) error { + s, err := r.sqlInstance.Session(ctx) + if err != nil { + return errors.Wrap(err, "error getting live session") + } + j.session = s + start := timeutil.Now() + if txn != nil { + start = txn.KV().ReadTimestamp().GoTime() + } + jobType := j.mu.payload.Type() + j.mu.progress.ModifiedMicros = timeutil.ToUnixMicros(start) + payloadBytes, err := protoutil.Marshal(&j.mu.payload) + if err != nil { + return err + } + progressBytes, err := protoutil.Marshal(&j.mu.progress) + if err != nil { + return err + } - s, err := r.sqlInstance.Session(ctx) - if err != nil { - return nil, errors.Wrap(err, "error getting live session") - } - j.session = s - start := timeutil.Now() - if txn != nil { - start = txn.ReadTimestamp().GoTime() - } - jobType := j.mu.payload.Type() - j.mu.progress.ModifiedMicros = timeutil.ToUnixMicros(start) - payloadBytes, err := protoutil.Marshal(&j.mu.payload) - if err != nil { - return nil, err - } - 
progressBytes, err := protoutil.Marshal(&j.mu.progress) - if err != nil { - return nil, err - } - - cols := [7]string{"id", "status", "payload", "progress", "claim_session_id", "claim_instance_id", "job_type"} - numCols := len(cols) - vals := [7]interface{}{jobID, StatusRunning, payloadBytes, progressBytes, s.ID().UnsafeBytes(), r.ID(), jobType.String()} - placeholders := func() string { - var p strings.Builder - for i := 0; i < numCols; i++ { - if i > 0 { - p.WriteByte(',') + created, err := tree.MakeDTimestamp(start, time.Microsecond) + if err != nil { + return errors.NewAssertionErrorWithWrappedErrf(err, "failed to construct job created timestamp") + } + cols := [...]string{"id", "created", "status", "payload", "progress", "claim_session_id", "claim_instance_id", "job_type"} + const totalNumCols = len(cols) + vals := [totalNumCols]interface{}{jobID, created, StatusRunning, payloadBytes, progressBytes, s.ID().UnsafeBytes(), r.ID(), jobType.String()} + numCols := totalNumCols + placeholders := func() string { + var p strings.Builder + for i := 0; i < numCols; i++ { + if i > 0 { + p.WriteByte(',') + } + p.WriteByte('$') + p.WriteString(strconv.Itoa(i + 1)) } - p.WriteByte('$') - p.WriteString(strconv.Itoa(i + 1)) + return p.String() } - return p.String() + // TODO(jayant): remove this version gate in 24.1 + // To run the upgrade below, migration and schema change jobs will need + // to be created using the old schema of the jobs table. + if !r.settings.Version.IsActive(ctx, clusterversion.V23_1AddTypeColumnToJobsTable) { + numCols -= 1 + } + + // We need to override the database in case we're in a situation where the + // database in question is being dropped. 
+ override := sessiondata.RootUserSessionDataOverride + override.Database = catconstants.SystemDatabaseName + insertStmt := fmt.Sprintf(`INSERT INTO system.jobs (%s) VALUES (%s)`, strings.Join(cols[:numCols], ","), placeholders()) + _, err = txn.ExecEx( + ctx, "job-row-insert", txn.KV(), + override, + insertStmt, vals[:numCols]..., + ) + return err } - // TODO(jayant): remove this version gate in 24.1 - // To run the upgrade below, migration and schema change jobs will need - // to be created using the old schema of the jobs table. - if !r.settings.Version.IsActive(ctx, clusterversion.V23_1AddTypeColumnToJobsTable) { - numCols -= 1 + + run := r.db.Txn + if txn != nil { + run = func( + ctx context.Context, f func(context.Context, isql.Txn) error, + _ ...isql.TxnOption, + ) error { + return f(ctx, txn) + } } - insertStmt := fmt.Sprintf(`INSERT INTO system.jobs (%s) VALUES (%s)`, strings.Join(cols[:numCols], ","), placeholders()) - if _, err = j.registry.ex.Exec(ctx, "job-row-insert", txn, insertStmt, vals[:numCols]..., - ); err != nil { + if err := run(ctx, do); err != nil { return nil, err } return j, nil @@ -545,7 +577,7 @@ func (r *Registry) CreateJobWithTxn( // CreateAdoptableJobWithTxn creates a job which will be adopted for execution // at a later time by some node in the cluster. func (r *Registry) CreateAdoptableJobWithTxn( - ctx context.Context, record Record, jobID jobspb.JobID, txn *kv.Txn, + ctx context.Context, record Record, jobID jobspb.JobID, txn isql.Txn, ) (*Job, error) { // TODO(sajjad): Clean up the interface - remove jobID from the params as // Record now has JobID field. 
@@ -554,14 +586,14 @@ func (r *Registry) CreateAdoptableJobWithTxn( if err != nil { return nil, err } - if err := j.runInTxn(ctx, txn, func(ctx context.Context, txn *kv.Txn) error { + do := func(ctx context.Context, txn isql.Txn) error { // Note: although the following uses ReadTimestamp and // ReadTimestamp can diverge from the value of now() throughout a // transaction, this may be OK -- we merely required ModifiedMicro // to be equal *or greater* than previously inserted timestamps // computed by now(). For now ReadTimestamp can only move forward // and the assertion ReadTimestamp >= now() holds at all times. - j.mu.progress.ModifiedMicros = timeutil.ToUnixMicros(txn.ReadTimestamp().GoTime()) + j.mu.progress.ModifiedMicros = timeutil.ToUnixMicros(txn.KV().ReadTimestamp().GoTime()) payloadBytes, err := protoutil.Marshal(&j.mu.payload) if err != nil { return err @@ -590,10 +622,23 @@ func (r *Registry) CreateAdoptableJobWithTxn( created_by_id ) VALUES ($1, $2, $3, $4, $5, $6);` - _, err = j.registry.ex.Exec(ctx, "job-insert", txn, stmt, + _, err = txn.ExecEx(ctx, "job-insert", txn.KV(), sessiondata.InternalExecutorOverride{ + User: username.NodeUserName(), + Database: catconstants.SystemDatabaseName, + }, stmt, jobID, StatusRunning, payloadBytes, progressBytes, createdByType, createdByID) return err - }); err != nil { + } + run := r.db.Txn + if txn != nil { + run = func( + ctx context.Context, f func(context.Context, isql.Txn) error, + _ ...isql.TxnOption, + ) error { + return f(ctx, txn) + } + } + if err := run(ctx, do); err != nil { return nil, errors.Wrap(err, "CreateAdoptableJobInTxn") } return j, nil @@ -623,8 +668,11 @@ VALUES ($1, $2, $3, $4, $5, $6);` // span is created and the job registered exactly once, if and only if the // transaction commits. This is a fragile API. 
func (r *Registry) CreateStartableJobWithTxn( - ctx context.Context, sj **StartableJob, jobID jobspb.JobID, txn *kv.Txn, record Record, + ctx context.Context, sj **StartableJob, jobID jobspb.JobID, txn isql.Txn, record Record, ) error { + if txn == nil { + return errors.AssertionFailedf("cannot create a startable job without a txn") + } alreadyInitialized := *sj != nil if alreadyInitialized { if jobID != (*sj).Job.ID() { @@ -669,7 +717,7 @@ func (r *Registry) CreateStartableJobWithTxn( } (*sj).Job = j (*sj).resumer = resumer - (*sj).txn = txn + (*sj).txn = txn.KV() return nil } @@ -693,7 +741,7 @@ func (r *Registry) LoadClaimedJob(ctx context.Context, jobID jobspb.JobID) (*Job if err != nil { return nil, err } - if err := j.load(ctx, nil); err != nil { + if err := j.NoTxn().load(ctx); err != nil { return nil, err } return j, nil @@ -703,13 +751,13 @@ func (r *Registry) LoadClaimedJob(ctx context.Context, jobID jobspb.JobID) (*Job // the txn argument. Passing a nil transaction is equivalent to calling LoadJob // in that a transaction will be automatically created. func (r *Registry) LoadJobWithTxn( - ctx context.Context, jobID jobspb.JobID, txn *kv.Txn, + ctx context.Context, jobID jobspb.JobID, txn isql.Txn, ) (*Job, error) { j := &Job{ id: jobID, registry: r, } - if err := j.load(ctx, txn); err != nil { + if err := j.WithTxn(txn).load(ctx); err != nil { return nil, err } return j, nil @@ -717,10 +765,10 @@ func (r *Registry) LoadJobWithTxn( // GetJobInfo fetches the latest info record for the given job and infoKey. 
func (r *Registry) GetJobInfo( - ctx context.Context, jobID jobspb.JobID, infoKey []byte, txn *kv.Txn, + ctx context.Context, jobID jobspb.JobID, infoKey []byte, txn isql.Txn, ) ([]byte, bool, error) { - row, err := r.ex.QueryRowEx( - ctx, "job-info-get", txn, + row, err := txn.QueryRowEx( + ctx, "job-info-get", txn.KV(), sessiondata.NodeUserSessionDataOverride, "SELECT value FROM system.job_info WHERE job_id = $1 AND info_key = $2 ORDER BY written DESC LIMIT 1", jobID, infoKey, @@ -749,11 +797,11 @@ func (r *Registry) IterateJobInfo( jobID jobspb.JobID, infoPrefix []byte, fn func(infoKey []byte, value []byte) error, - txn *kv.Txn, + txn isql.Txn, ) (retErr error) { // TODO(dt): verify this predicate hits the index. - rows, err := r.ex.QueryIteratorEx( - ctx, "job-info-iter", txn, + rows, err := txn.QueryIteratorEx( + ctx, "job-info-iter", txn.KV(), sessiondata.NodeUserSessionDataOverride, `SELECT info_key, value FROM system.job_info @@ -764,7 +812,7 @@ func (r *Registry) IterateJobInfo( if err != nil { return err } - defer func(it sqlutil.InternalRows) { retErr = errors.CombineErrors(retErr, it.Close()) }(rows) + defer func(it isql.Rows) { retErr = errors.CombineErrors(retErr, it.Close()) }(rows) var prevKey []byte var ok bool @@ -802,15 +850,15 @@ func (r *Registry) IterateJobInfo( // infoKey using the same transaction, effectively replacing any older row with // a row with the new value. func (r *Registry) WriteJobInfo( - ctx context.Context, jobID jobspb.JobID, infoKey, value []byte, txn *kv.Txn, + ctx context.Context, jobID jobspb.JobID, infoKey, value []byte, txn isql.Txn, ) error { // Assert we have a non-nil txn with which to delete and then write. if txn == nil { return errors.AssertionFailedf("a txn is required to write job info record") } // First clear out any older revisions of this info. 
- _, err := r.ex.ExecEx( - ctx, "job-info-write", txn, + _, err := txn.ExecEx( + ctx, "job-info-write", txn.KV(), sessiondata.NodeUserSessionDataOverride, "DELETE FROM system.job_info WHERE job_id = $1 AND info_key = $2", jobID, infoKey, @@ -820,8 +868,8 @@ func (r *Registry) WriteJobInfo( } // Write the new info, using the same transaction. - _, err = r.ex.ExecEx( - ctx, "job-info-write", txn, + _, err = txn.ExecEx( + ctx, "job-info-write", txn.KV(), sessiondata.NodeUserSessionDataOverride, `INSERT INTO system.job_info (job_id, info_key, written, value) VALUES ($1, $2, now(), $3)`, jobID, infoKey, value, @@ -837,15 +885,6 @@ func (r *Registry) WriteJobInfo( // locking the row from readers. Most updates of a job do not expect contention // and may do extra work and thus should not do locking. Cases where the job // is used to coordinate resources from multiple nodes may benefit from locking. -func (r *Registry) UpdateJobWithTxn( - ctx context.Context, jobID jobspb.JobID, txn *kv.Txn, useReadLock bool, updateFunc UpdateFn, -) error { - j := &Job{ - id: jobID, - registry: r, - } - return j.update(ctx, txn, useReadLock, updateFunc) -} // TODO (sajjad): make maxAdoptionsPerLoop a cluster setting. var maxAdoptionsPerLoop = envutil.EnvOrDefaultInt(`COCKROACH_JOB_ADOPTIONS_PER_PERIOD`, 10) @@ -904,15 +943,15 @@ func (r *Registry) Start(ctx context.Context, stopper *stop.Stopper) error { // removeClaimsFromDeadSessions queries the jobs table for non-terminal // jobs and nullifies their claims if the claims are owned by known dead sessions. removeClaimsFromDeadSessions := func(ctx context.Context, s sqlliveness.Session) { - if err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := r.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { // Run the expiration transaction at low priority to ensure that it does // not contend with foreground reads. 
Note that the adoption and cancellation // queries also use low priority so they will interact nicely. - if err := txn.SetUserPriority(roachpb.MinUserPriority); err != nil { + if err := txn.KV().SetUserPriority(roachpb.MinUserPriority); err != nil { return errors.WithAssertionFailure(err) } - _, err := r.ex.ExecEx( - ctx, "expire-sessions", txn, + _, err := txn.ExecEx( + ctx, "expire-sessions", txn.KV(), sessiondata.RootUserSessionDataOverride, removeClaimsForDeadSessionsQuery, s.ID().UnsafeBytes(), @@ -954,15 +993,15 @@ func (r *Registry) Start(ctx context.Context, stopper *stop.Stopper) error { // removeClaimsFromJobs queries the jobs table for non-terminal jobs and // nullifies their claims if the claims are owned by the current session. removeClaimsFromSession := func(ctx context.Context, s sqlliveness.Session) { - if err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := r.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { // Run the expiration transaction at low priority to ensure that it does // not contend with foreground reads. Note that the adoption and cancellation // queries also use low priority so they will interact nicely. 
- if err := txn.SetUserPriority(roachpb.MinUserPriority); err != nil { + if err := txn.KV().SetUserPriority(roachpb.MinUserPriority); err != nil { return errors.WithAssertionFailure(err) } - _, err := r.ex.ExecEx( - ctx, "remove-claims-for-session", txn, + _, err := txn.ExecEx( + ctx, "remove-claims-for-session", txn.KV(), sessiondata.RootUserSessionDataOverride, removeClaimsForSessionQuery, s.ID().UnsafeBytes(), ) @@ -1115,7 +1154,7 @@ const expiredJobsQuery = "SELECT id, payload, status, created FROM system.jobs " func (r *Registry) cleanupOldJobsPage( ctx context.Context, olderThan time.Time, minID jobspb.JobID, pageSize int, ) (done bool, maxID jobspb.JobID, retErr error) { - it, err := r.ex.QueryIterator(ctx, "gc-jobs", nil /* txn */, expiredJobsQuery, olderThan, minID, pageSize) + it, err := r.db.Executor().QueryIterator(ctx, "gc-jobs", nil /* txn */, expiredJobsQuery, olderThan, minID, pageSize) if err != nil { return false, 0, err } @@ -1156,13 +1195,13 @@ func (r *Registry) cleanupOldJobsPage( const stmt = `DELETE FROM system.jobs WHERE id = ANY($1)` const infoStmt = `DELETE FROM system.job_info WHERE job_id = ANY($1)` var nDeleted, nDeletedInfos int - if nDeleted, err = r.ex.Exec( + if nDeleted, err = r.db.Executor().Exec( ctx, "gc-jobs", nil /* txn */, stmt, toDelete, ); err != nil { log.Warningf(ctx, "error cleaning up %d jobs: %v", len(toDelete.Array), err) return false, 0, errors.Wrap(err, "deleting old jobs") } - nDeletedInfos, err = r.ex.Exec( + nDeletedInfos, err = r.db.Executor().Exec( ctx, "gc-job-infos", nil /* txn */, infoStmt, toDelete, ) if err != nil { @@ -1184,7 +1223,7 @@ func (r *Registry) cleanupOldJobsPage( // getJobFn attempts to get a resumer from the given job id. If the job id // does not have a resumer then it returns an error message suitable for users. 
func (r *Registry) getJobFn( - ctx context.Context, txn *kv.Txn, id jobspb.JobID, + ctx context.Context, txn isql.Txn, id jobspb.JobID, ) (*Job, Resumer, error) { job, err := r.LoadJobWithTxn(ctx, id, txn) if err != nil { @@ -1197,34 +1236,18 @@ func (r *Registry) getJobFn( return job, resumer, nil } -// CancelRequested marks the job as cancel-requested using the specified txn (may be nil). -func (r *Registry) CancelRequested(ctx context.Context, txn *kv.Txn, id jobspb.JobID) error { +// cancelRequested marks the job as cancel-requested using the specified txn (may be nil). +func (r *Registry) cancelRequested(ctx context.Context, txn isql.Txn, id jobspb.JobID) error { job, _, err := r.getJobFn(ctx, txn, id) if err != nil { - // Special case schema change jobs to mark the job as canceled. - if job != nil { - payload := job.Payload() - // TODO(mjibson): Use an unfortunate workaround to enable canceling of - // schema change jobs by comparing the string description. When a schema - // change job fails or is canceled, a new job is created with the ROLL BACK - // prefix. These rollback jobs cannot be canceled. We could add a field to - // the payload proto to indicate if this job is cancelable or not, but in - // a split version cluster an older node could pick up the schema change - // and fail to clear/set that field appropriately. Thus it seems that the - // safest way for now (i.e., without a larger jobs/schema change refactor) - // is to hack this up with a string comparison. - if payload.Type() == jobspb.TypeSchemaChange && !strings.HasPrefix(payload.Description, "ROLL BACK") { - return job.cancelRequested(ctx, txn, nil) - } - } return err } - return job.cancelRequested(ctx, txn, nil) + return job.maybeWithTxn(txn).CancelRequested(ctx) } // PauseRequested marks the job with id as paused-requested using the specified txn (may be nil). 
func (r *Registry) PauseRequested( - ctx context.Context, txn *kv.Txn, id jobspb.JobID, reason string, + ctx context.Context, txn isql.Txn, id jobspb.JobID, reason string, ) error { job, resumer, err := r.getJobFn(ctx, txn, id) if err != nil { @@ -1234,37 +1257,37 @@ func (r *Registry) PauseRequested( if pr, ok := resumer.(PauseRequester); ok { onPauseRequested = pr.OnPauseRequest } - return job.PauseRequested(ctx, txn, onPauseRequested, reason) + return job.WithTxn(txn).PauseRequestedWithFunc(ctx, onPauseRequested, reason) } // Succeeded marks the job with id as succeeded. -func (r *Registry) Succeeded(ctx context.Context, txn *kv.Txn, id jobspb.JobID) error { +func (r *Registry) Succeeded(ctx context.Context, txn isql.Txn, id jobspb.JobID) error { job, _, err := r.getJobFn(ctx, txn, id) if err != nil { return err } - return job.succeeded(ctx, txn, nil) + return job.WithTxn(txn).succeeded(ctx, nil) } // Failed marks the job with id as failed. func (r *Registry) Failed( - ctx context.Context, txn *kv.Txn, id jobspb.JobID, causingError error, + ctx context.Context, txn isql.Txn, id jobspb.JobID, causingError error, ) error { job, _, err := r.getJobFn(ctx, txn, id) if err != nil { return err } - return job.failed(ctx, txn, causingError, nil) + return job.WithTxn(txn).failed(ctx, causingError) } // Unpause changes the paused job with id to running or reverting using the // specified txn (may be nil). -func (r *Registry) Unpause(ctx context.Context, txn *kv.Txn, id jobspb.JobID) error { +func (r *Registry) Unpause(ctx context.Context, txn isql.Txn, id jobspb.JobID) error { job, _, err := r.getJobFn(ctx, txn, id) if err != nil { return err } - return job.unpaused(ctx, txn) + return job.WithTxn(txn).Unpaused(ctx) } // Resumer is a resumable job, and is associated with a Job object. Jobs can be @@ -1356,7 +1379,7 @@ type PauseRequester interface { // OnPauseRequest is called in the transaction that moves a job to PauseRequested. 
// If an error is returned, the pause request will fail. execCtx is a // sql.JobExecCtx. - OnPauseRequest(ctx context.Context, execCtx interface{}, txn *kv.Txn, details *jobspb.Progress) error + OnPauseRequest(ctx context.Context, execCtx interface{}, txn isql.Txn, details *jobspb.Progress) error } // JobResultsReporter is an interface for reporting the results of the job execution. @@ -1452,7 +1475,7 @@ func (r *Registry) stepThroughStateMachine( resumeCtx, undo := pprofutil.SetProfilerLabelsFromCtxTags(resumeCtx) defer undo() - if err := job.started(ctx, nil /* txn */); err != nil { + if err := job.NoTxn().started(ctx); err != nil { return err } @@ -1517,7 +1540,7 @@ func (r *Registry) stepThroughStateMachine( return errors.NewAssertionErrorWithWrappedErrf(jobErr, "job %d: unexpected status %s provided to state machine", job.ID(), status) case StatusCanceled: - if err := job.canceled(ctx, nil /* txn */, nil /* fn */); err != nil { + if err := job.NoTxn().canceled(ctx); err != nil { // If we can't transactionally mark the job as canceled then it will be // restarted during the next adopt loop and reverting will be retried. return errors.WithSecondaryError( @@ -1533,7 +1556,7 @@ func (r *Registry) stepThroughStateMachine( return errors.NewAssertionErrorWithWrappedErrf(jobErr, "job %d: successful but unexpected error provided", job.ID()) } - err := job.succeeded(ctx, nil /* txn */, nil /* fn */) + err := job.NoTxn().succeeded(ctx, nil /* fn */) switch { case err == nil: telemetry.Inc(TelemetryMetrics[jobType].Successful) @@ -1545,7 +1568,7 @@ func (r *Registry) stepThroughStateMachine( } return err case StatusReverting: - if err := job.reverted(ctx, nil /* txn */, jobErr, nil /* fn */); err != nil { + if err := job.NoTxn().reverted(ctx, jobErr, nil /* fn */); err != nil { // If we can't transactionally mark the job as reverting then it will be // restarted during the next adopt loop and it will be retried. 
return errors.WithSecondaryError( @@ -1585,7 +1608,7 @@ func (r *Registry) stepThroughStateMachine( if jobErr == nil { return errors.AssertionFailedf("job %d: has StatusFailed but no error was provided", job.ID()) } - if err := job.failed(ctx, nil /* txn */, jobErr, nil /* fn */); err != nil { + if err := job.NoTxn().failed(ctx, jobErr); err != nil { // If we can't transactionally mark the job as failed then it will be // restarted during the next adopt loop and reverting will be retried. return errors.WithSecondaryError( @@ -1604,7 +1627,7 @@ func (r *Registry) stepThroughStateMachine( return errors.AssertionFailedf("job %d: has StatusRevertFailed but no error was provided", job.ID()) } - if err := job.revertFailed(ctx, nil /* txn */, jobErr, nil /* fn */); err != nil { + if err := job.NoTxn().revertFailed(ctx, jobErr, nil /* fn */); err != nil { // If we can't transactionally mark the job as failed then it will be // restarted during the next adopt loop and reverting will be retried. return errors.WithSecondaryError( @@ -1731,8 +1754,8 @@ func (r *Registry) maybeRecordExecutionFailure(ctx context.Context, err error, j return } - updateErr := j.Update(ctx, nil, func( - txn *kv.Txn, md JobMetadata, ju *JobUpdater, + updateErr := j.NoTxn().Update(ctx, func( + txn isql.Txn, md JobMetadata, ju *JobUpdater, ) error { pl := md.Payload { // Append the entry to the log diff --git a/pkg/jobs/registry_external_test.go b/pkg/jobs/registry_external_test.go index 1b38dfbaed28..02daa8b906e6 100644 --- a/pkg/jobs/registry_external_test.go +++ b/pkg/jobs/registry_external_test.go @@ -26,16 +26,15 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" 
"github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -375,13 +374,14 @@ func TestGCDurationControl(t *testing.T) { jobs.RegisterConstructor(jobspb.TypeImport, func(_ *jobs.Job, cs *cluster.Settings) jobs.Resumer { return jobs.FakeResumer{} }, jobs.UsesTenantCostControl) - s, sqlDB, kvDB := serverutils.StartServer(t, args) + s, sqlDB, _ := serverutils.StartServer(t, args) defer s.Stopper().Stop(ctx) registry := s.JobRegistry().(*jobs.Registry) // Create and run a dummy job. + idb := s.InternalDB().(isql.DB) id := registry.MakeJobID() - require.NoError(t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { _, err := registry.CreateJobWithTxn(ctx, jobs.Record{ // Job does not accept an empty Details field, so arbitrarily provide // ImportDetails. 
@@ -393,7 +393,7 @@ func TestGCDurationControl(t *testing.T) { })) require.NoError(t, registry.WaitForJobs( - ctx, s.InternalExecutor().(sqlutil.InternalExecutor), []jobspb.JobID{id}, + ctx, []jobspb.JobID{id}, )) tdb := sqlutils.MakeSQLRunner(sqlDB) @@ -465,7 +465,6 @@ func TestErrorsPopulatedOnRetry(t *testing.T) { ctx := context.Background() defer s.Stopper().Stop(ctx) registry := s.JobRegistry().(*jobs.Registry) - ie := s.InternalExecutor().(sqlutil.InternalExecutor) mkJob := func(t *testing.T) jobspb.JobID { id := registry.MakeJobID() _, err := registry.CreateJobWithTxn(ctx, jobs.Record{ @@ -605,7 +604,7 @@ SELECT unnest(execution_errors) checkLogEntry(t, id, jobs.StatusRunning, secondStart, thirdStart, err2) } close(thirdRun.resume) - require.NoError(t, registry.WaitForJobs(ctx, ie, []jobspb.JobID{id})) + require.NoError(t, registry.WaitForJobs(ctx, []jobspb.JobID{id})) }) t.Run("fail or cancel error", func(t *testing.T) { id := mkJob(t) @@ -646,7 +645,7 @@ SELECT unnest(execution_errors) checkLogEntry(t, id, jobs.StatusReverting, thirdStart, fourthStart, err3) } close(fourthRun.resume) - require.Regexp(t, err2, registry.WaitForJobs(ctx, ie, []jobspb.JobID{id})) + require.Regexp(t, err2, registry.WaitForJobs(ctx, []jobspb.JobID{id})) }) t.Run("truncation", func(t *testing.T) { id := mkJob(t) @@ -723,6 +722,6 @@ SELECT unnest(execution_errors) checkLogEntry(t, id, jobs.StatusReverting, sixthStart, seventhStart, err6) } close(seventhRun.resume) - require.Regexp(t, err3, registry.WaitForJobs(ctx, ie, []jobspb.JobID{id})) + require.Regexp(t, err3, registry.WaitForJobs(ctx, []jobspb.JobID{id})) }) } diff --git a/pkg/jobs/registry_test.go b/pkg/jobs/registry_test.go index a8ac6122d83a..0d5bd4f436a2 100644 --- a/pkg/jobs/registry_test.go +++ b/pkg/jobs/registry_test.go @@ -32,8 +32,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" @@ -323,8 +323,8 @@ func TestBatchJobsCreation(t *testing.T) { } ctx := context.Background() - s, sqlDB, kvDB := serverutils.StartServer(t, args) - ief := s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory) + s, sqlDB, _ := serverutils.StartServer(t, args) + ief := s.InternalDB().(isql.DB) tdb := sqlutils.MakeSQLRunner(sqlDB) defer s.Stopper().Stop(ctx) r := s.JobRegistry().(*Registry) @@ -349,9 +349,9 @@ func TestBatchJobsCreation(t *testing.T) { } // Create jobs in a batch. var jobIDs []jobspb.JobID - require.NoError(t, ief.TxnWithExecutor(ctx, kvDB, nil /* sessionData */, func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) error { + require.NoError(t, ief.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { var err error - jobIDs, err = r.CreateJobsWithTxn(ctx, txn, ie, records) + jobIDs, err = r.CreateJobsWithTxn(ctx, txn, records) return err })) require.Equal(t, len(jobIDs), test.batchSize) @@ -385,10 +385,10 @@ func TestRetriesWithExponentialBackoff(t *testing.T) { // createJob creates a mock job. 
createJob := func( - ctx context.Context, s serverutils.TestServerInterface, r *Registry, tdb *sqlutils.SQLRunner, kvDB *kv.DB, + ctx context.Context, s serverutils.TestServerInterface, r *Registry, tdb *sqlutils.SQLRunner, db isql.DB, ) (jobspb.JobID, time.Time) { jobID := r.MakeJobID() - require.NoError(t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { _, err := r.CreateJobWithTxn(ctx, Record{ Details: jobspb.ImportDetails{}, Progress: jobspb.ImportProgress{}, @@ -421,13 +421,13 @@ func TestRetriesWithExponentialBackoff(t *testing.T) { // pauseOrCancelJob pauses or cancels a job. If pauseJob is true, the job is paused, // otherwise the job is canceled. pauseOrCancelJob := func( - t *testing.T, ctx context.Context, db *kv.DB, registry *Registry, jobID jobspb.JobID, pauseJob bool, + t *testing.T, ctx context.Context, db isql.DB, registry *Registry, jobID jobspb.JobID, pauseJob bool, ) { - assert.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + assert.NoError(t, db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { if pauseJob { return registry.PauseRequested(ctx, txn, jobID, "") } - return registry.CancelRequested(ctx, txn, jobID) + return registry.cancelRequested(ctx, txn, jobID) })) } // nextDelay returns the next delay based calculated from the given retryCnt @@ -444,6 +444,7 @@ func TestRetriesWithExponentialBackoff(t *testing.T) { s serverutils.TestServerInterface tdb *sqlutils.SQLRunner kvDB *kv.DB + idb isql.DB registry *Registry clock *timeutil.ManualTime resumeCh chan struct{} @@ -502,6 +503,7 @@ func TestRetriesWithExponentialBackoff(t *testing.T) { close(bti.failOrCancelCh) bti.s.Stopper().Stop(ctx) } + bti.idb = bti.s.InternalDB().(isql.DB) bti.tdb = sqlutils.MakeSQLRunner(sqlDB) bti.registry = bti.s.JobRegistry().(*Registry) bti.resumeCh = make(chan struct{}) @@ -610,7 +612,9 @@ func TestRetriesWithExponentialBackoff(t *testing.T) 
{ cleanup := testInfraSetUp(ctx, &bti) defer cleanup() - jobID, lastRun := createJob(ctx, bti.s, bti.registry, bti.tdb, bti.kvDB) + jobID, lastRun := createJob( + ctx, bti.s, bti.registry, bti.tdb, bti.idb, + ) retryCnt := 0 expectedResumed := int64(0) runTest(t, jobID, retryCnt, expectedResumed, lastRun, &bti, func(_ int64) { @@ -633,12 +637,13 @@ func TestRetriesWithExponentialBackoff(t *testing.T) { cleanup := testInfraSetUp(ctx, &bti) defer cleanup() - jobID, lastRun := createJob(ctx, bti.s, bti.registry, bti.tdb, bti.kvDB) + jobID, lastRun := createJob(ctx, bti.s, bti.registry, bti.tdb, bti.idb) retryCnt := 0 expectedResumed := int64(0) runTest(t, jobID, retryCnt, expectedResumed, lastRun, &bti, func(_ int64) { <-bti.resumeCh - pauseOrCancelJob(t, ctx, bti.kvDB, bti.registry, jobID, pause) + insqlDB := bti.s.InternalDB().(isql.DB) + pauseOrCancelJob(t, ctx, insqlDB, bti.registry, jobID, pause) bti.errCh <- nil <-bti.transitionCh waitUntilStatus(t, bti.tdb, jobID, StatusPaused) @@ -658,7 +663,9 @@ func TestRetriesWithExponentialBackoff(t *testing.T) { cleanup := testInfraSetUp(ctx, &bti) defer cleanup() - jobID, lastRun := createJob(ctx, bti.s, bti.registry, bti.tdb, bti.kvDB) + jobID, lastRun := createJob( + ctx, bti.s, bti.registry, bti.tdb, bti.idb, + ) bti.clock.AdvanceTo(lastRun) <-bti.resumeCh bti.errCh <- errors.Errorf("injecting error to revert") @@ -680,10 +687,12 @@ func TestRetriesWithExponentialBackoff(t *testing.T) { cleanup := testInfraSetUp(ctx, &bti) defer cleanup() - jobID, lastRun := createJob(ctx, bti.s, bti.registry, bti.tdb, bti.kvDB) + jobID, lastRun := createJob( + ctx, bti.s, bti.registry, bti.tdb, bti.idb, + ) bti.clock.AdvanceTo(lastRun) <-bti.resumeCh - pauseOrCancelJob(t, ctx, bti.kvDB, bti.registry, jobID, cancel) + pauseOrCancelJob(t, ctx, bti.idb, bti.registry, jobID, cancel) bti.errCh <- nil <-bti.failOrCancelCh bti.errCh <- MarkAsRetryJobError(errors.New("injecting error in reverting state")) @@ -710,7 +719,9 @@ func 
TestRetriesWithExponentialBackoff(t *testing.T) { cleanup := testInfraSetUp(ctx, &bti) defer cleanup() - jobID, lastRun := createJob(ctx, bti.s, bti.registry, bti.tdb, bti.kvDB) + jobID, lastRun := createJob( + ctx, bti.s, bti.registry, bti.tdb, bti.idb, + ) bti.clock.AdvanceTo(lastRun) <-bti.resumeCh bti.errCh <- errors.Errorf("injecting error to revert") @@ -721,7 +732,7 @@ func TestRetriesWithExponentialBackoff(t *testing.T) { retryCnt := 1 runTest(t, jobID, retryCnt, expectedResumed, lastRun, &bti, func(_ int64) { <-bti.failOrCancelCh - pauseOrCancelJob(t, ctx, bti.kvDB, bti.registry, jobID, pause) + pauseOrCancelJob(t, ctx, bti.idb, bti.registry, jobID, pause) // We have to return error here because, otherwise, the job will be marked as // failed regardless of the fact that it is currently pause-requested in the // jobs table. This is because we currently do not check the current status @@ -802,7 +813,7 @@ func TestExponentialBackoffSettings(t *testing.T) { }, }, } - s, sdb, kvDB := serverutils.StartServer(t, args) + s, sdb, _ := serverutils.StartServer(t, args) defer s.Stopper().Stop(ctx) tdb = sqlutils.MakeSQLRunner(sdb) // Create and run a dummy job. @@ -811,7 +822,8 @@ func TestExponentialBackoffSettings(t *testing.T) { }, UsesTenantCostControl) registry := s.JobRegistry().(*Registry) id := registry.MakeJobID() - require.NoError(t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + idb := s.InternalDB().(isql.DB) + require.NoError(t, idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { _, err := registry.CreateJobWithTxn(ctx, Record{ // Job does not accept an empty Details field, so arbitrarily provide // ImportDetails. 
@@ -963,11 +975,11 @@ func TestRunWithoutLoop(t *testing.T) { ctx := context.Background() settings := cluster.MakeTestingClusterSettings() intervalBaseSetting.Override(ctx, &settings.SV, 1e6) - s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{ + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{ Settings: settings, }) - ief := s.InternalExecutorFactory().(sqlutil.InternalExecutorFactory) + idb := s.InternalDB().(isql.DB) defer s.Stopper().Stop(ctx) r := s.JobRegistry().(*Registry) @@ -983,13 +995,11 @@ func TestRunWithoutLoop(t *testing.T) { }) } var jobIDs []jobspb.JobID - require.NoError(t, ief.TxnWithExecutor(ctx, kvDB, nil /* sessionData */, func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) (err error) { - jobIDs, err = r.CreateJobsWithTxn(ctx, txn, ie, records) + require.NoError(t, idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { + jobIDs, err = r.CreateJobsWithTxn(ctx, txn, records) return err })) - require.EqualError(t, r.Run( - ctx, s.InternalExecutor().(sqlutil.InternalExecutor), jobIDs, - ), "boom") + require.EqualError(t, r.Run(ctx, jobIDs), "boom") // No adoption loops should have been run. 
require.Equal(t, int64(0), r.metrics.AdoptIterations.Count()) require.Equal(t, int64(N), atomic.LoadInt64(&ran)) @@ -1002,7 +1012,7 @@ func TestJobIdleness(t *testing.T) { ctx := context.Background() intervalOverride := time.Millisecond - s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{ + s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{ // Ensure no other jobs are created and adoptions and cancellations are quick Knobs: base.TestingKnobs{ SpanConfig: &spanconfig.TestingKnobs{ @@ -1017,7 +1027,7 @@ func TestJobIdleness(t *testing.T) { }, }) defer s.Stopper().Stop(ctx) - + idb := s.InternalDB().(isql.DB) r := s.JobRegistry().(*Registry) resumeStartChan := make(chan struct{}) @@ -1036,7 +1046,7 @@ func TestJobIdleness(t *testing.T) { createJob := func() *Job { jobID := r.MakeJobID() - require.NoError(t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { _, err := r.CreateJobWithTxn(ctx, Record{ Details: jobspb.ImportDetails{}, Progress: jobspb.ImportProgress{}, @@ -1179,69 +1189,77 @@ func TestDisablingJobAdoptionClearsClaimSessionID(t *testing.T) { func TestJobInfoAccessors(t *testing.T) { defer leaktest.AfterTest(t)() - s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) ctx := context.Background() defer s.Stopper().Stop(ctx) + idb := s.InternalDB().(isql.DB) r := s.JobRegistry().(*Registry) kPrefix, kA, kB, kC := []byte("🔑"), []byte("🔑A"), []byte("🔑B"), []byte("🔑C") v1, v2 := []byte("val1"), []byte("val2") // Key doesn't exist yet. 
- _, ok, err := r.GetJobInfo(ctx, 1, kA, nil) + getJobInfo := func(id jobspb.JobID, key []byte) (v []byte, ok bool, err error) { + err = idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + v, ok, err = r.GetJobInfo(ctx, id, key, txn) + return err + }) + return v, ok, err + } + _, ok, err := getJobInfo(1, kA) require.NoError(t, err) require.False(t, ok) // Write kA = v1. - require.NoError(t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { return r.WriteJobInfo(ctx, 1, kA, v1, txn) })) // Check that key is now found with value v1. - v, ok, err := r.GetJobInfo(ctx, 1, kA, nil) + v, ok, err := getJobInfo(1, kA) require.NoError(t, err) require.True(t, ok) require.Equal(t, v1, v) // Overwrite kA = v2. - require.NoError(t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { return r.WriteJobInfo(ctx, 1, kA, v2, txn) })) // Check that key is now v1. - v, ok, err = r.GetJobInfo(ctx, 1, kA, nil) + v, ok, err = getJobInfo(1, kA) require.NoError(t, err) require.True(t, ok) require.Equal(t, v2, v) // Verify a different is not found. - _, ok, err = r.GetJobInfo(ctx, 1, kB, nil) + _, ok, err = getJobInfo(1, kB) require.NoError(t, err) require.False(t, ok) // Verify that the same key for a different job is not found. - _, ok, err = r.GetJobInfo(ctx, 2, kB, nil) + _, ok, err = getJobInfo(2, kB) require.NoError(t, err) require.False(t, ok) // Write and revise some info keys a, b and c (out of order, just for fun). 
- require.NoError(t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { return r.WriteJobInfo(ctx, 2, kB, v2, txn) })) - require.NoError(t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { return r.WriteJobInfo(ctx, 2, kA, v1, txn) })) - require.NoError(t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { return r.WriteJobInfo(ctx, 2, kC, v2, txn) })) - require.NoError(t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { return r.WriteJobInfo(ctx, 2, kA, v2, txn) })) // Iterate the common prefix of a, b and c. var i int - require.NoError(t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { return r.IterateJobInfo(ctx, 2, kPrefix, func(key, value []byte) error { i++ switch i { @@ -1260,7 +1278,7 @@ func TestJobInfoAccessors(t *testing.T) { // Iterate the specific prefix of just a. found := false - require.NoError(t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { return r.IterateJobInfo(ctx, 2, kA, func(key, value []byte) error { require.Equal(t, kA, key) require.Equal(t, v2, value) @@ -1271,7 +1289,7 @@ func TestJobInfoAccessors(t *testing.T) { require.True(t, found) // Iterate a different job. 
- require.NoError(t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + require.NoError(t, idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { return r.IterateJobInfo(ctx, 3, kPrefix, func(key, value []byte) error { t.Fatalf("unexpected record for job 3: %v = %v", key, value) return nil @@ -1295,10 +1313,26 @@ func TestJobRecordMissingUsername(t *testing.T) { Details: jobspb.ImportDetails{}, Progress: jobspb.ImportProgress{}, } - _, err := r.CreateAdoptableJobWithTxn(ctx, invalidRecord, 0, nil) - assert.EqualError(t, err, "job record missing username; could not make payload") - _, err = r.CreateJobWithTxn(ctx, invalidRecord, 0, nil) - assert.EqualError(t, err, "job record missing username; could not make payload") - _, err = r.CreateJobsWithTxn(ctx, nil, r.internalExecutorFactory.MakeInternalExecutorWithoutTxn(), []*Record{&invalidRecord}) - assert.EqualError(t, err, "job record missing username; could not make payload") + idb := r.internalDB + { + err := idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err := r.CreateAdoptableJobWithTxn(ctx, invalidRecord, 0, txn) + return err + }) + assert.EqualError(t, err, "job record missing username; could not make payload") + } + { + err := idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err := r.CreateJobWithTxn(ctx, invalidRecord, 0, txn) + return err + }) + assert.EqualError(t, err, "job record missing username; could not make payload") + } + { + err := idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err := r.CreateJobsWithTxn(ctx, txn, []*Record{&invalidRecord}) + return err + }) + assert.EqualError(t, err, "job record missing username; could not make payload") + } } diff --git a/pkg/jobs/scheduled_job.go b/pkg/jobs/scheduled_job.go index 548add7d918c..64e10e88d75f 100644 --- a/pkg/jobs/scheduled_job.go +++ b/pkg/jobs/scheduled_job.go @@ -18,13 +18,12 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - 
"github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" "github.com/robfig/cron/v3" @@ -93,31 +92,35 @@ func HasScheduledJobNotFoundError(err error) bool { return errors.HasType(err, (*scheduledJobNotFoundError)(nil)) } -// LoadScheduledJob loads scheduled job record from the database. -func LoadScheduledJob( - ctx context.Context, - env scheduledjobs.JobSchedulerEnv, - id int64, - ex sqlutil.InternalExecutor, - txn *kv.Txn, -) (*ScheduledJob, error) { - row, cols, err := ex.QueryRowExWithCols(ctx, "lookup-schedule", txn, - sessiondata.RootUserSessionDataOverride, - fmt.Sprintf("SELECT * FROM %s WHERE schedule_id = %d", - env.ScheduledJobsTableName(), id)) +func ScheduledJobDB(db isql.DB) ScheduledJobStorage { + return scheduledJobStorageDB{db: db} +} - if err != nil { - return nil, errors.CombineErrors(err, &scheduledJobNotFoundError{scheduleID: id}) - } - if row == nil { - return nil, &scheduledJobNotFoundError{scheduleID: id} - } +func ScheduledJobTxn(txn isql.Txn) ScheduledJobStorage { + return scheduledJobStorageTxn{txn: txn} +} - j := NewScheduledJob(env) - if err := j.InitFromDatums(row, cols); err != nil { - return nil, err - } - return j, nil +type ScheduledJobStorage interface { + // Load loads scheduled job record from the database. + Load(ctx context.Context, env scheduledjobs.JobSchedulerEnv, id int64) (*ScheduledJob, error) + + // DeleteByID removes this schedule with the given ID. + // If an error is returned, it is callers responsibility to handle it (e.g. rollback transaction). 
+ DeleteByID(ctx context.Context, env scheduledjobs.JobSchedulerEnv, id int64) error + + // Create persists this schedule in the system.scheduled_jobs table. + // Sets j.scheduleID to the ID of the newly created schedule. + // Only the values initialized in this schedule are written to the specified transaction. + // If an error is returned, it is callers responsibility to handle it (e.g. rollback transaction). + Create(ctx context.Context, j *ScheduledJob) error + + // Delete removes this schedule. + // If an error is returned, it is callers responsibility to handle it (e.g. rollback transaction). + Delete(ctx context.Context, j *ScheduledJob) error + + // Update saves changes made to this schedule. + // If an error is returned, it is callers responsibility to handle it (e.g. rollback transaction). + Update(ctx context.Context, j *ScheduledJob) error } // ScheduleID returns schedule ID. @@ -359,11 +362,103 @@ func (j *ScheduledJob) InitFromDatums(datums []tree.Datum, cols []colinfo.Result return nil } -// Create persists this schedule in the system.scheduled_jobs table. -// Sets j.scheduleID to the ID of the newly created schedule. -// Only the values initialized in this schedule are written to the specified transaction. -// If an error is returned, it is callers responsibility to handle it (e.g. rollback transaction). 
-func (j *ScheduledJob) Create(ctx context.Context, ex sqlutil.InternalExecutor, txn *kv.Txn) error { +type scheduledJobStorageDB struct{ db isql.DB } + +func (s scheduledJobStorageDB) DeleteByID( + ctx context.Context, env scheduledjobs.JobSchedulerEnv, id int64, +) error { + return s.run(ctx, func(ctx context.Context, txn scheduledJobStorageTxn) error { + return txn.DeleteByID(ctx, env, id) + }) +} + +func (s scheduledJobStorageDB) Load( + ctx context.Context, env scheduledjobs.JobSchedulerEnv, id int64, +) (*ScheduledJob, error) { + var j *ScheduledJob + if err := s.run(ctx, func( + ctx context.Context, txn scheduledJobStorageTxn, + ) (err error) { + j, err = txn.Load(ctx, env, id) + return err + }); err != nil { + return nil, err + } + return j, nil +} + +func (s scheduledJobStorageDB) run( + ctx context.Context, f func(ctx context.Context, txn scheduledJobStorageTxn) error, +) error { + return s.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { + return f(ctx, scheduledJobStorageTxn{txn}) + }) +} + +func (s scheduledJobStorageDB) runAction( + ctx context.Context, + f func(scheduledJobStorageTxn, context.Context, *ScheduledJob) error, + j *ScheduledJob, +) error { + return s.run(ctx, func(ctx context.Context, txn scheduledJobStorageTxn) error { + return f(txn, ctx, j) + }) +} + +func (s scheduledJobStorageDB) Create(ctx context.Context, j *ScheduledJob) error { + return s.runAction(ctx, scheduledJobStorageTxn.Create, j) +} + +func (s scheduledJobStorageDB) Delete(ctx context.Context, j *ScheduledJob) error { + return s.runAction(ctx, scheduledJobStorageTxn.Delete, j) +} + +func (s scheduledJobStorageDB) Update(ctx context.Context, j *ScheduledJob) error { + return s.runAction(ctx, scheduledJobStorageTxn.Update, j) +} + +type scheduledJobStorageTxn struct{ txn isql.Txn } + +func (s scheduledJobStorageTxn) DeleteByID( + ctx context.Context, env scheduledjobs.JobSchedulerEnv, id int64, +) error { + _, err := s.txn.ExecEx( + ctx, + 
"delete-schedule", + s.txn.KV(), + sessiondata.RootUserSessionDataOverride, + fmt.Sprintf( + "DELETE FROM %s WHERE schedule_id = $1", + env.ScheduledJobsTableName(), + ), + id, + ) + return err +} + +func (s scheduledJobStorageTxn) Load( + ctx context.Context, env scheduledjobs.JobSchedulerEnv, id int64, +) (*ScheduledJob, error) { + row, cols, err := s.txn.QueryRowExWithCols(ctx, "lookup-schedule", s.txn.KV(), + sessiondata.RootUserSessionDataOverride, + fmt.Sprintf("SELECT * FROM %s WHERE schedule_id = %d", + env.ScheduledJobsTableName(), id)) + + if err != nil { + return nil, errors.CombineErrors(err, &scheduledJobNotFoundError{scheduleID: id}) + } + if row == nil { + return nil, &scheduledJobNotFoundError{scheduleID: id} + } + + j := NewScheduledJob(env) + if err := j.InitFromDatums(row, cols); err != nil { + return nil, err + } + return j, nil +} + +func (s scheduledJobStorageTxn) Create(ctx context.Context, j *ScheduledJob) error { if j.rec.ScheduleID != 0 { return errors.New("cannot specify schedule id when creating new cron job") } @@ -377,7 +472,7 @@ func (j *ScheduledJob) Create(ctx context.Context, ex sqlutil.InternalExecutor, return err } - row, retCols, err := ex.QueryRowExWithCols(ctx, "sched-create", txn, + row, retCols, err := s.txn.QueryRowExWithCols(ctx, "sched-create", s.txn.KV(), sessiondata.RootUserSessionDataOverride, fmt.Sprintf("INSERT INTO %s (%s) VALUES(%s) RETURNING schedule_id", j.env.ScheduledJobsTableName(), strings.Join(cols, ","), generatePlaceholders(len(qargs))), @@ -394,9 +489,20 @@ func (j *ScheduledJob) Create(ctx context.Context, ex sqlutil.InternalExecutor, return j.InitFromDatums(row, retCols) } -// Update saves changes made to this schedule. -// If an error is returned, it is callers responsibility to handle it (e.g. rollback transaction). 
-func (j *ScheduledJob) Update(ctx context.Context, ex sqlutil.InternalExecutor, txn *kv.Txn) error { +func (s scheduledJobStorageTxn) Delete(ctx context.Context, j *ScheduledJob) error { + if j.rec.ScheduleID == 0 { + return errors.New("cannot delete schedule: missing schedule id") + } + _, err := s.txn.ExecEx(ctx, "sched-delete", s.txn.KV(), + sessiondata.InternalExecutorOverride{User: username.RootUserName()}, + fmt.Sprintf("DELETE FROM %s WHERE schedule_id = %d", + j.env.ScheduledJobsTableName(), j.ScheduleID()), + ) + + return err +} + +func (s scheduledJobStorageTxn) Update(ctx context.Context, j *ScheduledJob) error { if !j.isDirty() { return nil } @@ -414,7 +520,7 @@ func (j *ScheduledJob) Update(ctx context.Context, ex sqlutil.InternalExecutor, return nil // Nothing changed. } - n, err := ex.ExecEx(ctx, "sched-update", txn, + n, err := s.txn.ExecEx(ctx, "sched-update", s.txn.KV(), sessiondata.RootUserSessionDataOverride, fmt.Sprintf("UPDATE %s SET (%s) = (%s) WHERE schedule_id = %d", j.env.ScheduledJobsTableName(), strings.Join(cols, ","), @@ -433,20 +539,8 @@ func (j *ScheduledJob) Update(ctx context.Context, ex sqlutil.InternalExecutor, return nil } -// Delete removes this schedule. -// If an error is returned, it is callers responsibility to handle it (e.g. rollback transaction). -func (j *ScheduledJob) Delete(ctx context.Context, ex sqlutil.InternalExecutor, txn *kv.Txn) error { - if j.rec.ScheduleID == 0 { - return errors.New("cannot delete schedule: missing schedule id") - } - _, err := ex.ExecEx(ctx, "sched-delete", txn, - sessiondata.RootUserSessionDataOverride, - fmt.Sprintf("DELETE FROM %s WHERE schedule_id = %d", - j.env.ScheduledJobsTableName(), j.ScheduleID()), - ) - - return err -} +var _ ScheduledJobStorage = (*scheduledJobStorageTxn)(nil) +var _ ScheduledJobStorage = (*scheduledJobStorageDB)(nil) // marshalChanges marshals all changes in the in-memory representation and returns // the names of the columns and marshaled values. 
diff --git a/pkg/jobs/scheduled_job_executor.go b/pkg/jobs/scheduled_job_executor.go index 59066cc96e37..b6600bfb4466 100644 --- a/pkg/jobs/scheduled_job_executor.go +++ b/pkg/jobs/scheduled_job_executor.go @@ -14,10 +14,9 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/errors" @@ -29,24 +28,24 @@ type ScheduledJobExecutor interface { // Modifications to the ScheduledJob object will be persisted. ExecuteJob( ctx context.Context, + txn isql.Txn, cfg *scheduledjobs.JobExecutionConfig, env scheduledjobs.JobSchedulerEnv, schedule *ScheduledJob, - txn *kv.Txn, ) error - // Notifies that the system.job started by the ScheduledJob completed. - // Implementation may use provided transaction to perform any additional mutations. - // Modifications to the ScheduledJob object will be persisted. + // NotifyJobTermination notifies that the system.job started by the + // ScheduledJob completed. Implementation may use provided transaction to + // perform any additional mutations. Modifications to the ScheduledJob + // object will be persisted. NotifyJobTermination( ctx context.Context, + txn isql.Txn, jobID jobspb.JobID, jobStatus Status, details jobspb.Details, env scheduledjobs.JobSchedulerEnv, schedule *ScheduledJob, - ex sqlutil.InternalExecutor, - txn *kv.Txn, ) error // Metrics returns optional metric.Struct object for this executor. @@ -57,11 +56,9 @@ type ScheduledJobExecutor interface { // the passed in `schedule`. 
GetCreateScheduleStatement( ctx context.Context, + txn isql.Txn, env scheduledjobs.JobSchedulerEnv, - txn *kv.Txn, - descsCol *descs.Collection, sj *ScheduledJob, - ex sqlutil.InternalExecutor, ) (string, error) } @@ -76,7 +73,7 @@ type ScheduledJobController interface { scheduleControllerEnv scheduledjobs.ScheduleControllerEnv, env scheduledjobs.JobSchedulerEnv, schedule *ScheduledJob, - txn *kv.Txn, + txn isql.Txn, descsCol *descs.Collection, ) (int, error) } @@ -178,19 +175,18 @@ func DefaultHandleFailedRun(schedule *ScheduledJob, fmtOrMsg string, args ...int // with the job status changes. func NotifyJobTermination( ctx context.Context, + txn isql.Txn, env scheduledjobs.JobSchedulerEnv, jobID jobspb.JobID, jobStatus Status, jobDetails jobspb.Details, scheduleID int64, - ex sqlutil.InternalExecutor, - txn *kv.Txn, ) error { if env == nil { env = scheduledjobs.ProdJobSchedulerEnv } - - schedule, err := LoadScheduledJob(ctx, env, scheduleID, ex, txn) + schedules := ScheduledJobTxn(txn) + schedule, err := schedules.Load(ctx, env, scheduleID) if err != nil { return err } @@ -200,11 +196,11 @@ func NotifyJobTermination( } // Delegate handling of the job termination to the executor. - err = executor.NotifyJobTermination(ctx, jobID, jobStatus, jobDetails, env, schedule, ex, txn) + err = executor.NotifyJobTermination(ctx, txn, jobID, jobStatus, jobDetails, env, schedule) if err != nil { return err } // Update this schedule in case executor made changes to it. 
- return schedule.Update(ctx, ex, txn) + return schedules.Update(ctx, schedule) } diff --git a/pkg/jobs/scheduled_job_executor_test.go b/pkg/jobs/scheduled_job_executor_test.go index c2b585c39a28..373ebb29e3d8 100644 --- a/pkg/jobs/scheduled_job_executor_test.go +++ b/pkg/jobs/scheduled_job_executor_test.go @@ -15,10 +15,8 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/metric" @@ -32,11 +30,11 @@ type statusTrackingExecutor struct { } func (s *statusTrackingExecutor) ExecuteJob( - _ context.Context, - _ *scheduledjobs.JobExecutionConfig, - _ scheduledjobs.JobSchedulerEnv, - _ *ScheduledJob, - _ *kv.Txn, + ctx context.Context, + txn isql.Txn, + cfg *scheduledjobs.JobExecutionConfig, + env scheduledjobs.JobSchedulerEnv, + schedule *ScheduledJob, ) error { s.numExec++ return nil @@ -44,13 +42,12 @@ func (s *statusTrackingExecutor) ExecuteJob( func (s *statusTrackingExecutor) NotifyJobTermination( ctx context.Context, + txn isql.Txn, jobID jobspb.JobID, jobStatus Status, - _ jobspb.Details, + details jobspb.Details, env scheduledjobs.JobSchedulerEnv, schedule *ScheduledJob, - ex sqlutil.InternalExecutor, - txn *kv.Txn, ) error { s.counts[jobStatus]++ return nil @@ -61,12 +58,7 @@ func (s *statusTrackingExecutor) Metrics() metric.Struct { } func (s *statusTrackingExecutor) GetCreateScheduleStatement( - ctx context.Context, - env scheduledjobs.JobSchedulerEnv, - txn *kv.Txn, - descsCol *descs.Collection, - sj *ScheduledJob, - ex sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, env scheduledjobs.JobSchedulerEnv, sj *ScheduledJob, ) 
(string, error) { return "", errors.AssertionFailedf("unimplemented method: 'GetCreateScheduleStatement'") } @@ -103,12 +95,16 @@ func TestJobTerminationNotification(t *testing.T) { // Create a single job. schedule := h.newScheduledJobForExecutor("test_job", executorName, nil) ctx := context.Background() - require.NoError(t, schedule.Create(ctx, h.cfg.InternalExecutor, nil)) + schedules := ScheduledJobDB(h.cfg.DB) + require.NoError(t, schedules.Create(ctx, schedule)) // Pretend it completes multiple runs with terminal statuses. for _, s := range []Status{StatusCanceled, StatusFailed, StatusSucceeded} { - require.NoError(t, NotifyJobTermination( - ctx, h.env, 123, s, nil, schedule.ScheduleID(), h.cfg.InternalExecutor, nil)) + require.NoError(t, h.cfg.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return NotifyJobTermination( + ctx, txn, h.env, 123, s, nil, schedule.ScheduleID(), + ) + })) } // Verify counts. diff --git a/pkg/jobs/scheduled_job_test.go b/pkg/jobs/scheduled_job_test.go index 4b9dab4181d4..3730d0dddf93 100644 --- a/pkg/jobs/scheduled_job_test.go +++ b/pkg/jobs/scheduled_job_test.go @@ -27,9 +27,10 @@ func TestCreateScheduledJob(t *testing.T) { h, cleanup := newTestHelper(t) defer cleanup() + schedules := ScheduledJobDB(h.cfg.DB) j := h.newScheduledJob(t, "test_job", "test sql") require.NoError(t, j.SetSchedule("@daily")) - require.NoError(t, j.Create(context.Background(), h.cfg.InternalExecutor, nil)) + require.NoError(t, schedules.Create(context.Background(), j)) require.True(t, j.ScheduleID() > 0) } @@ -41,8 +42,9 @@ func TestCreatePausedScheduledJob(t *testing.T) { j := h.newScheduledJob(t, "test_job", "test sql") require.NoError(t, j.SetSchedule("@daily")) + schedules := ScheduledJobDB(h.cfg.DB) j.Pause() - require.NoError(t, j.Create(context.Background(), h.cfg.InternalExecutor, nil)) + require.NoError(t, schedules.Create(context.Background(), j)) require.True(t, j.ScheduleID() > 0) require.True(t, j.NextRun().Equal(time.Time{})) } 
@@ -61,8 +63,8 @@ func TestSetsSchedule(t *testing.T) { // The job is expected to run at midnight the next day. // We want to ensure nextRun correctly persisted in the cron table. expectedNextRun := h.env.Now().Truncate(24 * time.Hour).Add(24 * time.Hour) - - require.NoError(t, j.Create(context.Background(), h.cfg.InternalExecutor, nil)) + schedules := ScheduledJobDB(h.cfg.DB) + require.NoError(t, schedules.Create(context.Background(), j)) loaded := h.loadSchedule(t, j.ScheduleID()) require.Equal(t, j.ScheduleID(), loaded.ScheduleID()) @@ -79,7 +81,8 @@ func TestCreateOneOffJob(t *testing.T) { j := h.newScheduledJob(t, "test_job", "test sql") j.SetNextRun(timeutil.Now()) - require.NoError(t, j.Create(context.Background(), h.cfg.InternalExecutor, nil)) + schedules := ScheduledJobDB(h.cfg.DB) + require.NoError(t, schedules.Create(context.Background(), j)) require.True(t, j.ScheduleID() > 0) loaded := h.loadSchedule(t, j.ScheduleID()) @@ -93,14 +96,15 @@ func TestPauseUnpauseJob(t *testing.T) { h, cleanup := newTestHelper(t) defer cleanup() + schedules := ScheduledJobDB(h.cfg.DB) ctx := context.Background() j := h.newScheduledJob(t, "test_job", "test sql") require.NoError(t, j.SetSchedule("@daily")) - require.NoError(t, j.Create(ctx, h.cfg.InternalExecutor, nil)) + require.NoError(t, schedules.Create(ctx, j)) // Pause and save. j.Pause() - require.NoError(t, j.Update(ctx, h.cfg.InternalExecutor, nil)) + require.NoError(t, schedules.Update(ctx, j)) // Verify job is paused loaded := h.loadSchedule(t, j.ScheduleID()) @@ -109,7 +113,7 @@ func TestPauseUnpauseJob(t *testing.T) { // Un-pausing the job resets next run time. 
require.NoError(t, j.ScheduleNextRun()) - require.NoError(t, j.Update(ctx, h.cfg.InternalExecutor, nil)) + require.NoError(t, schedules.Update(ctx, j)) // Verify job is no longer paused loaded = h.loadSchedule(t, j.ScheduleID()) diff --git a/pkg/jobs/test_helpers.go b/pkg/jobs/test_helpers.go index b9c58db1e299..43b027517232 100644 --- a/pkg/jobs/test_helpers.go +++ b/pkg/jobs/test_helpers.go @@ -14,8 +14,8 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server/tracedumper" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/log" ) @@ -44,7 +44,7 @@ func WithJobID(jobID jobspb.JobID) TestCreateAndStartJobOption { // RegisterConstructor. The ctx passed to this function is not the context the // job will be started with (canceling ctx will not cause the job to cancel). func TestingCreateAndStartJob( - ctx context.Context, r *Registry, db *kv.DB, record Record, opts ...TestCreateAndStartJobOption, + ctx context.Context, r *Registry, db isql.DB, record Record, opts ...TestCreateAndStartJobOption, ) (*StartableJob, error) { var rj *StartableJob c := config{ @@ -53,7 +53,7 @@ func TestingCreateAndStartJob( for _, opt := range opts { opt(&c) } - if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + if err := db.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { return r.CreateStartableJobWithTxn(ctx, &rj, c.jobID, txn, record) }); err != nil { if rj != nil { diff --git a/pkg/jobs/testutils_test.go b/pkg/jobs/testutils_test.go index 768a7d2042cc..c5eb20f78155 100644 --- a/pkg/jobs/testutils_test.go +++ b/pkg/jobs/testutils_test.go @@ -18,13 +18,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/jobs/jobstest" - "github.com/cockroachdb/cockroach/pkg/kv" 
"github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/timeutil" @@ -82,7 +81,7 @@ func newTestHelperForTables( argsFn(&args) } - s, db, kvDB := serverutils.StartServer(t, args) + s, db, _ := serverutils.StartServer(t, args) sqlDB := sqlutils.MakeSQLRunner(db) @@ -96,10 +95,9 @@ func newTestHelperForTables( env: env, server: s, cfg: &scheduledjobs.JobExecutionConfig{ - Settings: s.ClusterSettings(), - InternalExecutor: s.InternalExecutor().(sqlutil.InternalExecutor), - DB: kvDB, - TestingKnobs: knobs, + Settings: s.ClusterSettings(), + DB: s.InternalDB().(isql.DB), + TestingKnobs: knobs, }, sqlDB: sqlDB, execSchedules: execSchedules, @@ -139,7 +137,7 @@ func (h *testHelper) newScheduledJobForExecutor( // loadSchedule loads all columns for the specified scheduled job. func (h *testHelper) loadSchedule(t *testing.T, id int64) *ScheduledJob { j := NewScheduledJob(h.env) - row, cols, err := h.cfg.InternalExecutor.QueryRowExWithCols( + row, cols, err := h.cfg.DB.Executor().QueryRowExWithCols( context.Background(), "sched-load", nil, sessiondata.RootUserSessionDataOverride, fmt.Sprintf( @@ -171,10 +169,10 @@ func registerScopedScheduledJobExecutor(name string, ex ScheduledJobExecutor) fu // addFakeJob adds a fake job associated with the specified scheduleID. // Returns the id of the newly created job. 
func addFakeJob( - t *testing.T, h *testHelper, scheduleID int64, status Status, txn *kv.Txn, + t *testing.T, h *testHelper, scheduleID int64, status Status, txn isql.Txn, ) jobspb.JobID { payload := []byte("fake payload") - datums, err := h.cfg.InternalExecutor.QueryRowEx(context.Background(), "fake-job", txn, + datums, err := txn.QueryRowEx(context.Background(), "fake-job", txn.KV(), sessiondata.RootUserSessionDataOverride, fmt.Sprintf(` INSERT INTO %s (created_by_type, created_by_id, status, payload) diff --git a/pkg/jobs/update.go b/pkg/jobs/update.go index 37e7f4b2d23c..6a9cf39ad3b3 100644 --- a/pkg/jobs/update.go +++ b/pkg/jobs/update.go @@ -18,7 +18,8 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/security/username" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" @@ -37,7 +38,210 @@ import ( // // The function is free to modify contents of JobMetadata in place (but the // changes will be ignored unless JobUpdater is used). 
-type UpdateFn func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error +type UpdateFn func(txn isql.Txn, md JobMetadata, ju *JobUpdater) error + +type Updater struct { + j *Job + txn isql.Txn +} + +func (j *Job) NoTxn() Updater { + return Updater{j: j} +} + +func (j *Job) WithTxn(txn isql.Txn) Updater { + return Updater{j: j, txn: txn} +} + +func (j *Job) maybeWithTxn(txn isql.Txn) Updater { + if txn != nil { + return j.WithTxn(txn) + } + return j.NoTxn() +} + +func (u Updater) update(ctx context.Context, useReadLock bool, updateFn UpdateFn) (retErr error) { + if u.txn == nil { + return u.j.registry.internalDB.Txn(ctx, func( + ctx context.Context, txn isql.Txn, + ) error { + u.txn = txn + return u.update(ctx, useReadLock, updateFn) + }) + } + ctx, sp := tracing.ChildSpan(ctx, "update-job") + defer sp.Finish() + + var payload *jobspb.Payload + var progress *jobspb.Progress + var status Status + var runStats *RunStats + j := u.j + defer func() { + if retErr != nil { + retErr = errors.Wrapf(retErr, "job %d", j.id) + return + } + j.mu.Lock() + defer j.mu.Unlock() + if payload != nil { + j.mu.payload = *payload + } + if progress != nil { + j.mu.progress = *progress + } + if runStats != nil { + j.mu.runStats = runStats + } + if status != "" { + j.mu.status = status + } + }() + + row, err := u.txn.QueryRowEx( + ctx, "select-job", u.txn.KV(), + sessiondata.RootUserSessionDataOverride, + getSelectStmtForJobUpdate(j.session != nil, useReadLock), j.ID(), + ) + if err != nil { + return err + } + if row == nil { + return errors.Errorf("not found in system.jobs table") + } + + if status, err = unmarshalStatus(row[0]); err != nil { + return err + } + if payload, err = UnmarshalPayload(row[1]); err != nil { + return err + } + if progress, err = UnmarshalProgress(row[2]); err != nil { + return err + } + if j.session != nil { + if row[3] == tree.DNull { + return errors.Errorf( + "with status %q: expected session %q but found NULL", + status, j.session.ID()) + } + storedSession := 
[]byte(*row[3].(*tree.DBytes)) + if !bytes.Equal(storedSession, j.session.ID().UnsafeBytes()) { + return errors.Errorf( + "with status %q: expected session %q but found %q", + status, j.session.ID(), sqlliveness.SessionID(storedSession)) + } + } else { + log.VInfof(ctx, 1, "job %d: update called with no session ID", j.ID()) + } + + md := JobMetadata{ + ID: j.ID(), + Status: status, + Payload: payload, + Progress: progress, + } + + offset := 0 + if j.session != nil { + offset = 1 + } + var lastRun *tree.DTimestamp + var ok bool + lastRun, ok = row[3+offset].(*tree.DTimestamp) + if !ok { + return errors.AssertionFailedf("expected timestamp last_run, but got %T", lastRun) + } + var numRuns *tree.DInt + numRuns, ok = row[4+offset].(*tree.DInt) + if !ok { + return errors.AssertionFailedf("expected int num_runs, but got %T", numRuns) + } + md.RunStats = &RunStats{ + NumRuns: int(*numRuns), + LastRun: lastRun.Time, + } + + var ju JobUpdater + if err := updateFn(u.txn, md, &ju); err != nil { + return err + } + if j.registry.knobs.BeforeUpdate != nil { + if err := j.registry.knobs.BeforeUpdate(md, ju.md); err != nil { + return err + } + } + + if !ju.hasUpdates() { + return nil + } + + // Build a statement of the following form, depending on which properties + // need updating: + // + // UPDATE system.jobs + // SET + // [status = $2,] + // [payload = $y,] + // [progress = $z] + // WHERE + // id = $1 + + var setters []string + params := []interface{}{j.ID()} // $1 is always the job ID. 
+ addSetter := func(column string, value interface{}) { + params = append(params, value) + setters = append(setters, fmt.Sprintf("%s = $%d", column, len(params))) + } + + if ju.md.Status != "" { + addSetter("status", ju.md.Status) + } + + if ju.md.Payload != nil { + payload = ju.md.Payload + payloadBytes, err := protoutil.Marshal(payload) + if err != nil { + return err + } + addSetter("payload", payloadBytes) + } + + if ju.md.Progress != nil { + progress = ju.md.Progress + progress.ModifiedMicros = timeutil.ToUnixMicros(u.now()) + progressBytes, err := protoutil.Marshal(progress) + if err != nil { + return err + } + addSetter("progress", progressBytes) + } + + if ju.md.RunStats != nil { + runStats = ju.md.RunStats + addSetter("last_run", ju.md.RunStats.LastRun) + addSetter("num_runs", ju.md.RunStats.NumRuns) + } + + updateStmt := fmt.Sprintf( + "UPDATE system.jobs SET %s WHERE id = $1", + strings.Join(setters, ", "), + ) + n, err := u.txn.ExecEx( + ctx, "job-update", u.txn.KV(), + sessiondata.InternalExecutorOverride{User: username.NodeUserName()}, + updateStmt, params..., + ) + if err != nil { + return err + } + if n != 1 { + return errors.Errorf( + "expected exactly one row affected, but %d rows affected by job update", n, + ) + } + return nil +} // RunStats consists of job-run statistics: num of runs and last-run timestamp. type RunStats struct { @@ -134,183 +338,13 @@ func UpdateHighwaterProgressed(highWater hlc.Timestamp, md JobMetadata, ju *JobU // // Note that there are various convenience wrappers (like FractionProgressed) // defined in jobs.go. 
-func (j *Job) Update(ctx context.Context, txn *kv.Txn, updateFn UpdateFn) error { +func (u Updater) Update(ctx context.Context, updateFn UpdateFn) error { const useReadLock = false - return j.update(ctx, txn, useReadLock, updateFn) + return u.update(ctx, useReadLock, updateFn) } -func (j *Job) update(ctx context.Context, txn *kv.Txn, useReadLock bool, updateFn UpdateFn) error { - ctx, sp := tracing.ChildSpan(ctx, "update-job") - defer sp.Finish() - - var payload *jobspb.Payload - var progress *jobspb.Progress - var status Status - var runStats *RunStats - - if err := j.runInTxn(ctx, txn, func(ctx context.Context, txn *kv.Txn) error { - payload, progress, runStats = nil, nil, nil - var err error - var row tree.Datums - row, err = j.registry.ex.QueryRowEx( - ctx, "select-job", txn, - sessiondata.RootUserSessionDataOverride, - getSelectStmtForJobUpdate(j.session != nil, useReadLock), j.ID(), - ) - if err != nil { - return err - } - if row == nil { - return errors.Errorf("not found in system.jobs table") - } - - if status, err = unmarshalStatus(row[0]); err != nil { - return err - } - if payload, err = UnmarshalPayload(row[1]); err != nil { - return err - } - if progress, err = UnmarshalProgress(row[2]); err != nil { - return err - } - if j.session != nil { - if row[3] == tree.DNull { - return errors.Errorf( - "with status %q: expected session %q but found NULL", - status, j.session.ID()) - } - storedSession := []byte(*row[3].(*tree.DBytes)) - if !bytes.Equal(storedSession, j.session.ID().UnsafeBytes()) { - return errors.Errorf( - "with status %q: expected session %q but found %q", - status, j.session.ID(), sqlliveness.SessionID(storedSession)) - } - } else { - log.VInfof(ctx, 1, "job %d: update called with no session ID", j.ID()) - } - - md := JobMetadata{ - ID: j.ID(), - Status: status, - Payload: payload, - Progress: progress, - } - - offset := 0 - if j.session != nil { - offset = 1 - } - var lastRun *tree.DTimestamp - var ok bool - lastRun, ok = 
row[3+offset].(*tree.DTimestamp) - if !ok { - return errors.AssertionFailedf("expected timestamp last_run, but got %T", lastRun) - } - var numRuns *tree.DInt - numRuns, ok = row[4+offset].(*tree.DInt) - if !ok { - return errors.AssertionFailedf("expected int num_runs, but got %T", numRuns) - } - md.RunStats = &RunStats{ - NumRuns: int(*numRuns), - LastRun: lastRun.Time, - } - - var ju JobUpdater - if err := updateFn(txn, md, &ju); err != nil { - return err - } - if j.registry.knobs.BeforeUpdate != nil { - if err := j.registry.knobs.BeforeUpdate(md, ju.md); err != nil { - return err - } - } - - if !ju.hasUpdates() { - return nil - } - - // Build a statement of the following form, depending on which properties - // need updating: - // - // UPDATE system.jobs - // SET - // [status = $2,] - // [payload = $y,] - // [progress = $z] - // WHERE - // id = $1 - - var setters []string - params := []interface{}{j.ID()} // $1 is always the job ID. - addSetter := func(column string, value interface{}) { - params = append(params, value) - setters = append(setters, fmt.Sprintf("%s = $%d", column, len(params))) - } - - if ju.md.Status != "" { - addSetter("status", ju.md.Status) - } - - if ju.md.Payload != nil { - payload = ju.md.Payload - payloadBytes, err := protoutil.Marshal(payload) - if err != nil { - return err - } - addSetter("payload", payloadBytes) - } - - if ju.md.Progress != nil { - progress = ju.md.Progress - progress.ModifiedMicros = timeutil.ToUnixMicros(txn.ReadTimestamp().GoTime()) - progressBytes, err := protoutil.Marshal(progress) - if err != nil { - return err - } - addSetter("progress", progressBytes) - } - - if ju.md.RunStats != nil { - runStats = ju.md.RunStats - addSetter("last_run", ju.md.RunStats.LastRun) - addSetter("num_runs", ju.md.RunStats.NumRuns) - } - - updateStmt := fmt.Sprintf( - "UPDATE system.jobs SET %s WHERE id = $1", - strings.Join(setters, ", "), - ) - n, err := j.registry.ex.Exec(ctx, "job-update", txn, updateStmt, params...) 
- if err != nil { - return err - } - if n != 1 { - return errors.Errorf( - "expected exactly one row affected, but %d rows affected by job update", n, - ) - } - return nil - }); err != nil { - return errors.Wrapf(err, "job %d", j.id) - } - func() { - j.mu.Lock() - defer j.mu.Unlock() - if payload != nil { - j.mu.payload = *payload - } - if progress != nil { - j.mu.progress = *progress - } - if runStats != nil { - j.mu.runStats = runStats - } - if status != "" { - j.mu.status = status - } - }() - return nil +func (u Updater) now() time.Time { + return u.j.registry.clock.Now().GoTime() } // getSelectStmtForJobUpdate constructs the select statement used in Job.update. diff --git a/pkg/jobs/utils.go b/pkg/jobs/utils.go index 932ca7567252..ad9935ead054 100644 --- a/pkg/jobs/utils.go +++ b/pkg/jobs/utils.go @@ -14,9 +14,8 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/errors" ) @@ -28,8 +27,7 @@ import ( func RunningJobExists( ctx context.Context, jobID jobspb.JobID, - ie sqlutil.InternalExecutor, - txn *kv.Txn, + txn isql.Txn, payloadPredicate func(payload *jobspb.Payload) bool, ) (exists bool, retErr error) { const stmt = ` @@ -41,10 +39,10 @@ WHERE status IN ` + NonTerminalStatusTupleString + ` ORDER BY created` - it, err := ie.QueryIterator( + it, err := txn.QueryIterator( ctx, "get-jobs", - txn, + txn.KV(), stmt, ) if err != nil { diff --git a/pkg/jobs/wait.go b/pkg/jobs/wait.go index 1f00591df5ab..90227c572dba 100644 --- a/pkg/jobs/wait.go +++ b/pkg/jobs/wait.go @@ -20,7 +20,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" 
"github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/retry" "github.com/cockroachdb/cockroach/pkg/util/timeutil" @@ -54,31 +53,24 @@ func (r *Registry) NotifyToResume(ctx context.Context, jobs ...jobspb.JobID) { // WaitForJobs waits for a given list of jobs to reach some sort // of terminal state. -func (r *Registry) WaitForJobs( - ctx context.Context, ex sqlutil.InternalExecutor, jobs []jobspb.JobID, -) error { +func (r *Registry) WaitForJobs(ctx context.Context, jobs []jobspb.JobID) error { log.Infof(ctx, "waiting for %d %v queued jobs to complete", len(jobs), jobs) jobFinishedLocally, cleanup := r.installWaitingSet(jobs...) defer cleanup() - return r.waitForJobs(ctx, ex, jobs, jobFinishedLocally) + return r.waitForJobs(ctx, jobs, jobFinishedLocally) } // WaitForJobsIgnoringJobErrors is like WaitForJobs but it only // returns an error in the case that polling the jobs table fails. -func (r *Registry) WaitForJobsIgnoringJobErrors( - ctx context.Context, ex sqlutil.InternalExecutor, jobs []jobspb.JobID, -) error { +func (r *Registry) WaitForJobsIgnoringJobErrors(ctx context.Context, jobs []jobspb.JobID) error { log.Infof(ctx, "waiting for %d %v queued jobs to complete", len(jobs), jobs) jobFinishedLocally, cleanup := r.installWaitingSet(jobs...) 
defer cleanup() - return r.waitForJobsToBeTerminalOrPaused(ctx, ex, jobs, jobFinishedLocally) + return r.waitForJobsToBeTerminalOrPaused(ctx, jobs, jobFinishedLocally) } func (r *Registry) waitForJobsToBeTerminalOrPaused( - ctx context.Context, - ex sqlutil.InternalExecutor, - jobs []jobspb.JobID, - jobFinishedLocally <-chan struct{}, + ctx context.Context, jobs []jobspb.JobID, jobFinishedLocally <-chan struct{}, ) error { if len(jobs) == 0 { return nil @@ -115,7 +107,7 @@ func (r *Registry) waitForJobsToBeTerminalOrPaused( if fn := r.knobs.BeforeWaitForJobsQuery; fn != nil { fn() } - row, err := ex.QueryRowEx( + row, err := r.db.Executor().QueryRowEx( ctx, "poll-show-jobs", nil, /* txn */ @@ -140,10 +132,7 @@ func (r *Registry) waitForJobsToBeTerminalOrPaused( } func (r *Registry) waitForJobs( - ctx context.Context, - ex sqlutil.InternalExecutor, - jobs []jobspb.JobID, - jobFinishedLocally <-chan struct{}, + ctx context.Context, jobs []jobspb.JobID, jobFinishedLocally <-chan struct{}, ) error { if len(jobs) == 0 { return nil @@ -154,7 +143,7 @@ func (r *Registry) waitForJobs( len(jobs), jobs, timeutil.Since(start)) }() - if err := r.waitForJobsToBeTerminalOrPaused(ctx, ex, jobs, jobFinishedLocally); err != nil { + if err := r.waitForJobsToBeTerminalOrPaused(ctx, jobs, jobFinishedLocally); err != nil { return err } @@ -203,16 +192,14 @@ func makeWaitForJobsQuery(jobs []jobspb.JobID) string { // Run starts previously unstarted jobs from a list of scheduled // jobs. Canceling ctx interrupts the waiting but doesn't cancel the jobs. -func (r *Registry) Run( - ctx context.Context, ex sqlutil.InternalExecutor, jobs []jobspb.JobID, -) error { +func (r *Registry) Run(ctx context.Context, jobs []jobspb.JobID) error { if len(jobs) == 0 { return nil } done, cleanup := r.installWaitingSet(jobs...) defer cleanup() r.NotifyToResume(ctx, jobs...) 
- return r.waitForJobs(ctx, ex, jobs, done) + return r.waitForJobs(ctx, jobs, done) } // jobWaitingSets stores the set of waitingSets currently waiting on a job ID. diff --git a/pkg/kv/db.go b/pkg/kv/db.go index 4c4dc3463881..6f1126935f7b 100644 --- a/pkg/kv/db.go +++ b/pkg/kv/db.go @@ -895,7 +895,9 @@ func (db *DB) NewTxn(ctx context.Context, debugName string) *Txn { // conscious about what they want. func (db *DB) Txn(ctx context.Context, retryable func(context.Context, *Txn) error) error { return db.TxnWithAdmissionControl( - ctx, roachpb.AdmissionHeader_OTHER, admissionpb.NormalPri, retryable) + ctx, roachpb.AdmissionHeader_OTHER, admissionpb.NormalPri, + SteppingDisabled, retryable, + ) } // TxnWithAdmissionControl is like Txn, but uses a configurable admission @@ -904,6 +906,7 @@ func (db *DB) TxnWithAdmissionControl( ctx context.Context, source roachpb.AdmissionHeader_Source, priority admissionpb.WorkPriority, + steppingMode SteppingMode, retryable func(context.Context, *Txn) error, ) error { // TODO(radu): we should open a tracing Span here (we need to figure out how @@ -915,6 +918,7 @@ func (db *DB) TxnWithAdmissionControl( nodeID, _ := db.ctx.NodeID.OptionalNodeID() // zero if not available txn := NewTxnWithAdmissionControl(ctx, db, nodeID, source, priority) txn.SetDebugName("unnamed") + txn.ConfigureStepping(ctx, steppingMode) return runTxn(ctx, txn, retryable) } diff --git a/pkg/kv/kvclient/rangefeed/db_adapter.go b/pkg/kv/kvclient/rangefeed/db_adapter.go index 67e35d410b15..4d5c0534102d 100644 --- a/pkg/kv/kvclient/rangefeed/db_adapter.go +++ b/pkg/kv/kvclient/rangefeed/db_adapter.go @@ -187,6 +187,7 @@ func (dbc *dbAdapter) scanSpan( return dbc.db.TxnWithAdmissionControl(ctx, roachpb.AdmissionHeader_ROOT_KV, admissionPri, + kv.SteppingDisabled, func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetFixedTimestamp(ctx, asOf); err != nil { return err diff --git a/pkg/kv/kvserver/BUILD.bazel b/pkg/kv/kvserver/BUILD.bazel index 
e0f0c91fa734..bcfafec221b8 100644 --- a/pkg/kv/kvserver/BUILD.bazel +++ b/pkg/kv/kvserver/BUILD.bazel @@ -398,10 +398,10 @@ go_test( "//pkg/sql/catalog/dbdesc", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/systemschema", + "//pkg/sql/isql", "//pkg/sql/rowenc/keyside", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/storage", "//pkg/storage/enginepb", "//pkg/storage/fs", diff --git a/pkg/kv/kvserver/client_protectedts_test.go b/pkg/kv/kvserver/client_protectedts_test.go index b4ec681cd048..a76965347732 100644 --- a/pkg/kv/kvserver/client_protectedts_test.go +++ b/pkg/kv/kvserver/client_protectedts_test.go @@ -22,8 +22,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptstorage" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptutil" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" @@ -173,16 +173,15 @@ ORDER BY raw_start_key ASC LIMIT 1`) beforeWrites := s0.Clock().Now() gcSoon() - pts := ptstorage.New(s0.ClusterSettings(), s0.InternalExecutor().(*sql.InternalExecutor), - nil /* knobs */) - ptsWithDB := ptstorage.WithDatabase(pts, s0.DB()) + pts := ptstorage.New(s0.ClusterSettings(), nil) + ptsWithDB := ptstorage.WithDatabase(pts, s0.InternalDB().(isql.DB)) ptsRec := ptpb.Record{ ID: uuid.MakeV4().GetBytes(), Timestamp: s0.Clock().Now(), Mode: ptpb.PROTECT_AFTER, Target: ptpb.MakeSchemaObjectsTarget([]descpb.ID{getTableID()}), } - require.NoError(t, ptsWithDB.Protect(ctx, nil /* txn */, &ptsRec)) + require.NoError(t, ptsWithDB.Protect(ctx, &ptsRec)) upsertUntilBackpressure() // We need to be careful choosing a time. 
We're a little limited because the // ttl is defined in seconds and we need to wait for the threshold to be @@ -220,8 +219,8 @@ ORDER BY raw_start_key ASC LIMIT 1`) failedRec.ID = uuid.MakeV4().GetBytes() failedRec.Timestamp = beforeWrites failedRec.Timestamp.Logical = 0 - require.NoError(t, ptsWithDB.Protect(ctx, nil /* txn */, &failedRec)) - _, err = ptsWithDB.GetRecord(ctx, nil /* txn */, failedRec.ID.GetUUID()) + require.NoError(t, ptsWithDB.Protect(ctx, &failedRec)) + _, err = ptsWithDB.GetRecord(ctx, failedRec.ID.GetUUID()) require.NoError(t, err) // Verify that the record did indeed make its way down into KV where the @@ -239,7 +238,7 @@ ORDER BY raw_start_key ASC LIMIT 1`) laterRec.ID = uuid.MakeV4().GetBytes() laterRec.Timestamp = afterWrites laterRec.Timestamp.Logical = 0 - require.NoError(t, ptsWithDB.Protect(ctx, nil /* txn */, &laterRec)) + require.NoError(t, ptsWithDB.Protect(ctx, &laterRec)) require.NoError( t, ptutil.TestingVerifyProtectionTimestampExistsOnSpans( @@ -249,7 +248,7 @@ ORDER BY raw_start_key ASC LIMIT 1`) // Release the record that had succeeded and ensure that GC eventually // happens up to the protected timestamp of the new record. - require.NoError(t, ptsWithDB.Release(ctx, nil, ptsRec.ID.GetUUID())) + require.NoError(t, ptsWithDB.Release(ctx, ptsRec.ID.GetUUID())) testutils.SucceedsSoon(t, func() error { trace, _, err = s.Enqueue(ctx, "mvccGC", repl, false /* skipShouldQueue */, false /* async */) require.NoError(t, err) @@ -265,9 +264,9 @@ ORDER BY raw_start_key ASC LIMIT 1`) }) // Release the failed record. 
- require.NoError(t, ptsWithDB.Release(ctx, nil, failedRec.ID.GetUUID())) - require.NoError(t, ptsWithDB.Release(ctx, nil, laterRec.ID.GetUUID())) - state, err := ptsWithDB.GetState(ctx, nil) + require.NoError(t, ptsWithDB.Release(ctx, failedRec.ID.GetUUID())) + require.NoError(t, ptsWithDB.Release(ctx, laterRec.ID.GetUUID())) + state, err := ptsWithDB.GetState(ctx) require.NoError(t, err) require.Len(t, state.Records, 0) require.Equal(t, int(state.NumRecords), len(state.Records)) diff --git a/pkg/kv/kvserver/client_replica_test.go b/pkg/kv/kvserver/client_replica_test.go index ff76e0860151..94f0596ccf11 100644 --- a/pkg/kv/kvserver/client_replica_test.go +++ b/pkg/kv/kvserver/client_replica_test.go @@ -47,6 +47,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/bootstrap" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/kvclientutils" @@ -4317,6 +4318,7 @@ func TestStrictGCEnforcement(t *testing.T) { require.NoError(t, r.ReadProtectedTimestampsForTesting(ctx)) } } + insqlDB = tc.Server(0).InternalDB().(isql.DB) ) { @@ -4451,12 +4453,12 @@ func TestStrictGCEnforcement(t *testing.T) { // Create a protected timestamp, and make sure it's not respected since the // KVSubscriber is blocked. 
rec := mkRecord() - require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return ptp.Protect(ctx, txn, &rec) + require.NoError(t, insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return ptp.WithTxn(txn).Protect(ctx, &rec) })) defer func() { - require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return ptp.Release(ctx, txn, rec.ID.GetUUID()) + require.NoError(t, insqlDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return ptp.WithTxn(txn).Release(ctx, rec.ID.GetUUID()) })) }() assertScanRejected(t) diff --git a/pkg/kv/kvserver/client_spanconfigs_test.go b/pkg/kv/kvserver/client_spanconfigs_test.go index 417892d01f7a..ef14f6557d6b 100644 --- a/pkg/kv/kvserver/client_spanconfigs_test.go +++ b/pkg/kv/kvserver/client_spanconfigs_test.go @@ -21,8 +21,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/spanconfig/spanconfigstore" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -61,7 +61,7 @@ func TestSpanConfigUpdateAppliedToReplica(t *testing.T) { s, _, _ := serverutils.StartServer(t, args) defer s.Stopper().Stop(context.Background()) - _, err := s.InternalExecutor().(sqlutil.InternalExecutor).ExecEx(ctx, "inline-exec", nil, + _, err := s.InternalExecutor().(isql.Executor).ExecEx(ctx, "inline-exec", nil, sessiondata.RootUserSessionDataOverride, `SET CLUSTER SETTING spanconfig.store.enabled = true`) require.NoError(t, err) @@ -124,7 +124,7 @@ func TestFallbackSpanConfigOverride(t *testing.T) { s, _, _ := serverutils.StartServer(t, args) defer s.Stopper().Stop(context.Background()) - _, err := 
s.InternalExecutor().(sqlutil.InternalExecutor).ExecEx(ctx, "inline-exec", nil, + _, err := s.InternalDB().(isql.DB).Executor().ExecEx(ctx, "inline-exec", nil, sessiondata.RootUserSessionDataOverride, `SET CLUSTER SETTING spanconfig.store.enabled = true`) require.NoError(t, err) diff --git a/pkg/kv/kvserver/protectedts/BUILD.bazel b/pkg/kv/kvserver/protectedts/BUILD.bazel index d28ca25f5e44..da471647b7c7 100644 --- a/pkg/kv/kvserver/protectedts/BUILD.bazel +++ b/pkg/kv/kvserver/protectedts/BUILD.bazel @@ -12,11 +12,11 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/base", - "//pkg/kv", "//pkg/kv/kvserver/protectedts/ptpb", "//pkg/roachpb", "//pkg/settings", "//pkg/spanconfig", + "//pkg/sql/isql", "//pkg/util/hlc", "//pkg/util/metric", "//pkg/util/stop", diff --git a/pkg/kv/kvserver/protectedts/protectedts.go b/pkg/kv/kvserver/protectedts/protectedts.go index 2fedb889f32f..c27e145225a9 100644 --- a/pkg/kv/kvserver/protectedts/protectedts.go +++ b/pkg/kv/kvserver/protectedts/protectedts.go @@ -15,10 +15,10 @@ package protectedts import ( "context" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/spanconfig" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/cockroach/pkg/util/stop" @@ -37,7 +37,7 @@ var ErrExists = errors.New("protected timestamp record already exists") // Provider is the central coordinator for the protectedts subsystem. // It exists to abstract interaction with subsystem. 
type Provider interface { - Storage + Manager Cache Reconciler @@ -45,6 +45,10 @@ type Provider interface { Metrics() metric.Struct } +type Manager interface { + WithTxn(txn isql.Txn) Storage +} + // Storage provides clients with a mechanism to transactionally protect and // release protected timestamps for a set of spans. // @@ -67,7 +71,7 @@ type Storage interface { // // An error will be returned if the ID of the provided record already exists // so callers should be sure to generate new IDs when creating records. - Protect(context.Context, *kv.Txn, *ptpb.Record) error + Protect(context.Context, *ptpb.Record) error // GetRecord retreives the record with the specified UUID as well as the MVCC // timestamp at which it was written. If no corresponding record exists @@ -78,30 +82,30 @@ type Storage interface { // should be protected as well as the timestamp at which the Record providing // that protection is known to be alive. The ReadTimestamp of the Txn used in // this method can be used to provide such a timestamp. - GetRecord(context.Context, *kv.Txn, uuid.UUID) (*ptpb.Record, error) + GetRecord(context.Context, uuid.UUID) (*ptpb.Record, error) // MarkVerified will mark a protected timestamp as verified. // // This method is generally used by an implementation of Verifier. - MarkVerified(context.Context, *kv.Txn, uuid.UUID) error + MarkVerified(context.Context, uuid.UUID) error // Release allows spans which were previously protected to now be garbage // collected. // // If the specified UUID does not exist ErrNotFound is returned but the // passed txn remains safe for future use. - Release(context.Context, *kv.Txn, uuid.UUID) error + Release(context.Context, uuid.UUID) error // GetMetadata retrieves the metadata with the provided Txn. - GetMetadata(context.Context, *kv.Txn) (ptpb.Metadata, error) + GetMetadata(context.Context) (ptpb.Metadata, error) // GetState retrieves the entire state of protectedts.Storage with the // provided Txn. 
- GetState(context.Context, *kv.Txn) (ptpb.State, error) + GetState(context.Context) (ptpb.State, error) // UpdateTimestamp updates the timestamp protected by the record with the // specified UUID. - UpdateTimestamp(ctx context.Context, txn *kv.Txn, id uuid.UUID, timestamp hlc.Timestamp) error + UpdateTimestamp(ctx context.Context, id uuid.UUID, timestamp hlc.Timestamp) error } // Iterator iterates records in a cache until wantMore is false or all Records diff --git a/pkg/kv/kvserver/protectedts/ptcache/BUILD.bazel b/pkg/kv/kvserver/protectedts/ptcache/BUILD.bazel index df1b46288ab9..95854f43873d 100644 --- a/pkg/kv/kvserver/protectedts/ptcache/BUILD.bazel +++ b/pkg/kv/kvserver/protectedts/ptcache/BUILD.bazel @@ -7,11 +7,11 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptcache", visibility = ["//visibility:public"], deps = [ - "//pkg/kv", "//pkg/kv/kvserver/protectedts", "//pkg/kv/kvserver/protectedts/ptpb", "//pkg/roachpb", "//pkg/settings/cluster", + "//pkg/sql/isql", "//pkg/util/hlc", "//pkg/util/log", "//pkg/util/stop", @@ -35,6 +35,7 @@ go_test( ":ptcache", "//pkg/base", "//pkg/keys", + "//pkg/kv", "//pkg/kv/kvserver", "//pkg/kv/kvserver/protectedts", "//pkg/kv/kvserver/protectedts/ptpb", @@ -45,7 +46,7 @@ go_test( "//pkg/server", "//pkg/sql", "//pkg/sql/catalog/systemschema", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/testutils", "//pkg/testutils/serverutils", "//pkg/testutils/testcluster", diff --git a/pkg/kv/kvserver/protectedts/ptcache/cache.go b/pkg/kv/kvserver/protectedts/ptcache/cache.go index af1e2b6375d6..230a625488a6 100644 --- a/pkg/kv/kvserver/protectedts/ptcache/cache.go +++ b/pkg/kv/kvserver/protectedts/ptcache/cache.go @@ -14,11 +14,11 @@ import ( "context" "time" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/roachpb" 
"github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/stop" @@ -31,8 +31,8 @@ import ( // Cache implements protectedts.Cache. type Cache struct { - db *kv.DB - storage protectedts.Storage + db isql.DB + storage protectedts.Manager stopper *stop.Stopper settings *cluster.Settings sf *singleflight.Group @@ -56,8 +56,8 @@ type Cache struct { // Config configures a Cache. type Config struct { - DB *kv.DB - Storage protectedts.Storage + DB isql.DB + Storage protectedts.Manager Settings *cluster.Settings } @@ -231,23 +231,24 @@ func (c *Cache) doSingleFlightUpdate(ctx context.Context) (interface{}, error) { state ptpb.State ts hlc.Timestamp ) - err := c.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + err := c.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { // NB: because this is a read-only transaction, the commit will be a no-op; // returning nil here means the transaction will commit and will never need // to change its read timestamp. 
defer func() { if err == nil { - ts = txn.ReadTimestamp() + ts = txn.KV().ReadTimestamp() } }() - md, err := c.storage.GetMetadata(ctx, txn) + pts := c.storage.WithTxn(txn) + md, err := pts.GetMetadata(ctx) if err != nil { return errors.Wrap(err, "failed to fetch protectedts metadata") } if versionChanged = md.Version != prev.Version; !versionChanged { return nil } - if state, err = c.storage.GetState(ctx, txn); err != nil { + if state, err = pts.GetState(ctx); err != nil { return errors.Wrap(err, "failed to fetch protectedts state") } return nil diff --git a/pkg/kv/kvserver/protectedts/ptcache/cache_test.go b/pkg/kv/kvserver/protectedts/ptcache/cache_test.go index 6ba7c63662ad..cfe9fdb25f3f 100644 --- a/pkg/kv/kvserver/protectedts/ptcache/cache_test.go +++ b/pkg/kv/kvserver/protectedts/ptcache/cache_test.go @@ -19,6 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptcache" @@ -27,7 +28,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -40,6 +41,37 @@ import ( "github.com/stretchr/testify/require" ) +func withDatabase(ptm protectedts.Manager, idb isql.DB) *storageWithLastCommit { + var m storageWithLastCommit + m.internalDBWithLastCommit.DB = idb + m.Storage = ptstorage.WithDatabase(ptm, &m.internalDBWithLastCommit) + return &m +} + +type storageWithLastCommit struct { + internalDBWithLastCommit + protectedts.Storage 
+} + +type internalDBWithLastCommit struct { + isql.DB + lastCommit hlc.Timestamp +} + +func (idb *internalDBWithLastCommit) Txn( + ctx context.Context, f func(context.Context, isql.Txn) error, opts ...isql.TxnOption, +) error { + var kvTxn *kv.Txn + err := idb.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + kvTxn = txn.KV() + return f(ctx, txn) + }) + if err == nil { + idb.lastCommit = kvTxn.CommitTimestamp() + } + return err +} + // TestCacheBasic exercises the basic behavior of the Cache. func TestCacheBasic(t *testing.T) { defer leaktest.AfterTest(t)() @@ -53,20 +85,19 @@ func TestCacheBasic(t *testing.T) { }, }) defer s.Stopper().Stop(ctx) - p := ptstorage.WithDatabase( - ptstorage.New(s.ClusterSettings(), - s.InternalExecutor().(sqlutil.InternalExecutor), - &protectedts.TestingKnobs{DisableProtectedTimestampForMultiTenant: true}), - s.DB(), - ) + insqlDB := s.InternalDB().(isql.DB) + m := ptstorage.New(s.ClusterSettings(), &protectedts.TestingKnobs{ + DisableProtectedTimestampForMultiTenant: true, + }) + p := withDatabase(m, insqlDB) // Set the poll interval to be very short. protectedts.PollInterval.Override(ctx, &s.ClusterSettings().SV, 500*time.Microsecond) c := ptcache.New(ptcache.Config{ Settings: s.ClusterSettings(), - DB: s.DB(), - Storage: p, + DB: insqlDB, + Storage: m, }) require.NoError(t, c.Start(ctx, s.Stopper())) @@ -78,7 +109,7 @@ func TestCacheBasic(t *testing.T) { // Then we'll add a record and make sure it gets seen. sp := tableSpan(42) - r, createdAt := protect(t, s, p, s.Clock().Now(), sp) + r, createdAt := protect(t, p, s.Clock().Now(), sp) testutils.SucceedsSoon(t, func() error { var coveredBy []*ptpb.Record seenTS := c.Iterate(ctx, sp.Key, sp.EndKey, @@ -96,7 +127,7 @@ func TestCacheBasic(t *testing.T) { }) // Then release the record and make sure that that gets seen. 
- require.Nil(t, p.Release(ctx, nil /* txn */, r.ID.GetUUID())) + require.Nil(t, p.Release(ctx, r.ID.GetUUID())) testutils.SucceedsSoon(t, func() error { var coveredBy []*ptpb.Record _ = c.Iterate(ctx, sp.Key, sp.EndKey, @@ -128,21 +159,17 @@ func TestRefresh(t *testing.T) { }, }) defer s.Stopper().Stop(ctx) - p := ptstorage.WithDatabase( - ptstorage.New( - s.ClusterSettings(), - s.InternalExecutor().(sqlutil.InternalExecutor), - ptsKnobs), - s.DB(), - ) + db := s.InternalDB().(isql.DB) + m := ptstorage.New(s.ClusterSettings(), ptsKnobs) + p := withDatabase(m, db) // Set the poll interval to be very long. protectedts.PollInterval.Override(ctx, &s.ClusterSettings().SV, 500*time.Hour) c := ptcache.New(ptcache.Config{ Settings: s.ClusterSettings(), - DB: s.DB(), - Storage: p, + DB: db, + Storage: m, }) require.NoError(t, c.Start(ctx, s.Stopper())) t.Run("already up-to-date", func(t *testing.T) { @@ -157,7 +184,7 @@ func TestRefresh(t *testing.T) { st.verifyCounters(t, 1, 0) // just need to scan meta }) t.Run("needs refresh, with change", func(t *testing.T) { - _, createdAt := protect(t, s, p, s.Clock().Now(), metaTableSpan) + _, createdAt := protect(t, p, s.Clock().Now(), metaTableSpan) st.resetCounters() require.NoError(t, c.Refresh(ctx, createdAt)) st.verifyCounters(t, 2, 1) // need to scan meta and then scan everything @@ -194,7 +221,7 @@ func TestRefresh(t *testing.T) { require.Regexp(t, "boom", c.Refresh(ctx, s.Clock().Now()).Error()) }) t.Run("error propagates while fetching records", func(t *testing.T) { - protect(t, s, p, s.Clock().Now(), metaTableSpan) + protect(t, p, s.Clock().Now(), metaTableSpan) st.setFilter(func(ba *roachpb.BatchRequest) *roachpb.Error { if scanReq, ok := ba.GetArg(roachpb.Scan); ok { scan := scanReq.(*roachpb.ScanRequest) @@ -209,7 +236,7 @@ func TestRefresh(t *testing.T) { }) t.Run("Iterate does not hold mutex", func(t *testing.T) { inIterate := make(chan chan struct{}) - rec, createdAt := protect(t, s, p, s.Clock().Now(), 
metaTableSpan) + rec, createdAt := protect(t, p, s.Clock().Now(), metaTableSpan) require.NoError(t, c.Refresh(ctx, createdAt)) go c.Iterate(ctx, keys.MinKey, keys.MaxKey, func(r *ptpb.Record) (wantMore bool) { if r.ID.GetUUID() != rec.ID.GetUUID() { @@ -227,7 +254,7 @@ func TestRefresh(t *testing.T) { // operation, amd then refresh after it. This will demonstrate that the // iteration call does not block concurrent refreshes. ch := <-inIterate - require.NoError(t, p.Release(ctx, nil /* txn */, rec.ID.GetUUID())) + require.NoError(t, p.Release(ctx, rec.ID.GetUUID())) require.NoError(t, c.Refresh(ctx, s.Clock().Now())) // Signal the Iterate loop to exit and wait for it to close the channel. close(ch) @@ -247,12 +274,13 @@ func TestStart(t *testing.T) { }, }, }) - p := s.ExecutorConfig().(sql.ExecutorConfig).ProtectedTimestampProvider + execCfg := s.ExecutorConfig().(sql.ExecutorConfig) + p := execCfg.ProtectedTimestampProvider // Set the poll interval to be very long. protectedts.PollInterval.Override(ctx, &s.ClusterSettings().SV, 500*time.Hour) c := ptcache.New(ptcache.Config{ Settings: s.ClusterSettings(), - DB: s.DB(), + DB: execCfg.InternalDB, Storage: p, }) return s, c @@ -278,19 +306,17 @@ func TestQueryRecord(t *testing.T) { ctx := context.Background() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - p := ptstorage.WithDatabase( - ptstorage.New( - s.ClusterSettings(), - s.InternalExecutor().(sqlutil.InternalExecutor), - &protectedts.TestingKnobs{DisableProtectedTimestampForMultiTenant: true}), - s.DB(), - ) + db := s.InternalDB().(isql.DB) + storage := ptstorage.New(s.ClusterSettings(), &protectedts.TestingKnobs{ + DisableProtectedTimestampForMultiTenant: true, + }) + p := withDatabase(storage, db) // Set the poll interval to be very long. 
protectedts.PollInterval.Override(ctx, &s.ClusterSettings().SV, 500*time.Hour) c := ptcache.New(ptcache.Config{ Settings: s.ClusterSettings(), - DB: s.DB(), - Storage: p, + DB: db, + Storage: storage, }) require.NoError(t, c.Start(ctx, s.Stopper())) @@ -298,8 +324,8 @@ func TestQueryRecord(t *testing.T) { waitForAsOfAfter(t, c, hlc.Timestamp{}) // Create two records. sp42 := tableSpan(42) - r1, createdAt1 := protect(t, s, p, s.Clock().Now(), sp42) - r2, createdAt2 := protect(t, s, p, s.Clock().Now(), sp42) + r1, createdAt1 := protect(t, p, s.Clock().Now(), sp42) + r2, createdAt2 := protect(t, p, s.Clock().Now(), sp42) // Ensure they both don't exist and that the read timestamps precede the // create timestamps. exists1, asOf := c.QueryRecord(ctx, r1.ID.GetUUID()) @@ -317,8 +343,8 @@ func TestQueryRecord(t *testing.T) { require.True(t, exists2) require.True(t, !asOf.Less(createdAt2)) // Release 2 and then create 3. - require.NoError(t, p.Release(ctx, nil /* txn */, r2.ID.GetUUID())) - r3, createdAt3 := protect(t, s, p, s.Clock().Now(), sp42) + require.NoError(t, p.Release(ctx, r2.ID.GetUUID())) + r3, createdAt3 := protect(t, p, s.Clock().Now(), sp42) exists2, asOf = c.QueryRecord(ctx, r2.ID.GetUUID()) require.True(t, exists2) require.True(t, asOf.Less(createdAt3)) @@ -339,30 +365,29 @@ func TestIterate(t *testing.T) { ctx := context.Background() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - p := ptstorage.WithDatabase( - ptstorage.New(s.ClusterSettings(), - s.InternalExecutor().(sqlutil.InternalExecutor), - &protectedts.TestingKnobs{DisableProtectedTimestampForMultiTenant: true}), - s.DB(), - ) + db := s.InternalDB().(isql.DB) + m := ptstorage.New(s.ClusterSettings(), &protectedts.TestingKnobs{ + DisableProtectedTimestampForMultiTenant: true, + }) + p := withDatabase(m, db) // Set the poll interval to be very long. 
protectedts.PollInterval.Override(ctx, &s.ClusterSettings().SV, 500*time.Hour) c := ptcache.New(ptcache.Config{ Settings: s.ClusterSettings(), - DB: s.DB(), - Storage: p, + DB: db, + Storage: m, }) require.NoError(t, c.Start(ctx, s.Stopper())) sp42 := tableSpan(42) sp43 := tableSpan(43) sp44 := tableSpan(44) - r1, _ := protect(t, s, p, s.Clock().Now(), sp42) - r2, _ := protect(t, s, p, s.Clock().Now(), sp43) - r3, _ := protect(t, s, p, s.Clock().Now(), sp44) - r4, _ := protect(t, s, p, s.Clock().Now(), sp42, sp43) + r1, _ := protect(t, p, s.Clock().Now(), sp42) + r2, _ := protect(t, p, s.Clock().Now(), sp43) + r3, _ := protect(t, p, s.Clock().Now(), sp44) + r4, _ := protect(t, p, s.Clock().Now(), sp42, sp43) require.NoError(t, c.Refresh(ctx, s.Clock().Now())) t.Run("all", func(t *testing.T) { var recs records @@ -429,14 +454,14 @@ func TestGetProtectionTimestamps(t *testing.T) { for _, testCase := range []struct { name string - test func(t *testing.T, p protectedts.Storage, c *ptcache.Cache, cleanup func(...*ptpb.Record)) + test func(t *testing.T, p *storageWithLastCommit, c *ptcache.Cache, cleanup func(...*ptpb.Record)) }{ { name: "multiple records apply to a single span", - test: func(t *testing.T, p protectedts.Storage, c *ptcache.Cache, cleanup func(...*ptpb.Record)) { - r1, _ := protect(t, s, p, ts(10), sp42) - r2, _ := protect(t, s, p, ts(11), sp42) - r3, _ := protect(t, s, p, ts(6), sp42) + test: func(t *testing.T, p *storageWithLastCommit, c *ptcache.Cache, cleanup func(...*ptpb.Record)) { + r1, _ := protect(t, p, ts(10), sp42) + r2, _ := protect(t, p, ts(11), sp42) + r3, _ := protect(t, p, ts(6), sp42) require.NoError(t, c.Refresh(ctx, s.Clock().Now())) protectionTimestamps, _, err := c.GetProtectionTimestamps(ctx, sp42) @@ -450,9 +475,9 @@ func TestGetProtectionTimestamps(t *testing.T) { }, { name: "no records apply", - test: func(t *testing.T, p protectedts.Storage, c *ptcache.Cache, cleanup func(...*ptpb.Record)) { - r1, _ := protect(t, s, p, ts(5), 
sp43) - r2, _ := protect(t, s, p, ts(10), sp44) + test: func(t *testing.T, p *storageWithLastCommit, c *ptcache.Cache, cleanup func(...*ptpb.Record)) { + r1, _ := protect(t, p, ts(5), sp43) + r2, _ := protect(t, p, ts(10), sp44) require.NoError(t, c.Refresh(ctx, s.Clock().Now())) protectionTimestamps, _, err := c.GetProtectionTimestamps(ctx, sp42) require.NoError(t, err) @@ -462,15 +487,15 @@ func TestGetProtectionTimestamps(t *testing.T) { }, { name: "multiple overlapping spans multiple records", - test: func(t *testing.T, p protectedts.Storage, c *ptcache.Cache, cleanup func(...*ptpb.Record)) { - r1, _ := protect(t, s, p, ts(10), sp42) - r2, _ := protect(t, s, p, ts(15), sp42) - r3, _ := protect(t, s, p, ts(5), sp43) - r4, _ := protect(t, s, p, ts(6), sp43) - r5, _ := protect(t, s, p, ts(25), keys.EverythingSpan) + test: func(t *testing.T, p *storageWithLastCommit, c *ptcache.Cache, cleanup func(...*ptpb.Record)) { + r1, _ := protect(t, p, ts(10), sp42) + r2, _ := protect(t, p, ts(15), sp42) + r3, _ := protect(t, p, ts(5), sp43) + r4, _ := protect(t, p, ts(6), sp43) + r5, _ := protect(t, p, ts(25), keys.EverythingSpan) // Also add a record that doesn't overlap with the requested span and // ensure it isn't retrieved below. 
- r6, _ := protect(t, s, p, ts(20), sp44) + r6, _ := protect(t, p, ts(20), sp44) require.NoError(t, c.Refresh(ctx, s.Clock().Now())) protectionTimestamps, _, err := c.GetProtectionTimestamps(ctx, sp4243) @@ -486,23 +511,20 @@ func TestGetProtectionTimestamps(t *testing.T) { }, } { t.Run(testCase.name, func(t *testing.T) { - p := ptstorage.WithDatabase( - ptstorage.New(s.ClusterSettings(), - s.InternalExecutor().(sqlutil.InternalExecutor), - &protectedts.TestingKnobs{DisableProtectedTimestampForMultiTenant: true}), - s.DB(), - ) - + storage := ptstorage.New(s.ClusterSettings(), &protectedts.TestingKnobs{ + DisableProtectedTimestampForMultiTenant: true, + }) + p := withDatabase(storage, s.InternalDB().(isql.DB)) c := ptcache.New(ptcache.Config{ Settings: s.ClusterSettings(), - DB: s.DB(), - Storage: p, + DB: s.InternalDB().(isql.DB), + Storage: storage, }) require.NoError(t, c.Start(ctx, s.Stopper())) testCase.test(t, p, c, func(records ...*ptpb.Record) { for _, r := range records { - require.NoError(t, p.Release(ctx, nil, r.ID.GetUUID())) + require.NoError(t, p.Release(ctx, r.ID.GetUUID())) } }) }) @@ -513,20 +535,18 @@ func TestSettingChangedLeadsToFetch(t *testing.T) { ctx := context.Background() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - p := ptstorage.WithDatabase( - ptstorage.New(s.ClusterSettings(), - s.InternalExecutor().(sqlutil.InternalExecutor), - &protectedts.TestingKnobs{DisableProtectedTimestampForMultiTenant: true}), - s.DB(), - ) + db := s.InternalDB().(isql.DB) + m := ptstorage.New(s.ClusterSettings(), &protectedts.TestingKnobs{ + DisableProtectedTimestampForMultiTenant: true, + }) // Set the poll interval to be very long. 
protectedts.PollInterval.Override(ctx, &s.ClusterSettings().SV, 500*time.Hour) c := ptcache.New(ptcache.Config{ Settings: s.ClusterSettings(), - DB: s.DB(), - Storage: p, + DB: db, + Storage: m, }) require.NoError(t, c.Start(ctx, s.Stopper())) @@ -564,11 +584,7 @@ func tableSpan(tableID uint32) roachpb.Span { } func protect( - t *testing.T, - s serverutils.TestServerInterface, - p protectedts.Storage, - protectTS hlc.Timestamp, - spans ...roachpb.Span, + t *testing.T, p *storageWithLastCommit, protectTS hlc.Timestamp, spans ...roachpb.Span, ) (r *ptpb.Record, createdAt hlc.Timestamp) { r = &ptpb.Record{ ID: uuid.MakeV4().GetBytes(), @@ -577,12 +593,10 @@ func protect( DeprecatedSpans: spans, } ctx := context.Background() - txn := s.DB().NewTxn(ctx, "test") - require.NoError(t, p.Protect(ctx, txn, r)) - require.NoError(t, txn.Commit(ctx)) - _, err := p.GetRecord(ctx, nil, r.ID.GetUUID()) + require.NoError(t, p.Protect(ctx, r)) + createdAt = p.lastCommit + _, err := p.GetRecord(ctx, r.ID.GetUUID()) require.NoError(t, err) - createdAt = txn.CommitTimestamp() return r, createdAt } diff --git a/pkg/kv/kvserver/protectedts/ptprovider/BUILD.bazel b/pkg/kv/kvserver/protectedts/ptprovider/BUILD.bazel index 622639453739..08917eba79c2 100644 --- a/pkg/kv/kvserver/protectedts/ptprovider/BUILD.bazel +++ b/pkg/kv/kvserver/protectedts/ptprovider/BUILD.bazel @@ -7,14 +7,13 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptprovider", visibility = ["//visibility:public"], deps = [ - "//pkg/kv", "//pkg/kv/kvserver", "//pkg/kv/kvserver/protectedts", "//pkg/kv/kvserver/protectedts/ptcache", "//pkg/kv/kvserver/protectedts/ptreconcile", "//pkg/kv/kvserver/protectedts/ptstorage", "//pkg/settings/cluster", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/util/metric", "//pkg/util/stop", "@com_github_cockroachdb_errors//:errors", diff --git a/pkg/kv/kvserver/protectedts/ptprovider/provider.go b/pkg/kv/kvserver/protectedts/ptprovider/provider.go 
index 377039e80c53..d0ab299afc23 100644 --- a/pkg/kv/kvserver/protectedts/ptprovider/provider.go +++ b/pkg/kv/kvserver/protectedts/ptprovider/provider.go @@ -15,14 +15,13 @@ package ptprovider import ( "context" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptcache" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptreconcile" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptstorage" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/errors" @@ -31,16 +30,15 @@ import ( // Config configures the Provider. type Config struct { Settings *cluster.Settings - DB *kv.DB + DB isql.DB Stores *kvserver.Stores ReconcileStatusFuncs ptreconcile.StatusFuncs - InternalExecutor sqlutil.InternalExecutor Knobs *protectedts.TestingKnobs } // Provider is the concrete implementation of protectedts.Provider interface. 
type Provider struct { - protectedts.Storage + protectedts.Manager protectedts.Cache protectedts.Reconciler metric.Struct @@ -51,7 +49,7 @@ func New(cfg Config) (protectedts.Provider, error) { if err := validateConfig(cfg); err != nil { return nil, err } - storage := ptstorage.New(cfg.Settings, cfg.InternalExecutor, cfg.Knobs) + storage := ptstorage.New(cfg.Settings, cfg.Knobs) reconciler := ptreconcile.New(cfg.Settings, cfg.DB, storage, cfg.ReconcileStatusFuncs) cache := ptcache.New(ptcache.Config{ DB: cfg.DB, @@ -60,7 +58,7 @@ func New(cfg Config) (protectedts.Provider, error) { }) return &Provider{ - Storage: storage, + Manager: storage, Cache: cache, Reconciler: reconciler, Struct: reconciler.Metrics(), @@ -73,8 +71,6 @@ func validateConfig(cfg Config) error { return errors.Errorf("invalid nil Settings") case cfg.DB == nil: return errors.Errorf("invalid nil DB") - case cfg.InternalExecutor == nil: - return errors.Errorf("invalid nil InternalExecutor") default: return nil } diff --git a/pkg/kv/kvserver/protectedts/ptreconcile/BUILD.bazel b/pkg/kv/kvserver/protectedts/ptreconcile/BUILD.bazel index f990a6b58c67..c224e62b5ebe 100644 --- a/pkg/kv/kvserver/protectedts/ptreconcile/BUILD.bazel +++ b/pkg/kv/kvserver/protectedts/ptreconcile/BUILD.bazel @@ -10,11 +10,11 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptreconcile", visibility = ["//visibility:public"], deps = [ - "//pkg/kv", "//pkg/kv/kvserver/protectedts", "//pkg/kv/kvserver/protectedts/ptpb", "//pkg/settings", "//pkg/settings/cluster", + "//pkg/sql/isql", "//pkg/util/log", "//pkg/util/metric", "//pkg/util/stop", @@ -36,15 +36,16 @@ go_test( ":ptreconcile", "//pkg/base", "//pkg/keys", - "//pkg/kv", "//pkg/kv/kvserver/protectedts", "//pkg/kv/kvserver/protectedts/ptpb", + "//pkg/kv/kvserver/protectedts/ptstorage", "//pkg/roachpb", "//pkg/security/securityassets", "//pkg/security/securitytest", "//pkg/server", "//pkg/settings/cluster", "//pkg/sql", + 
"//pkg/sql/isql", "//pkg/testutils", "//pkg/testutils/serverutils", "//pkg/testutils/testcluster", diff --git a/pkg/kv/kvserver/protectedts/ptreconcile/reconciler.go b/pkg/kv/kvserver/protectedts/ptreconcile/reconciler.go index 14a522a66c15..fbbe6a9ca680 100644 --- a/pkg/kv/kvserver/protectedts/ptreconcile/reconciler.go +++ b/pkg/kv/kvserver/protectedts/ptreconcile/reconciler.go @@ -17,11 +17,11 @@ import ( "math/rand" "time" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/timeutil" @@ -41,7 +41,7 @@ var ReconcileInterval = settings.RegisterDurationSetting( // StatusFunc is used to check on the status of a Record based on its Meta // field. type StatusFunc func( - ctx context.Context, txn *kv.Txn, meta []byte, + ctx context.Context, txn isql.Txn, meta []byte, ) (shouldRemove bool, _ error) // StatusFuncs maps from MetaType to a StatusFunc. @@ -52,15 +52,15 @@ type StatusFuncs map[string]StatusFunc // meta in conjunction with the configured StatusFunc. type Reconciler struct { settings *cluster.Settings - db *kv.DB - pts protectedts.Storage + db isql.DB + pts protectedts.Manager metrics Metrics statusFuncs StatusFuncs } // New constructs a Reconciler. 
func New( - st *cluster.Settings, db *kv.DB, storage protectedts.Storage, statusFuncs StatusFuncs, + st *cluster.Settings, db isql.DB, storage protectedts.Manager, statusFuncs StatusFuncs, ) *Reconciler { return &Reconciler{ settings: st, @@ -119,9 +119,9 @@ func (r *Reconciler) run(ctx context.Context, stopper *stop.Stopper) { func (r *Reconciler) reconcile(ctx context.Context) { // Load protected timestamp records. var state ptpb.State - if err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := r.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { var err error - state, err = r.pts.GetState(ctx, txn) + state, err = r.pts.WithTxn(txn).GetState(ctx) return err }); err != nil { r.metrics.ReconciliationErrors.Inc(1) @@ -135,7 +135,7 @@ func (r *Reconciler) reconcile(ctx context.Context) { continue } var didRemove bool - if err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + if err := r.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { didRemove = false // reset for retries shouldRemove, err := task(ctx, txn, rec.Meta) if err != nil { @@ -144,7 +144,7 @@ func (r *Reconciler) reconcile(ctx context.Context) { if !shouldRemove { return nil } - err = r.pts.Release(ctx, txn, rec.ID.GetUUID()) + err = r.pts.WithTxn(txn).Release(ctx, rec.ID.GetUUID()) if err != nil && !errors.Is(err, protectedts.ErrNotExists) { return err } diff --git a/pkg/kv/kvserver/protectedts/ptreconcile/reconciler_test.go b/pkg/kv/kvserver/protectedts/ptreconcile/reconciler_test.go index ca24342b7a52..da2c8675c77d 100644 --- a/pkg/kv/kvserver/protectedts/ptreconcile/reconciler_test.go +++ b/pkg/kv/kvserver/protectedts/ptreconcile/reconciler_test.go @@ -17,13 +17,14 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" 
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptreconcile" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptstorage" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -50,7 +51,9 @@ func TestReconciler(t *testing.T) { // Now I want to create some artifacts that should get reconciled away and // then make sure that they do and others which should not do not. s0 := tc.Server(0) + insqlDB := s0.InternalDB().(isql.DB) ptp := s0.ExecutorConfig().(sql.ExecutorConfig).ProtectedTimestampProvider + pts := ptstorage.WithDatabase(ptp, insqlDB) settings := cluster.MakeTestingClusterSettings() const testTaskType = "foo" @@ -59,10 +62,11 @@ func TestReconciler(t *testing.T) { toRemove map[string]struct{} }{} state.toRemove = map[string]struct{}{} - r := ptreconcile.New(settings, s0.DB(), ptp, + + r := ptreconcile.New(settings, insqlDB, ptp, ptreconcile.StatusFuncs{ testTaskType: func( - ctx context.Context, txn *kv.Txn, meta []byte, + ctx context.Context, txn isql.Txn, meta []byte, ) (shouldRemove bool, err error) { state.mu.Lock() defer state.mu.Unlock() @@ -84,9 +88,7 @@ func TestReconciler(t *testing.T) { } else { rec1.Target = ptpb.MakeClusterTarget() } - require.NoError(t, s0.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return ptp.Protect(ctx, txn, &rec1) - })) + require.NoError(t, pts.Protect(ctx, &rec1)) t.Run("update settings", func(t *testing.T) { ptreconcile.ReconcileInterval.Override(ctx, &settings.SV, time.Millisecond) @@ -112,10 +114,8 @@ func TestReconciler(t *testing.T) { } return nil }) - require.Regexp(t, protectedts.ErrNotExists, 
s0.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - _, err := ptp.GetRecord(ctx, txn, rec1.ID.GetUUID()) - return err - })) + _, err := pts.GetRecord(ctx, rec1.ID.GetUUID()) + require.Regexp(t, protectedts.ErrNotExists, err) }) }) } diff --git a/pkg/kv/kvserver/protectedts/ptstorage/BUILD.bazel b/pkg/kv/kvserver/protectedts/ptstorage/BUILD.bazel index d5004f08f20b..257901192534 100644 --- a/pkg/kv/kvserver/protectedts/ptstorage/BUILD.bazel +++ b/pkg/kv/kvserver/protectedts/ptstorage/BUILD.bazel @@ -14,13 +14,12 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptstorage", visibility = ["//visibility:public"], deps = [ - "//pkg/kv", "//pkg/kv/kvserver/protectedts", "//pkg/kv/kvserver/protectedts/ptpb", "//pkg/settings/cluster", + "//pkg/sql/isql", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/util/hlc", "//pkg/util/log", "//pkg/util/protoutil", @@ -56,9 +55,9 @@ go_test( "//pkg/sql/catalog", "//pkg/sql/catalog/colinfo", "//pkg/sql/catalog/descpb", + "//pkg/sql/isql", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/sql/tests", "//pkg/testutils", "//pkg/testutils/serverutils", @@ -68,7 +67,6 @@ go_test( "//pkg/util/log/severity", "//pkg/util/protoutil", "//pkg/util/randutil", - "//pkg/util/syncutil", "//pkg/util/uuid", "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//require", diff --git a/pkg/kv/kvserver/protectedts/ptstorage/storage.go b/pkg/kv/kvserver/protectedts/ptstorage/storage.go index 9e1bc0f38c99..0cc0155d8f94 100644 --- a/pkg/kv/kvserver/protectedts/ptstorage/storage.go +++ b/pkg/kv/kvserver/protectedts/ptstorage/storage.go @@ -14,13 +14,12 @@ package ptstorage import ( "context" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + 
"github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" @@ -40,111 +39,27 @@ import ( // TODO(ajwerner): Hook into the alerts infrastructure and metrics to provide // visibility into corruption when it is detected. -// storage interacts with the durable state of the protectedts subsystem. -type storage struct { +// Manager interacts with the durable state of the protectedts subsystem. +type Manager struct { settings *cluster.Settings - ex sqlutil.InternalExecutor - - knobs *protectedts.TestingKnobs -} - -var _ protectedts.Storage = (*storage)(nil) - -// TODO(adityamaru): Delete in 22.2. -func useDeprecatedProtectedTSStorage( - ctx context.Context, st *cluster.Settings, knobs *protectedts.TestingKnobs, -) bool { - return knobs.DisableProtectedTimestampForMultiTenant -} - -// New creates a new Storage. 
-func New( - settings *cluster.Settings, ex sqlutil.InternalExecutor, knobs *protectedts.TestingKnobs, -) protectedts.Storage { - if knobs == nil { - knobs = &protectedts.TestingKnobs{} - } - return &storage{settings: settings, ex: ex, knobs: knobs} -} - -var errNoTxn = errors.New("must provide a non-nil transaction") - -func (p *storage) UpdateTimestamp( - ctx context.Context, txn *kv.Txn, id uuid.UUID, timestamp hlc.Timestamp, -) error { - row, err := p.ex.QueryRowEx(ctx, "protectedts-update", txn, - sessiondata.NodeUserSessionDataOverride, - updateTimestampQuery, id.GetBytesMut(), timestamp.WithSynthetic(false).AsOfSystemTime()) - if err != nil { - return errors.Wrapf(err, "failed to update record %v", id) - } - if len(row) == 0 { - return protectedts.ErrNotExists - } - return nil + knobs *protectedts.TestingKnobs } -func (p *storage) deprecatedProtect( - ctx context.Context, txn *kv.Txn, r *ptpb.Record, meta []byte, -) error { - s := makeSettings(p.settings) - encodedSpans, err := protoutil.Marshal(&Spans{Spans: r.DeprecatedSpans}) - if err != nil { // how can this possibly fail? 
- return errors.Wrap(err, "failed to marshal spans") - } - it, err := p.ex.QueryIteratorEx(ctx, "protectedts-deprecated-protect", txn, - sessiondata.NodeUserSessionDataOverride, - protectQueryWithoutTarget, - s.maxSpans, s.maxBytes, len(r.DeprecatedSpans), - r.ID, r.Timestamp.WithSynthetic(false).AsOfSystemTime(), - r.MetaType, meta, - len(r.DeprecatedSpans), encodedSpans) - if err != nil { - return errors.Wrapf(err, "failed to write record %v", r.ID) - } - ok, err := it.Next(ctx) - if err != nil { - return errors.Wrapf(err, "failed to write record %v", r.ID) - } - if !ok { - return errors.Newf("failed to write record %v", r.ID) - } - row := it.Cur() - if err := it.Close(); err != nil { - log.Infof(ctx, "encountered %v when writing record %v", err, r.ID) - } - if failed := *row[0].(*tree.DBool); failed { - curNumSpans := int64(*row[1].(*tree.DInt)) - if s.maxSpans > 0 && curNumSpans+int64(len(r.DeprecatedSpans)) > s.maxSpans { - return errors.WithHint( - errors.Errorf("protectedts: limit exceeded: %d+%d > %d spans", curNumSpans, - len(r.DeprecatedSpans), s.maxSpans), - "SET CLUSTER SETTING kv.protectedts.max_spans to a higher value") - } - curBytes := int64(*row[2].(*tree.DInt)) - recordBytes := int64(len(encodedSpans) + len(r.Meta) + len(r.MetaType)) - if s.maxBytes > 0 && curBytes+recordBytes > s.maxBytes { - return errors.WithHint( - errors.Errorf("protectedts: limit exceeded: %d+%d > %d bytes", curBytes, recordBytes, - s.maxBytes), - "SET CLUSTER SETTING kv.protectedts.max_bytes to a higher value") - } - return protectedts.ErrExists - } - return nil +// storage implements protectedts.Storage with a transaction. 
+type storage struct { + txn isql.Txn + settings *cluster.Settings + knobs *protectedts.TestingKnobs } -func (p *storage) Protect(ctx context.Context, txn *kv.Txn, r *ptpb.Record) error { +func (p *storage) Protect(ctx context.Context, r *ptpb.Record) error { if err := validateRecordForProtect(ctx, r, p.settings, p.knobs); err != nil { return err } - if txn == nil { - return errNoTxn - } meta := r.Meta if meta == nil { - // v20.1 crashes in rowToRecord and storage.Release if it finds a NULL + // v20.1 crashes in rowToRecord and Manager.Release if it finds a NULL // value in system.protected_ts_records.meta. v20.2 and above handle // this correctly, but we need to maintain mixed version compatibility // for at least one release. @@ -158,7 +73,7 @@ func (p *storage) Protect(ctx context.Context, txn *kv.Txn, r *ptpb.Record) erro // // TODO(adityamaru): Delete in 22.2 once we exclusively protect `target`s. if useDeprecatedProtectedTSStorage(ctx, p.settings, p.knobs) { - return p.deprecatedProtect(ctx, txn, r, meta) + return p.deprecatedProtect(ctx, r, meta) } // Clear the `DeprecatedSpans` field even if it has been set by the caller. @@ -172,7 +87,7 @@ func (p *storage) Protect(ctx context.Context, txn *kv.Txn, r *ptpb.Record) erro if err != nil { // how can this possibly fail? 
return errors.Wrap(err, "failed to marshal spans") } - it, err := p.ex.QueryIteratorEx(ctx, "protectedts-protect", txn, + it, err := p.txn.QueryIteratorEx(ctx, "protectedts-protect", p.txn.KV(), sessiondata.NodeUserSessionDataOverride, protectQuery, s.maxSpans, s.maxBytes, len(r.DeprecatedSpans), @@ -208,40 +123,11 @@ func (p *storage) Protect(ctx context.Context, txn *kv.Txn, r *ptpb.Record) erro return nil } -func (p *storage) deprecatedGetRecord( - ctx context.Context, txn *kv.Txn, id uuid.UUID, -) (*ptpb.Record, error) { - row, err := p.ex.QueryRowEx(ctx, "protectedts-deprecated-GetRecord", txn, - sessiondata.NodeUserSessionDataOverride, - getRecordWithoutTargetQuery, id.GetBytesMut()) - if err != nil { - return nil, errors.Wrapf(err, "failed to read record %v", id) - } - if len(row) == 0 { - return nil, protectedts.ErrNotExists - } - var r ptpb.Record - if err := rowToRecord(row, &r, true /* isDeprecatedRow */); err != nil { - return nil, err - } - return &r, nil -} - -func (p *storage) GetRecord(ctx context.Context, txn *kv.Txn, id uuid.UUID) (*ptpb.Record, error) { - if txn == nil { - return nil, errNoTxn - } - - // The `target` column was added to `system.protected_ts_records` as part of - // the tenant migration `AlterSystemProtectedTimestampAddColumn`. Prior to the - // migration we should continue return records that protect `spans`. - // - // TODO(adityamaru): Delete in 22.2 once we exclusively protect `target`s. 
+func (p *storage) GetRecord(ctx context.Context, id uuid.UUID) (*ptpb.Record, error) { if useDeprecatedProtectedTSStorage(ctx, p.settings, p.knobs) { - return p.deprecatedGetRecord(ctx, txn, id) + return p.deprecatedGetRecord(ctx, id) } - - row, err := p.ex.QueryRowEx(ctx, "protectedts-GetRecord", txn, + row, err := p.txn.QueryRowEx(ctx, "protectedts-GetRecord", p.txn.KV(), sessiondata.NodeUserSessionDataOverride, getRecordQuery, id.GetBytesMut()) if err != nil { @@ -257,11 +143,8 @@ func (p *storage) GetRecord(ctx context.Context, txn *kv.Txn, id uuid.UUID) (*pt return &r, nil } -func (p *storage) MarkVerified(ctx context.Context, txn *kv.Txn, id uuid.UUID) error { - if txn == nil { - return errNoTxn - } - numRows, err := p.ex.ExecEx(ctx, "protectedts-MarkVerified", txn, +func (p storage) MarkVerified(ctx context.Context, id uuid.UUID) error { + numRows, err := p.txn.ExecEx(ctx, "protectedts-MarkVerified", p.txn.KV(), sessiondata.NodeUserSessionDataOverride, markVerifiedQuery, id.GetBytesMut()) if err != nil { @@ -273,11 +156,8 @@ func (p *storage) MarkVerified(ctx context.Context, txn *kv.Txn, id uuid.UUID) e return nil } -func (p *storage) Release(ctx context.Context, txn *kv.Txn, id uuid.UUID) error { - if txn == nil { - return errNoTxn - } - numRows, err := p.ex.ExecEx(ctx, "protectedts-Release", txn, +func (p storage) Release(ctx context.Context, id uuid.UUID) error { + numRows, err := p.txn.ExecEx(ctx, "protectedts-Release", p.txn.KV(), sessiondata.NodeUserSessionDataOverride, releaseQuery, id.GetBytesMut()) if err != nil { @@ -289,11 +169,8 @@ func (p *storage) Release(ctx context.Context, txn *kv.Txn, id uuid.UUID) error return nil } -func (p *storage) GetMetadata(ctx context.Context, txn *kv.Txn) (ptpb.Metadata, error) { - if txn == nil { - return ptpb.Metadata{}, errNoTxn - } - row, err := p.ex.QueryRowEx(ctx, "protectedts-GetMetadata", txn, +func (p storage) GetMetadata(ctx context.Context) (ptpb.Metadata, error) { + row, err := p.txn.QueryRowEx(ctx, 
"protectedts-GetMetadata", p.txn.KV(), sessiondata.NodeUserSessionDataOverride, getMetadataQuery) if err != nil { @@ -310,15 +187,12 @@ func (p *storage) GetMetadata(ctx context.Context, txn *kv.Txn) (ptpb.Metadata, }, nil } -func (p *storage) GetState(ctx context.Context, txn *kv.Txn) (ptpb.State, error) { - if txn == nil { - return ptpb.State{}, errNoTxn - } - md, err := p.GetMetadata(ctx, txn) +func (p storage) GetState(ctx context.Context) (ptpb.State, error) { + md, err := p.GetMetadata(ctx) if err != nil { return ptpb.State{}, err } - records, err := p.getRecords(ctx, txn) + records, err := p.getRecords(ctx) if err != nil { return ptpb.State{}, err } @@ -328,10 +202,12 @@ func (p *storage) GetState(ctx context.Context, txn *kv.Txn) (ptpb.State, error) }, nil } -func (p *storage) deprecatedGetRecords(ctx context.Context, txn *kv.Txn) ([]ptpb.Record, error) { - it, err := p.ex.QueryIteratorEx(ctx, "protectedts-deprecated-GetRecords", txn, - sessiondata.NodeUserSessionDataOverride, - getRecordsWithoutTargetQuery) +func (p *storage) getRecords(ctx context.Context) ([]ptpb.Record, error) { + if useDeprecatedProtectedTSStorage(ctx, p.settings, p.knobs) { + return p.deprecatedGetRecords(ctx) + } + it, err := p.txn.QueryIteratorEx(ctx, "protectedts-GetRecords", p.txn.KV(), + sessiondata.NodeUserSessionDataOverride, getRecordsQuery) if err != nil { return nil, errors.Wrap(err, "failed to read records") } @@ -340,7 +216,7 @@ func (p *storage) deprecatedGetRecords(ctx context.Context, txn *kv.Txn) ([]ptpb var records []ptpb.Record for ok, err = it.Next(ctx); ok; ok, err = it.Next(ctx) { var record ptpb.Record - if err := rowToRecord(it.Cur(), &record, true /* isDeprecatedRow */); err != nil { + if err := rowToRecord(it.Cur(), &record, false /* isDeprecatedRow */); err != nil { log.Errorf(ctx, "failed to parse row as record: %v", err) } records = append(records, record) @@ -351,30 +227,42 @@ func (p *storage) deprecatedGetRecords(ctx context.Context, txn *kv.Txn) ([]ptpb 
return records, nil } -func (p *storage) getRecords(ctx context.Context, txn *kv.Txn) ([]ptpb.Record, error) { - if useDeprecatedProtectedTSStorage(ctx, p.settings, p.knobs) { - return p.deprecatedGetRecords(ctx, txn) - } - - it, err := p.ex.QueryIteratorEx(ctx, "protectedts-GetRecords", txn, - sessiondata.NodeUserSessionDataOverride, getRecordsQuery) +func (p storage) UpdateTimestamp(ctx context.Context, id uuid.UUID, timestamp hlc.Timestamp) error { + row, err := p.txn.QueryRowEx(ctx, "protectedts-update", p.txn.KV(), + sessiondata.NodeUserSessionDataOverride, + updateTimestampQuery, id.GetBytesMut(), timestamp.WithSynthetic(false).AsOfSystemTime()) if err != nil { - return nil, errors.Wrap(err, "failed to read records") + return errors.Wrapf(err, "failed to update record %v", id) + } + if len(row) == 0 { + return protectedts.ErrNotExists } + return nil +} - var ok bool - var records []ptpb.Record - for ok, err = it.Next(ctx); ok; ok, err = it.Next(ctx) { - var record ptpb.Record - if err := rowToRecord(it.Cur(), &record, false /* isDeprecatedRow */); err != nil { - log.Errorf(ctx, "failed to parse row as record: %v", err) - } - records = append(records, record) +func (p *Manager) WithTxn(txn isql.Txn) protectedts.Storage { + return &storage{ + txn: txn, + settings: p.settings, + knobs: p.knobs, } - if err != nil { - return nil, errors.Wrap(err, "failed to read records") +} + +var _ protectedts.Manager = (*Manager)(nil) + +// TODO(adityamaru): Delete in 22.2. +func useDeprecatedProtectedTSStorage( + ctx context.Context, st *cluster.Settings, knobs *protectedts.TestingKnobs, +) bool { + return knobs.DisableProtectedTimestampForMultiTenant +} + +// New creates a new Storage. 
+func New(settings *cluster.Settings, knobs *protectedts.TestingKnobs) *Manager { + if knobs == nil { + knobs = &protectedts.TestingKnobs{} } - return records, nil + return &Manager{settings: settings, knobs: knobs} } // rowToRecord parses a row as returned from the variants of getRecords and @@ -384,7 +272,7 @@ func (p *storage) getRecords(ctx context.Context, txn *kv.Txn) ([]ptpb.Record, e // solve. Malformed records can still be removed (and hopefully will be). // // isDeprecatedRow indicates if the supplied row was generated by one of the -// deprecated PTS storage methods, and as such, does not include the target +// deprecated PTS Manager methods, and as such, does not include the target // column. func rowToRecord(row tree.Datums, r *ptpb.Record, isDeprecatedRow bool) error { r.ID = row[0].(*tree.DUuid).UUID.GetBytes() @@ -424,6 +312,94 @@ func rowToRecord(row tree.Datums, r *ptpb.Record, isDeprecatedRow bool) error { return nil } +func (p *storage) deprecatedProtect(ctx context.Context, r *ptpb.Record, meta []byte) error { + s := makeSettings(p.settings) + encodedSpans, err := protoutil.Marshal(&Spans{Spans: r.DeprecatedSpans}) + if err != nil { // how can this possibly fail? 
+ return errors.Wrap(err, "failed to marshal spans") + } + it, err := p.txn.QueryIteratorEx(ctx, "protectedts-deprecated-protect", p.txn.KV(), + sessiondata.NodeUserSessionDataOverride, + protectQueryWithoutTarget, + s.maxSpans, s.maxBytes, len(r.DeprecatedSpans), + r.ID, r.Timestamp.WithSynthetic(false).AsOfSystemTime(), + r.MetaType, meta, + len(r.DeprecatedSpans), encodedSpans) + if err != nil { + return errors.Wrapf(err, "failed to write record %v", r.ID) + } + ok, err := it.Next(ctx) + if err != nil { + return errors.Wrapf(err, "failed to write record %v", r.ID) + } + if !ok { + return errors.Newf("failed to write record %v", r.ID) + } + row := it.Cur() + if err := it.Close(); err != nil { + log.Infof(ctx, "encountered %v when writing record %v", err, r.ID) + } + if failed := *row[0].(*tree.DBool); failed { + curNumSpans := int64(*row[1].(*tree.DInt)) + if s.maxSpans > 0 && curNumSpans+int64(len(r.DeprecatedSpans)) > s.maxSpans { + return errors.WithHint( + errors.Errorf("protectedts: limit exceeded: %d+%d > %d spans", curNumSpans, + len(r.DeprecatedSpans), s.maxSpans), + "SET CLUSTER SETTING kv.protectedts.max_spans to a higher value") + } + curBytes := int64(*row[2].(*tree.DInt)) + recordBytes := int64(len(encodedSpans) + len(r.Meta) + len(r.MetaType)) + if s.maxBytes > 0 && curBytes+recordBytes > s.maxBytes { + return errors.WithHint( + errors.Errorf("protectedts: limit exceeded: %d+%d > %d bytes", curBytes, recordBytes, + s.maxBytes), + "SET CLUSTER SETTING kv.protectedts.max_bytes to a higher value") + } + return protectedts.ErrExists + } + return nil +} + +func (p *storage) deprecatedGetRecord(ctx context.Context, id uuid.UUID) (*ptpb.Record, error) { + row, err := p.txn.QueryRowEx(ctx, "protectedts-deprecated-GetRecord", p.txn.KV(), + sessiondata.NodeUserSessionDataOverride, + getRecordWithoutTargetQuery, id.GetBytesMut()) + if err != nil { + return nil, errors.Wrapf(err, "failed to read record %v", id) + } + if len(row) == 0 { + return nil, 
protectedts.ErrNotExists + } + var r ptpb.Record + if err := rowToRecord(row, &r, true /* isDeprecatedRow */); err != nil { + return nil, err + } + return &r, nil +} + +func (p *storage) deprecatedGetRecords(ctx context.Context) ([]ptpb.Record, error) { + it, err := p.txn.QueryIteratorEx(ctx, "protectedts-deprecated-GetRecords", p.txn.KV(), + sessiondata.NodeUserSessionDataOverride, + getRecordsWithoutTargetQuery) + if err != nil { + return nil, errors.Wrap(err, "failed to read records") + } + + var ok bool + var records []ptpb.Record + for ok, err = it.Next(ctx); ok; ok, err = it.Next(ctx) { + var record ptpb.Record + if err := rowToRecord(it.Cur(), &record, true /* isDeprecatedRow */); err != nil { + log.Errorf(ctx, "failed to parse row as record: %v", err) + } + records = append(records, record) + } + if err != nil { + return nil, errors.Wrap(err, "failed to read records") + } + return records, nil +} + type settings struct { maxSpans int64 maxBytes int64 diff --git a/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go b/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go index d411a64ef35d..482f5a1777ba 100644 --- a/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go +++ b/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go @@ -34,9 +34,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -45,7 +45,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/log/severity" 
"github.com/cockroachdb/cockroach/pkg/util/protoutil" - "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" @@ -87,8 +86,8 @@ var testCases = []testCase{ ops: []op{ funcOp(func(ctx context.Context, t *testing.T, tCtx *testContext) { rec := newRecord(tCtx, hlc.Timestamp{}, "", nil, tableTarget(42), tableSpan(42)) - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return tCtx.pts.Protect(ctx, txn, &rec) + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return tCtx.pts.WithTxn(txn).Protect(ctx, &rec) }) require.Regexp(t, "invalid zero value timestamp", err.Error()) }), @@ -101,8 +100,8 @@ var testCases = []testCase{ rec := newRecord(tCtx, tCtx.tc.Server(0).Clock().Now(), "", nil, tableTarget(42), tableSpan(42)) rec.Verified = true - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return tCtx.pts.Protect(ctx, txn, &rec) + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return tCtx.pts.WithTxn(txn).Protect(ctx, &rec) }) require.Regexp(t, "cannot create a verified record", err.Error()) }), @@ -125,8 +124,8 @@ var testCases = []testCase{ funcOp(func(ctx context.Context, t *testing.T, tCtx *testContext) { rec := newRecord(tCtx, tCtx.tc.Server(0).Clock().Now(), "", nil, tableTarget(42), tableSpan(42)) rec.ID = pickOneRecord(tCtx).GetBytes() - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return tCtx.pts.Protect(ctx, txn, &rec) + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return tCtx.pts.WithTxn(txn).Protect(ctx, &rec) }) require.EqualError(t, err, protectedts.ErrExists.Error()) }), @@ -239,8 +238,8 @@ var testCases = []testCase{ ops: []op{ funcOp(func(ctx context.Context, t *testing.T, tCtx *testContext) { var rec *ptpb.Record - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err 
error) { - rec, err = tCtx.pts.GetRecord(ctx, txn, randomID(tCtx)) + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { + rec, err = tCtx.pts.WithTxn(txn).GetRecord(ctx, randomID(tCtx)) return err }) require.EqualError(t, err, protectedts.ErrNotExists.Error()) @@ -288,39 +287,21 @@ var testCases = []testCase{ name: "UpdateTimestamp -- does not exist", ops: []op{ funcOp(func(ctx context.Context, t *testing.T, tCtx *testContext) { - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { - return tCtx.pts.UpdateTimestamp(ctx, txn, randomID(tCtx), hlc.Timestamp{WallTime: 1}) + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { + return tCtx.pts.WithTxn(txn).UpdateTimestamp(ctx, randomID(tCtx), hlc.Timestamp{WallTime: 1}) }) require.EqualError(t, err, protectedts.ErrNotExists.Error()) }), }, }, - { - name: "nil transaction errors", - ops: []op{ - funcOp(func(ctx context.Context, t *testing.T, tCtx *testContext) { - rec := newRecord(tCtx, tCtx.tc.Server(0).Clock().Now(), "", nil, tableTarget(42), tableSpan(42)) - const msg = "must provide a non-nil transaction" - require.Regexp(t, msg, tCtx.pts.Protect(ctx, nil /* txn */, &rec).Error()) - require.Regexp(t, msg, tCtx.pts.Release(ctx, nil /* txn */, uuid.MakeV4()).Error()) - require.Regexp(t, msg, tCtx.pts.MarkVerified(ctx, nil /* txn */, uuid.MakeV4()).Error()) - _, err := tCtx.pts.GetRecord(ctx, nil /* txn */, uuid.MakeV4()) - require.Regexp(t, msg, err.Error()) - _, err = tCtx.pts.GetMetadata(ctx, nil /* txn */) - require.Regexp(t, msg, err.Error()) - _, err = tCtx.pts.GetState(ctx, nil /* txn */) - require.Regexp(t, msg, err.Error()) - }), - }, - }, { name: "Protect using synthetic timestamp", ops: []op{ funcOp(func(ctx context.Context, t *testing.T, tCtx *testContext) { rec := newRecord(tCtx, tCtx.tc.Server(0).Clock().Now().WithSynthetic(true), "", nil, tableTarget(42), tableSpan(42)) - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn 
*kv.Txn) error { - return tCtx.pts.Protect(ctx, txn, &rec) + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return tCtx.pts.WithTxn(txn).Protect(ctx, &rec) }) require.NoError(t, err) // Synthetic should be reset when writing timestamps to make it @@ -345,9 +326,9 @@ var testCases = []testCase{ } type testContext struct { - pts protectedts.Storage + pts protectedts.Manager tc *testcluster.TestCluster - db *kv.DB + db isql.DB // If set to false, the test will be run with // `DisableProtectedTimestampForMultiTenant` set to true, thereby testing the @@ -374,8 +355,8 @@ type releaseOp struct { func (r releaseOp) run(ctx context.Context, t *testing.T, tCtx *testContext) { id := r.idFunc(tCtx) - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return tCtx.pts.Release(ctx, txn, id) + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return tCtx.pts.WithTxn(txn).Release(ctx, id) }) if !testutils.IsError(err, r.expErr) { t.Fatalf("expected error to match %q, got %q", r.expErr, err) @@ -411,8 +392,8 @@ type markVerifiedOp struct { func (mv markVerifiedOp) run(ctx context.Context, t *testing.T, tCtx *testContext) { id := mv.idFunc(tCtx) - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return tCtx.pts.MarkVerified(ctx, txn, id) + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return tCtx.pts.WithTxn(txn).MarkVerified(ctx, id) }) if !testutils.IsError(err, mv.expErr) { t.Fatalf("expected error to match %q, got %q", mv.expErr, err) @@ -439,8 +420,8 @@ func (p protectOp) run(ctx context.Context, t *testing.T, tCtx *testContext) { if p.idFunc != nil { rec.ID = p.idFunc(tCtx).GetBytes() } - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return tCtx.pts.Protect(ctx, txn, &rec) + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return tCtx.pts.WithTxn(txn).Protect(ctx, &rec) }) if !testutils.IsError(err, 
p.expErr) { t.Fatalf("expected error to match %q, got %q", p.expErr, err) @@ -475,8 +456,8 @@ type updateTimestampOp struct { func (p updateTimestampOp) run(ctx context.Context, t *testing.T, tCtx *testContext) { id := pickOneRecord(tCtx) - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return tCtx.pts.UpdateTimestamp(ctx, txn, id, p.updateTimestamp) + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return tCtx.pts.WithTxn(txn).UpdateTimestamp(ctx, id, p.updateTimestamp) }) if !testutils.IsError(err, p.expErr) { t.Fatalf("expected error to match %q, got %q", p.expErr, err) @@ -509,33 +490,27 @@ func (test testCase) run(t *testing.T) { defer tc.Stopper().Stop(ctx) s := tc.Server(0) - pts := ptstorage.New(s.ClusterSettings(), s.InternalExecutor().(*sql.InternalExecutor), ptsKnobs) - db := s.DB() + ptm := ptstorage.New(s.ClusterSettings(), ptsKnobs) tCtx := testContext{ - pts: pts, - db: db, + pts: ptm, + db: s.InternalDB().(isql.DB), tc: tc, runWithDeprecatedSpans: test.runWithDeprecatedSpans, } + pts := ptstorage.WithDatabase(ptm, s.InternalDB().(isql.DB)) verify := func(t *testing.T) { var state ptpb.State - require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { - state, err = pts.GetState(ctx, txn) - return err - })) - var md ptpb.Metadata - require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { - md, err = pts.GetMetadata(ctx, txn) - return err - })) + state, err := pts.GetState(ctx) + require.NoError(t, err) + + md, err := pts.GetMetadata(ctx) + require.NoError(t, err) require.EqualValues(t, tCtx.state, state) require.EqualValues(t, tCtx.state.Metadata, md) for _, r := range tCtx.state.Records { var rec *ptpb.Record - require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { - rec, err = pts.GetRecord(ctx, txn, r.ID.GetUUID()) - return err - })) + rec, err := pts.GetRecord(ctx, r.ID.GetUUID()) + require.NoError(t, err) 
require.EqualValues(t, &r, rec) } } @@ -645,15 +620,14 @@ func TestCorruptData(t *testing.T) { runCorruptDataTest := func(tCtx *testContext, s serverutils.TestServerInterface, tc *testcluster.TestCluster, pts protectedts.Storage) { rec := newRecord(tCtx, s.Clock().Now(), "foo", []byte("bar"), tableTarget(42), tableSpan(42)) - require.NoError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return pts.Protect(ctx, txn, &rec) - })) - ie := tc.Server(0).InternalExecutor().(sqlutil.InternalExecutor) + require.NoError(t, pts.Protect(ctx, &rec)) + + db := tc.Server(0).InternalDB().(isql.DB) updateQuery := "UPDATE system.protected_ts_records SET target = $1 WHERE id = $2" if tCtx.runWithDeprecatedSpans { updateQuery = "UPDATE system.protected_ts_records SET spans = $1 WHERE id = $2" } - affected, err := ie.ExecEx( + affected, err := db.Executor().ExecEx( ctx, "corrupt-data", nil, /* txn */ sessiondata.NodeUserSessionDataOverride, updateQuery, @@ -661,18 +635,16 @@ func TestCorruptData(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, affected) - var got *ptpb.Record + got, err := pts.GetRecord(ctx, rec.ID.GetUUID()) msg := regexp.MustCompile("failed to unmarshal (span|target) for " + rec.ID.String() + ": ") - require.Regexp(t, msg, - s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { - got, err = pts.GetRecord(ctx, txn, rec.ID.GetUUID()) - return err - }).Error()) + require.Regexp(t, msg, err) require.Nil(t, got) - require.NoError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { - _, err = pts.GetState(ctx, txn) - return err - })) + + { + _, err := pts.GetState(ctx) + require.NoError(t, err) + } + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 100, msg, log.WithFlattenedSensitiveData) @@ -701,11 +673,13 @@ func TestCorruptData(t *testing.T) { defer tc.Stopper().Stop(ctx) s := tc.Server(0) - pts := s.ExecutorConfig().(sql.ExecutorConfig).ProtectedTimestampProvider - + ptp := 
s.ExecutorConfig().(sql.ExecutorConfig).ProtectedTimestampProvider tCtx := &testContext{runWithDeprecatedSpans: true} - runCorruptDataTest(tCtx, s, tc, pts) + runCorruptDataTest(tCtx, s, tc, ptstorage.WithDatabase( + ptp, tc.Server(0).InternalDB().(isql.DB), + )) }) + t.Run("corrupt target", func(t *testing.T) { // Set the log scope so we can introspect the logged errors. scope := log.Scope(t) @@ -718,7 +692,10 @@ func TestCorruptData(t *testing.T) { s := tc.Server(0) pts := s.ExecutorConfig().(sql.ExecutorConfig).ProtectedTimestampProvider - runCorruptDataTest(&testContext{}, s, tc, pts) + runCorruptDataTest( + &testContext{}, s, tc, + ptstorage.WithDatabase(pts, s.InternalDB().(isql.DB)), + ) }) t.Run("corrupt hlc timestamp", func(t *testing.T) { // Set the log scope so we can introspect the logged errors. @@ -731,17 +708,15 @@ func TestCorruptData(t *testing.T) { defer tc.Stopper().Stop(ctx) s := tc.Server(0) - pts := s.ExecutorConfig().(sql.ExecutorConfig).ProtectedTimestampProvider - + ptp := s.ExecutorConfig().(sql.ExecutorConfig).ProtectedTimestampProvider + pts := ptstorage.WithDatabase(ptp, s.InternalDB().(isql.DB)) rec := newRecord(&testContext{}, s.Clock().Now(), "foo", []byte("bar"), tableTarget(42), tableSpan(42)) - require.NoError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return pts.Protect(ctx, txn, &rec) - })) + require.NoError(t, pts.Protect(ctx, &rec)) // This timestamp has too many logical digits and thus will fail parsing. 
var d tree.DDecimal d.SetFinite(math.MaxInt32, -12) - ie := tc.Server(0).InternalExecutor().(sqlutil.InternalExecutor) + ie := tc.Server(0).InternalExecutor().(isql.Executor) affected, err := ie.ExecEx( ctx, "corrupt-data", nil, /* txn */ sessiondata.NodeUserSessionDataOverride, @@ -750,19 +725,13 @@ func TestCorruptData(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, affected) - var got *ptpb.Record msg := regexp.MustCompile("failed to parse timestamp for " + rec.ID.String() + ": logical part has too many digits") - require.Regexp(t, msg, - s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { - got, err = pts.GetRecord(ctx, txn, rec.ID.GetUUID()) - return err - })) + got, err := pts.GetRecord(ctx, rec.ID.GetUUID()) + require.Regexp(t, msg, err) require.Nil(t, got) - require.NoError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { - _, err = pts.GetState(ctx, txn) - return err - })) + _, err = pts.GetState(ctx) + require.NoError(t, err) log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 100, msg, @@ -775,7 +744,7 @@ func TestCorruptData(t *testing.T) { }) } -// TestErrorsFromSQL ensures that errors from the underlying InternalExecutor +// TestErrorsFromSQL ensures that errors from the underlying Executor // are properly transmitted back to the client. 
func TestErrorsFromSQL(t *testing.T) { ctx := context.Background() @@ -785,66 +754,71 @@ func TestErrorsFromSQL(t *testing.T) { defer tc.Stopper().Stop(ctx) s := tc.Server(0) - ie := s.InternalExecutor().(sqlutil.InternalExecutor) - wrappedIE := &wrappedInternalExecutor{wrapped: ie} - pts := ptstorage.New(s.ClusterSettings(), wrappedIE, &protectedts.TestingKnobs{}) - - wrappedIE.setErrFunc(func(string) error { - return errors.New("boom") - }) + pts := ptstorage.New(s.ClusterSettings(), &protectedts.TestingKnobs{}) + db := s.InternalDB().(isql.DB) + errFunc := func(string) error { return errors.New("boom") } rec := newRecord(&testContext{}, s.Clock().Now(), "foo", []byte("bar"), tableTarget(42), tableSpan(42)) - require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return pts.Protect(ctx, txn, &rec) + require.EqualError(t, db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return pts.WithTxn(wrapTxn(txn, errFunc)).Protect(ctx, &rec) }), fmt.Sprintf("failed to write record %v: boom", rec.ID)) - require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - _, err := pts.GetRecord(ctx, txn, rec.ID.GetUUID()) + require.EqualError(t, db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err := pts.WithTxn(wrapTxn(txn, errFunc)).GetRecord(ctx, rec.ID.GetUUID()) return err }), fmt.Sprintf("failed to read record %v: boom", rec.ID)) - require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return pts.MarkVerified(ctx, txn, rec.ID.GetUUID()) + require.EqualError(t, db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return pts.WithTxn(wrapTxn(txn, errFunc)).MarkVerified(ctx, rec.ID.GetUUID()) }), fmt.Sprintf("failed to mark record %v as verified: boom", rec.ID)) - require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return pts.Release(ctx, txn, rec.ID.GetUUID()) + require.EqualError(t, db.Txn(ctx, func(ctx context.Context, 
txn isql.Txn) error { + return pts.WithTxn(wrapTxn(txn, errFunc)).Release(ctx, rec.ID.GetUUID()) }), fmt.Sprintf("failed to release record %v: boom", rec.ID)) - require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - _, err := pts.GetMetadata(ctx, txn) + require.EqualError(t, db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err := pts.WithTxn(wrapTxn(txn, errFunc)).GetMetadata(ctx) return err }), "failed to read metadata: boom") - require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - _, err := pts.GetState(ctx, txn) + require.EqualError(t, db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err := pts.WithTxn(wrapTxn(txn, errFunc)).GetState(ctx) return err }), "failed to read metadata: boom") // Test that we get an error retrieving the records in GetState. - // The preceding call tested the error while retriving the metadata in a + // The preceding call tested the error while retrieving the metadata in a // call to GetState. var seen bool - wrappedIE.setErrFunc(func(string) error { + errFunc = func(string) error { if !seen { seen = true return nil } return errors.New("boom") - }) - require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - _, err := pts.GetState(ctx, txn) + } + require.EqualError(t, db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err := pts.WithTxn(wrapTxn(txn, errFunc)).GetState(ctx) return err }), "failed to read records: boom") } -// wrappedInternalExecutor allows errors to be injected in SQL execution. -type wrappedInternalExecutor struct { - wrapped sqlutil.InternalExecutor +// wrappedInternalTxn allows errors to be injected in SQL execution. 
+type wrappedInternalTxn struct { + wrapped isql.Txn - mu struct { - syncutil.RWMutex - errFunc func(statement string) error - } + errFunc func(statement string) error } -func (ie *wrappedInternalExecutor) QueryBufferedExWithCols( +func (txn *wrappedInternalTxn) KV() *kv.Txn { + return txn.wrapped.KV() +} + +func (txn *wrappedInternalTxn) SessionData() *sessiondata.SessionData { + return txn.wrapped.SessionData() +} + +func wrapTxn(txn isql.Txn, errFunc func(statement string) error) *wrappedInternalTxn { + return &wrappedInternalTxn{wrapped: txn, errFunc: errFunc} +} + +func (txn *wrappedInternalTxn) QueryBufferedExWithCols( ctx context.Context, opName string, - txn *kv.Txn, + _ *kv.Txn, session sessiondata.InternalExecutorOverride, stmt string, qargs ...interface{}, @@ -852,56 +826,56 @@ func (ie *wrappedInternalExecutor) QueryBufferedExWithCols( panic("unimplemented") } -var _ sqlutil.InternalExecutor = &wrappedInternalExecutor{} +var _ isql.Executor = &wrappedInternalTxn{} -func (ie *wrappedInternalExecutor) Exec( - ctx context.Context, opName string, txn *kv.Txn, statement string, params ...interface{}, +func (txn *wrappedInternalTxn) Exec( + ctx context.Context, opName string, _ *kv.Txn, statement string, params ...interface{}, ) (int, error) { panic("unimplemented") } -func (ie *wrappedInternalExecutor) ExecEx( +func (txn *wrappedInternalTxn) ExecEx( ctx context.Context, opName string, - txn *kv.Txn, + kvTxn *kv.Txn, o sessiondata.InternalExecutorOverride, stmt string, qargs ...interface{}, ) (int, error) { - if f := ie.getErrFunc(); f != nil { + if f := txn.errFunc; f != nil { if err := f(stmt); err != nil { return 0, err } } - return ie.wrapped.ExecEx(ctx, opName, txn, o, stmt, qargs...) + return txn.wrapped.ExecEx(ctx, opName, kvTxn, o, stmt, qargs...) 
} -func (ie *wrappedInternalExecutor) QueryRowEx( +func (txn *wrappedInternalTxn) QueryRowEx( ctx context.Context, opName string, - txn *kv.Txn, + kvTxn *kv.Txn, session sessiondata.InternalExecutorOverride, stmt string, qargs ...interface{}, ) (tree.Datums, error) { - if f := ie.getErrFunc(); f != nil { + if f := txn.errFunc; f != nil { if err := f(stmt); err != nil { return nil, err } } - return ie.wrapped.QueryRowEx(ctx, opName, txn, session, stmt, qargs...) + return txn.wrapped.QueryRowEx(ctx, opName, kvTxn, session, stmt, qargs...) } -func (ie *wrappedInternalExecutor) QueryRow( - ctx context.Context, opName string, txn *kv.Txn, statement string, qargs ...interface{}, +func (txn *wrappedInternalTxn) QueryRow( + ctx context.Context, opName string, _ *kv.Txn, statement string, qargs ...interface{}, ) (tree.Datums, error) { panic("not implemented") } -func (ie *wrappedInternalExecutor) QueryRowExWithCols( +func (txn *wrappedInternalTxn) QueryRowExWithCols( ctx context.Context, opName string, - txn *kv.Txn, + _ *kv.Txn, session sessiondata.InternalExecutorOverride, stmt string, qargs ...interface{}, @@ -909,16 +883,16 @@ func (ie *wrappedInternalExecutor) QueryRowExWithCols( panic("not implemented") } -func (ie *wrappedInternalExecutor) QueryBuffered( - ctx context.Context, opName string, txn *kv.Txn, stmt string, qargs ...interface{}, +func (txn *wrappedInternalTxn) QueryBuffered( + ctx context.Context, opName string, _ *kv.Txn, stmt string, qargs ...interface{}, ) ([]tree.Datums, error) { panic("not implemented") } -func (ie *wrappedInternalExecutor) QueryBufferedEx( +func (txn *wrappedInternalTxn) QueryBufferedEx( ctx context.Context, opName string, - txn *kv.Txn, + _ *kv.Txn, session sessiondata.InternalExecutorOverride, stmt string, qargs ...interface{}, @@ -926,41 +900,29 @@ func (ie *wrappedInternalExecutor) QueryBufferedEx( panic("not implemented") } -func (ie *wrappedInternalExecutor) QueryIterator( - ctx context.Context, opName string, txn *kv.Txn, stmt 
string, qargs ...interface{}, -) (sqlutil.InternalRows, error) { +func (txn *wrappedInternalTxn) QueryIterator( + ctx context.Context, opName string, _ *kv.Txn, stmt string, qargs ...interface{}, +) (isql.Rows, error) { panic("not implemented") } -func (ie *wrappedInternalExecutor) QueryIteratorEx( +func (txn *wrappedInternalTxn) QueryIteratorEx( ctx context.Context, opName string, - txn *kv.Txn, + kvTxn *kv.Txn, session sessiondata.InternalExecutorOverride, stmt string, qargs ...interface{}, -) (sqlutil.InternalRows, error) { - if f := ie.getErrFunc(); f != nil { +) (isql.Rows, error) { + if f := txn.errFunc; f != nil { if err := f(stmt); err != nil { return nil, err } } - return ie.wrapped.QueryIteratorEx(ctx, opName, txn, session, stmt, qargs...) -} - -func (ie *wrappedInternalExecutor) getErrFunc() func(statement string) error { - ie.mu.RLock() - defer ie.mu.RUnlock() - return ie.mu.errFunc -} - -func (ie *wrappedInternalExecutor) setErrFunc(f func(statement string) error) { - ie.mu.Lock() - defer ie.mu.Unlock() - ie.mu.errFunc = f + return txn.wrapped.QueryIteratorEx(ctx, opName, kvTxn, session, stmt, qargs...) 
} -func (ie *wrappedInternalExecutor) WithSyntheticDescriptors( +func (txn *wrappedInternalTxn) WithSyntheticDescriptors( descs []catalog.Descriptor, run func() error, ) error { panic("not implemented") diff --git a/pkg/kv/kvserver/protectedts/ptstorage/storage_with_database.go b/pkg/kv/kvserver/protectedts/ptstorage/storage_with_database.go index 8eb62e7650a9..756e414f562b 100644 --- a/pkg/kv/kvserver/protectedts/ptstorage/storage_with_database.go +++ b/pkg/kv/kvserver/protectedts/ptstorage/storage_with_database.go @@ -13,98 +13,69 @@ package ptstorage import ( "context" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/uuid" ) // WithDatabase wraps s such that any calls made with a nil *Txn will be wrapped // in a call to db.Txn. This is often convenient in testing. 
-func WithDatabase(s protectedts.Storage, db *kv.DB) protectedts.Storage { +func WithDatabase(s protectedts.Manager, db isql.DB) protectedts.Storage { return &storageWithDatabase{s: s, db: db} } type storageWithDatabase struct { - db *kv.DB - s protectedts.Storage + db isql.DB + s protectedts.Manager } -func (s *storageWithDatabase) Protect(ctx context.Context, txn *kv.Txn, r *ptpb.Record) error { - if txn == nil { - return s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return s.s.Protect(ctx, txn, r) - }) - } - return s.s.Protect(ctx, txn, r) +func (s *storageWithDatabase) Protect(ctx context.Context, r *ptpb.Record) error { + return s.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return s.s.WithTxn(txn).Protect(ctx, r) + }) } func (s *storageWithDatabase) GetRecord( - ctx context.Context, txn *kv.Txn, id uuid.UUID, -) (r *ptpb.Record, err error) { - if txn == nil { - err = s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - r, err = s.s.GetRecord(ctx, txn, id) - return err - }) - return r, err - } - return s.s.GetRecord(ctx, txn, id) + ctx context.Context, id uuid.UUID, +) (r *ptpb.Record, _ error) { + return r, s.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { + r, err = s.s.WithTxn(txn).GetRecord(ctx, id) + return err + }) } -func (s *storageWithDatabase) MarkVerified(ctx context.Context, txn *kv.Txn, id uuid.UUID) error { - if txn == nil { - return s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return s.s.Release(ctx, txn, id) - }) - } - return s.s.Release(ctx, txn, id) +func (s *storageWithDatabase) MarkVerified(ctx context.Context, id uuid.UUID) error { + return s.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return s.s.WithTxn(txn).MarkVerified(ctx, id) + }) } -func (s *storageWithDatabase) Release(ctx context.Context, txn *kv.Txn, id uuid.UUID) error { - if txn == nil { - return s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return s.s.Release(ctx, 
txn, id) - }) - } - return s.s.Release(ctx, txn, id) +func (s *storageWithDatabase) Release(ctx context.Context, id uuid.UUID) error { + return s.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return s.s.WithTxn(txn).Release(ctx, id) + }) } -func (s *storageWithDatabase) GetMetadata( - ctx context.Context, txn *kv.Txn, -) (md ptpb.Metadata, err error) { - if txn == nil { - err = s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - md, err = s.s.GetMetadata(ctx, txn) - return err - }) - return md, err - } - return s.s.GetMetadata(ctx, txn) +func (s *storageWithDatabase) GetMetadata(ctx context.Context) (md ptpb.Metadata, _ error) { + return md, s.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { + md, err = s.s.WithTxn(txn).GetMetadata(ctx) + return err + }) } -func (s *storageWithDatabase) GetState( - ctx context.Context, txn *kv.Txn, -) (state ptpb.State, err error) { - if txn == nil { - err = s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - state, err = s.s.GetState(ctx, txn) - return err - }) - return state, err - } - return s.s.GetState(ctx, txn) +func (s *storageWithDatabase) GetState(ctx context.Context) (state ptpb.State, err error) { + return state, s.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { + state, err = s.s.WithTxn(txn).GetState(ctx) + return err + }) } func (s *storageWithDatabase) UpdateTimestamp( - ctx context.Context, txn *kv.Txn, id uuid.UUID, timestamp hlc.Timestamp, + ctx context.Context, id uuid.UUID, timestamp hlc.Timestamp, ) (err error) { - if txn == nil { - err = s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return s.s.UpdateTimestamp(ctx, txn, id, timestamp) - }) - return err - } - return s.s.UpdateTimestamp(ctx, txn, id, timestamp) + return s.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { + return s.s.WithTxn(txn).UpdateTimestamp(ctx, id, timestamp) + }) } diff --git a/pkg/kv/kvserver/rangelog/BUILD.bazel 
b/pkg/kv/kvserver/rangelog/BUILD.bazel index 3c0590ebbf6f..7c9aac44c758 100644 --- a/pkg/kv/kvserver/rangelog/BUILD.bazel +++ b/pkg/kv/kvserver/rangelog/BUILD.bazel @@ -43,9 +43,9 @@ go_test( "//pkg/sql/catalog", "//pkg/sql/catalog/systemschema", "//pkg/sql/catalog/tabledesc", + "//pkg/sql/isql", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/testutils/serverutils", "//pkg/testutils/sqlutils", "//pkg/testutils/testcluster", diff --git a/pkg/kv/kvserver/rangelog/internal_executor_writer_test.go b/pkg/kv/kvserver/rangelog/internal_executor_writer_test.go index 621aea4003bd..027ce1d2e8eb 100644 --- a/pkg/kv/kvserver/rangelog/internal_executor_writer_test.go +++ b/pkg/kv/kvserver/rangelog/internal_executor_writer_test.go @@ -17,23 +17,23 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/errors" ) // InternalExecutorWriter implements kvserver.RangeLogWriter -// using the InternalExecutor. +// using the Executor. type InternalExecutorWriter struct { generateUniqueID func() int64 - ie sqlutil.InternalExecutor + ie isql.Executor insertQuery string } // NewInternalExecutorWriter returns a new InternalExecutorWriter which -// implements kvserver.RangeLogWriter using the InternalExecutor. +// implements kvserver.RangeLogWriter using the Executor. 
func NewInternalExecutorWriter( - generateUniqueID func() int64, ie sqlutil.InternalExecutor, tableName string, + generateUniqueID func() int64, ie isql.Executor, tableName string, ) *InternalExecutorWriter { return &InternalExecutorWriter{ generateUniqueID: generateUniqueID, diff --git a/pkg/kv/kvserver/rangelog/rangelog.go b/pkg/kv/kvserver/rangelog/rangelog.go index 9e7e6daf4699..7320cf59b4ce 100644 --- a/pkg/kv/kvserver/rangelog/rangelog.go +++ b/pkg/kv/kvserver/rangelog/rangelog.go @@ -26,7 +26,7 @@ import ( "github.com/cockroachdb/errors" ) -// Writer implements kvserver.RangeLogWriter using the InternalExecutor. +// Writer implements kvserver.RangeLogWriter using the Executor. type Writer struct { generateUniqueID IDGen w bootstrap.KVWriter diff --git a/pkg/kv/kvserver/rangelog/rangelog_test.go b/pkg/kv/kvserver/rangelog/rangelog_test.go index c5325407f398..37db3b6b470e 100644 --- a/pkg/kv/kvserver/rangelog/rangelog_test.go +++ b/pkg/kv/kvserver/rangelog/rangelog_test.go @@ -78,7 +78,7 @@ func TestRangeLog(t *testing.T) { // Write the data. 
ec := s.ExecutorConfig().(sql.ExecutorConfig) codec := ec.Codec - ie := ec.InternalExecutor + ie := ec.InternalDB.Executor() mkWriter := func(genID func() int64) kvserver.RangeLogWriter { genA, genB := makeTeeIDGen(genID) return &teeWriter{ diff --git a/pkg/kv/kvserver/reports/BUILD.bazel b/pkg/kv/kvserver/reports/BUILD.bazel index 8a7049812f8c..4ecf233423c2 100644 --- a/pkg/kv/kvserver/reports/BUILD.bazel +++ b/pkg/kv/kvserver/reports/BUILD.bazel @@ -28,9 +28,9 @@ go_library( "//pkg/sql/catalog/catalogkeys", "//pkg/sql/catalog/descbuilder", "//pkg/sql/catalog/descpb", + "//pkg/sql/isql", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/util/log", "//pkg/util/stop", "//pkg/util/syncutil", @@ -75,7 +75,7 @@ go_test( "//pkg/sql/catalog/descbuilder", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/tabledesc", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/sql/types", "//pkg/testutils", "//pkg/testutils/keysutils", diff --git a/pkg/kv/kvserver/reports/constraint_stats_report.go b/pkg/kv/kvserver/reports/constraint_stats_report.go index 353a89dbb139..10375b9cd683 100644 --- a/pkg/kv/kvserver/reports/constraint_stats_report.go +++ b/pkg/kv/kvserver/reports/constraint_stats_report.go @@ -22,8 +22,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) @@ -159,7 +159,7 @@ func (r ConstraintReport) ensureEntries(key ZoneKey, zone *zonepb.ZoneConfig) { } func (r *replicationConstraintStatsReportSaver) loadPreviousVersion( - ctx context.Context, ex sqlutil.InternalExecutor, txn *kv.Txn, + ctx context.Context, ex isql.Executor, txn *kv.Txn, ) error { // The data for the previous save needs to be loaded if: // - this is the 
first time that we call this method and lastUpdatedAt has never been set @@ -201,7 +201,7 @@ func (r *replicationConstraintStatsReportSaver) loadPreviousVersion( } func (r *replicationConstraintStatsReportSaver) updateTimestamp( - ctx context.Context, ex sqlutil.InternalExecutor, txn *kv.Txn, reportTS time.Time, + ctx context.Context, ex isql.Executor, txn *kv.Txn, reportTS time.Time, ) error { if !r.lastGenerated.IsZero() && reportTS == r.lastGenerated { return errors.Errorf( @@ -228,11 +228,7 @@ func (r *replicationConstraintStatsReportSaver) updateTimestamp( // takes ownership. // reportTS is the time that will be set in the updated_at column for every row. func (r *replicationConstraintStatsReportSaver) Save( - ctx context.Context, - report ConstraintReport, - reportTS time.Time, - db *kv.DB, - ex sqlutil.InternalExecutor, + ctx context.Context, report ConstraintReport, reportTS time.Time, db *kv.DB, ex isql.Executor, ) error { r.lastUpdatedRowCount = 0 if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { @@ -296,7 +292,7 @@ func (r *replicationConstraintStatsReportSaver) upsertConstraintStatus( key ConstraintStatusKey, violationCount int, db *kv.DB, - ex sqlutil.InternalExecutor, + ex isql.Executor, ) error { var err error previousStatus, hasOldVersion := r.previousVersion[key] diff --git a/pkg/kv/kvserver/reports/constraint_stats_report_test.go b/pkg/kv/kvserver/reports/constraint_stats_report_test.go index b89d009bdd70..345f003f3afc 100644 --- a/pkg/kv/kvserver/reports/constraint_stats_report_test.go +++ b/pkg/kv/kvserver/reports/constraint_stats_report_test.go @@ -38,7 +38,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descbuilder" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/types" 
"github.com/cockroachdb/cockroach/pkg/testutils/keysutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -730,7 +730,7 @@ func TestConstraintReport(t *testing.T) { // doesn't interfere with the test. ReporterInterval.Override(ctx, &st.SV, 0) s, _, db := serverutils.StartServer(t, base.TestServerArgs{Settings: st}) - con := s.InternalExecutor().(sqlutil.InternalExecutor) + con := s.InternalExecutor().(isql.Executor) defer s.Stopper().Stop(ctx) // Verify that tables are empty. diff --git a/pkg/kv/kvserver/reports/critical_localities_report.go b/pkg/kv/kvserver/reports/critical_localities_report.go index 2730c58bcdac..486fbc09cab4 100644 --- a/pkg/kv/kvserver/reports/critical_localities_report.go +++ b/pkg/kv/kvserver/reports/critical_localities_report.go @@ -19,8 +19,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/errors" ) @@ -83,7 +83,7 @@ func (r LocalityReport) CountRangeAtRisk(zKey ZoneKey, loc LocalityRepr) { } func (r *replicationCriticalLocalitiesReportSaver) loadPreviousVersion( - ctx context.Context, ex sqlutil.InternalExecutor, txn *kv.Txn, + ctx context.Context, ex isql.Executor, txn *kv.Txn, ) error { // The data for the previous save needs to be loaded if: // - this is the first time that we call this method and lastUpdatedAt has never been set @@ -124,7 +124,7 @@ func (r *replicationCriticalLocalitiesReportSaver) loadPreviousVersion( } func (r *replicationCriticalLocalitiesReportSaver) updateTimestamp( - ctx context.Context, ex sqlutil.InternalExecutor, txn *kv.Txn, reportTS time.Time, + ctx context.Context, ex isql.Executor, txn *kv.Txn, reportTS time.Time, ) error { if !r.lastGenerated.IsZero() && reportTS == r.lastGenerated { return errors.Errorf( @@ 
-151,11 +151,7 @@ func (r *replicationCriticalLocalitiesReportSaver) updateTimestamp( // takes ownership. // reportTS is the time that will be set in the updated_at column for every row. func (r *replicationCriticalLocalitiesReportSaver) Save( - ctx context.Context, - report LocalityReport, - reportTS time.Time, - db *kv.DB, - ex sqlutil.InternalExecutor, + ctx context.Context, report LocalityReport, reportTS time.Time, db *kv.DB, ex isql.Executor, ) error { r.lastUpdatedRowCount = 0 if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { @@ -218,7 +214,7 @@ func (r *replicationCriticalLocalitiesReportSaver) upsertLocality( key localityKey, status localityStatus, db *kv.DB, - ex sqlutil.InternalExecutor, + ex isql.Executor, ) error { var err error previousStatus, hasOldVersion := r.previousVersion[key] diff --git a/pkg/kv/kvserver/reports/critical_localities_report_test.go b/pkg/kv/kvserver/reports/critical_localities_report_test.go index 391d5fce26bd..15bb95ca8bb8 100644 --- a/pkg/kv/kvserver/reports/critical_localities_report_test.go +++ b/pkg/kv/kvserver/reports/critical_localities_report_test.go @@ -20,7 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -181,7 +181,7 @@ func TestCriticalLocalitiesSaving(t *testing.T) { // doesn't interfere with the test. ReporterInterval.Override(ctx, &st.SV, 0) s, _, db := serverutils.StartServer(t, base.TestServerArgs{Settings: st}) - con := s.InternalExecutor().(sqlutil.InternalExecutor) + con := s.InternalExecutor().(isql.Executor) defer s.Stopper().Stop(ctx) // Verify that tables are empty. 
@@ -288,9 +288,7 @@ func TestCriticalLocalitiesSaving(t *testing.T) { } // TableData reads a table and returns the rows as strings. -func TableData( - ctx context.Context, tableName string, executor sqlutil.InternalExecutor, -) [][]string { +func TableData(ctx context.Context, tableName string, executor isql.Executor) [][]string { if it, err := executor.QueryIterator( ctx, "test-select-"+tableName, nil /* txn */, "select * from "+tableName, ); err == nil { diff --git a/pkg/kv/kvserver/reports/replication_stats_report.go b/pkg/kv/kvserver/reports/replication_stats_report.go index ee5f08d4218e..58aa25e2483c 100644 --- a/pkg/kv/kvserver/reports/replication_stats_report.go +++ b/pkg/kv/kvserver/reports/replication_stats_report.go @@ -20,8 +20,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) @@ -88,7 +88,7 @@ func (r RangeReport) CountRange(zKey ZoneKey, status roachpb.RangeStatusReport) } func (r *replicationStatsReportSaver) loadPreviousVersion( - ctx context.Context, ex sqlutil.InternalExecutor, txn *kv.Txn, + ctx context.Context, ex isql.Executor, txn *kv.Txn, ) error { // The data for the previous save needs to be loaded if: // - this is the first time that we call this method and lastUpdatedAt has never been set @@ -134,7 +134,7 @@ func (r *replicationStatsReportSaver) loadPreviousVersion( } func (r *replicationStatsReportSaver) updateTimestamp( - ctx context.Context, ex sqlutil.InternalExecutor, txn *kv.Txn, reportTS time.Time, + ctx context.Context, ex isql.Executor, txn *kv.Txn, reportTS time.Time, ) error { if !r.lastGenerated.IsZero() && reportTS == r.lastGenerated { return errors.Errorf( @@ -161,11 +161,7 @@ func (r 
*replicationStatsReportSaver) updateTimestamp( // takes ownership. // reportTS is the time that will be set in the updated_at column for every row. func (r *replicationStatsReportSaver) Save( - ctx context.Context, - report RangeReport, - reportTS time.Time, - db *kv.DB, - ex sqlutil.InternalExecutor, + ctx context.Context, report RangeReport, reportTS time.Time, db *kv.DB, ex isql.Executor, ) error { r.lastUpdatedRowCount = 0 if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { @@ -217,7 +213,7 @@ func (r *replicationStatsReportSaver) Save( // upsertStat upserts a row into system.replication_stats. func (r *replicationStatsReportSaver) upsertStats( - ctx context.Context, txn *kv.Txn, key ZoneKey, stats zoneRangeStatus, ex sqlutil.InternalExecutor, + ctx context.Context, txn *kv.Txn, key ZoneKey, stats zoneRangeStatus, ex isql.Executor, ) error { var err error previousStats, hasOldVersion := r.previousVersion[key] diff --git a/pkg/kv/kvserver/reports/replication_stats_report_test.go b/pkg/kv/kvserver/reports/replication_stats_report_test.go index a00aebcbdf8d..1852380abca0 100644 --- a/pkg/kv/kvserver/reports/replication_stats_report_test.go +++ b/pkg/kv/kvserver/reports/replication_stats_report_test.go @@ -20,7 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -38,7 +38,7 @@ func TestRangeReport(t *testing.T) { // doesn't interfere with the test. 
ReporterInterval.Override(ctx, &st.SV, 0) s, _, db := serverutils.StartServer(t, base.TestServerArgs{Settings: st}) - con := s.InternalExecutor().(sqlutil.InternalExecutor) + con := s.InternalExecutor().(isql.Executor) defer s.Stopper().Stop(ctx) // Verify that tables are empty. diff --git a/pkg/kv/kvserver/reports/reporter.go b/pkg/kv/kvserver/reports/reporter.go index 52c73d168995..e844231c14be 100644 --- a/pkg/kv/kvserver/reports/reporter.go +++ b/pkg/kv/kvserver/reports/reporter.go @@ -31,9 +31,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descbuilder" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/syncutil" @@ -74,7 +74,7 @@ type Reporter struct { liveness *liveness.NodeLiveness settings *cluster.Settings storePool *storepool.StorePool - executor sqlutil.InternalExecutor + executor isql.Executor cfgs config.SystemConfigProvider frequencyMu struct { @@ -91,7 +91,7 @@ func NewReporter( storePool *storepool.StorePool, st *cluster.Settings, liveness *liveness.NodeLiveness, - executor sqlutil.InternalExecutor, + executor isql.Executor, provider config.SystemConfigProvider, ) *Reporter { r := Reporter{ @@ -796,7 +796,7 @@ type reportID int // getReportGenerationTime returns the time at a particular report was last // generated. Returns time.Time{} if the report is not found. 
func getReportGenerationTime( - ctx context.Context, rid reportID, ex sqlutil.InternalExecutor, txn *kv.Txn, + ctx context.Context, rid reportID, ex isql.Executor, txn *kv.Txn, ) (time.Time, error) { row, err := ex.QueryRowEx( ctx, diff --git a/pkg/multitenant/BUILD.bazel b/pkg/multitenant/BUILD.bazel index 770cf8befb89..bd57fd4d507d 100644 --- a/pkg/multitenant/BUILD.bazel +++ b/pkg/multitenant/BUILD.bazel @@ -13,12 +13,11 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/base", - "//pkg/kv", "//pkg/multitenant/tenantcostmodel", "//pkg/roachpb", "//pkg/settings", + "//pkg/sql/isql", "//pkg/sql/sqlliveness", - "//pkg/sql/sqlutil", "//pkg/util/metric", "//pkg/util/stop", ], diff --git a/pkg/multitenant/tenant_usage.go b/pkg/multitenant/tenant_usage.go index 7078bf2d54ab..c49a2f90ae71 100644 --- a/pkg/multitenant/tenant_usage.go +++ b/pkg/multitenant/tenant_usage.go @@ -14,9 +14,8 @@ import ( "context" "time" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/metric" ) @@ -55,8 +54,7 @@ type TenantUsageServer interface { // ReconfigureTokenBucket( ctx context.Context, - txn *kv.Txn, - ie sqlutil.InternalExecutor, + ie isql.Txn, tenantID roachpb.TenantID, availableRU float64, refillRate float64, diff --git a/pkg/repstream/BUILD.bazel b/pkg/repstream/BUILD.bazel index 7608230a5823..f068e6b62dc0 100644 --- a/pkg/repstream/BUILD.bazel +++ b/pkg/repstream/BUILD.bazel @@ -7,7 +7,7 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/repstream", visibility = ["//visibility:public"], deps = [ - "//pkg/kv", + "//pkg/sql/isql", "//pkg/sql/sem/eval", "@com_github_cockroachdb_errors//:errors", ], diff --git a/pkg/repstream/api.go b/pkg/repstream/api.go index 40a4c17b9294..b45a0e704766 100644 --- a/pkg/repstream/api.go +++ b/pkg/repstream/api.go @@ -13,22 +13,22 @@ 
package repstream import ( "context" - "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/errors" ) // GetReplicationStreamManagerHook is the hook to get access to the producer side replication APIs. // Used by builtin functions to trigger streaming replication. -var GetReplicationStreamManagerHook func(ctx context.Context, evalCtx *eval.Context, txn *kv.Txn) (eval.ReplicationStreamManager, error) +var GetReplicationStreamManagerHook func(ctx context.Context, evalCtx *eval.Context, txn isql.Txn) (eval.ReplicationStreamManager, error) // GetStreamIngestManagerHook is the hook to get access to the ingestion side replication APIs. // Used by builtin functions to trigger streaming replication. -var GetStreamIngestManagerHook func(ctx context.Context, evalCtx *eval.Context, txn *kv.Txn) (eval.StreamIngestManager, error) +var GetStreamIngestManagerHook func(ctx context.Context, evalCtx *eval.Context, txn isql.Txn) (eval.StreamIngestManager, error) // GetReplicationStreamManager returns a ReplicationStreamManager if a CCL binary is loaded. func GetReplicationStreamManager( - ctx context.Context, evalCtx *eval.Context, txn *kv.Txn, + ctx context.Context, evalCtx *eval.Context, txn isql.Txn, ) (eval.ReplicationStreamManager, error) { if GetReplicationStreamManagerHook == nil { return nil, errors.New("replication streaming requires a CCL binary") @@ -38,7 +38,7 @@ func GetReplicationStreamManager( // GetStreamIngestManager returns a StreamIngestManager if a CCL binary is loaded. 
func GetStreamIngestManager( - ctx context.Context, evalCtx *eval.Context, txn *kv.Txn, + ctx context.Context, evalCtx *eval.Context, txn isql.Txn, ) (eval.StreamIngestManager, error) { if GetReplicationStreamManagerHook == nil { return nil, errors.New("replication streaming requires a CCL binary") diff --git a/pkg/scheduledjobs/BUILD.bazel b/pkg/scheduledjobs/BUILD.bazel index 061d11d222e2..a0debd525841 100644 --- a/pkg/scheduledjobs/BUILD.bazel +++ b/pkg/scheduledjobs/BUILD.bazel @@ -12,7 +12,7 @@ go_library( "//pkg/kv/kvserver/protectedts", "//pkg/security/username", "//pkg/settings/cluster", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/util/hlc", "//pkg/util/timeutil", ], diff --git a/pkg/scheduledjobs/env.go b/pkg/scheduledjobs/env.go index 6e76ef16b000..1126de83dfcd 100644 --- a/pkg/scheduledjobs/env.go +++ b/pkg/scheduledjobs/env.go @@ -19,7 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/timeutil" ) @@ -43,9 +43,8 @@ type JobSchedulerEnv interface { // JobExecutionConfig encapsulates external components needed for scheduled job execution. type JobExecutionConfig struct { - Settings *cluster.Settings - InternalExecutor sqlutil.InternalExecutor - DB *kv.DB + Settings *cluster.Settings + DB isql.DB // TestingKnobs is *jobs.TestingKnobs; however we cannot depend // on jobs package due to circular dependencies. TestingKnobs base.ModuleTestingKnobs @@ -89,31 +88,22 @@ func (e *prodJobSchedulerEnvImpl) IsExecutorEnabled(name string) bool { // ScheduleControllerEnv is an environment for controlling (DROP, PAUSE) // scheduled jobs. 
type ScheduleControllerEnv interface { - InternalExecutor() sqlutil.InternalExecutor - PTSProvider() protectedts.Provider + PTSProvider() protectedts.Storage } // ProdScheduleControllerEnvImpl is the production implementation of // ScheduleControllerEnv. type ProdScheduleControllerEnvImpl struct { - pts protectedts.Provider - ie sqlutil.InternalExecutor + pts protectedts.Storage } // MakeProdScheduleControllerEnv returns a ProdScheduleControllerEnvImpl // instance. -func MakeProdScheduleControllerEnv( - pts protectedts.Provider, ie sqlutil.InternalExecutor, -) *ProdScheduleControllerEnvImpl { - return &ProdScheduleControllerEnvImpl{pts: pts, ie: ie} -} - -// InternalExecutor implements the ScheduleControllerEnv interface. -func (c *ProdScheduleControllerEnvImpl) InternalExecutor() sqlutil.InternalExecutor { - return c.ie +func MakeProdScheduleControllerEnv(pts protectedts.Storage) *ProdScheduleControllerEnvImpl { + return &ProdScheduleControllerEnvImpl{pts: pts} } // PTSProvider implements the ScheduleControllerEnv interface. 
-func (c *ProdScheduleControllerEnvImpl) PTSProvider() protectedts.Provider { +func (c *ProdScheduleControllerEnvImpl) PTSProvider() protectedts.Storage { return c.pts } diff --git a/pkg/scheduledjobs/schedulebase/BUILD.bazel b/pkg/scheduledjobs/schedulebase/BUILD.bazel index b1da702c55e9..66b0f222daed 100644 --- a/pkg/scheduledjobs/schedulebase/BUILD.bazel +++ b/pkg/scheduledjobs/schedulebase/BUILD.bazel @@ -8,12 +8,12 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/jobs/jobspb", - "//pkg/kv", "//pkg/scheduledjobs", "//pkg/sql", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/descs", "//pkg/sql/catalog/resolver", + "//pkg/sql/isql", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/sem/tree", diff --git a/pkg/scheduledjobs/schedulebase/util.go b/pkg/scheduledjobs/schedulebase/util.go index 7c7ff9ce6b2b..a510830e4fe7 100644 --- a/pkg/scheduledjobs/schedulebase/util.go +++ b/pkg/scheduledjobs/schedulebase/util.go @@ -17,12 +17,12 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -75,7 +75,7 @@ func ComputeScheduleRecurrence(now time.Time, rec *string) (*ScheduleRecurrence, func CheckScheduleAlreadyExists( ctx context.Context, p sql.PlanHookState, scheduleLabel string, ) (bool, error) { - row, err := p.ExecCfg().InternalExecutor.QueryRowEx(ctx, "check-sched", + row, err := p.InternalSQLTxn().QueryRowEx(ctx, "check-sched", p.Txn(), sessiondata.RootUserSessionDataOverride, 
fmt.Sprintf("SELECT count(schedule_name) FROM %s WHERE schedule_name = '%s'", scheduledjobs.ProdJobSchedulerEnv.ScheduledJobsTableName(), scheduleLabel)) @@ -175,8 +175,9 @@ func FullyQualifyTables( } switch tp := tablePattern.(type) { case *tree.TableName: - if err := sql.DescsTxn(ctx, p.ExecCfg(), func(ctx context.Context, txn *kv.Txn, - col *descs.Collection) error { + if err := sql.DescsTxn(ctx, p.ExecCfg(), func( + ctx context.Context, txn isql.Txn, col *descs.Collection, + ) error { // Resolve the table. un := tp.ToUnresolvedObjectName() found, _, tableDesc, err := resolver.ResolveExisting( @@ -191,13 +192,13 @@ func FullyQualifyTables( } // Resolve the database. - dbDesc, err := col.ByIDWithLeased(txn).WithoutNonPublic().Get().Database(ctx, tableDesc.GetParentID()) + dbDesc, err := col.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Database(ctx, tableDesc.GetParentID()) if err != nil { return err } // Resolve the schema. - schemaDesc, err := col.ByIDWithLeased(txn).WithoutNonPublic().Get().Schema(ctx, tableDesc.GetParentSchemaID()) + schemaDesc, err := col.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Schema(ctx, tableDesc.GetParentSchemaID()) if err != nil { return err } @@ -221,12 +222,14 @@ func FullyQualifyTables( // Otherwise, no updates are needed since the schema field refers to the // database. 
var schemaID descpb.ID - if err := sql.DescsTxn(ctx, p.ExecCfg(), func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - dbDesc, err := col.ByNameWithLeased(txn).Get().Database(ctx, p.CurrentDatabase()) + if err := sql.DescsTxn(ctx, p.ExecCfg(), func( + ctx context.Context, txn isql.Txn, col *descs.Collection, + ) error { + dbDesc, err := col.ByNameWithLeased(txn.KV()).Get().Database(ctx, p.CurrentDatabase()) if err != nil { return err } - schemaID, err = col.LookupSchemaID(ctx, txn, dbDesc.GetID(), tp.SchemaName.String()) + schemaID, err = col.LookupSchemaID(ctx, txn.KV(), dbDesc.GetID(), tp.SchemaName.String()) return err }); err != nil { return nil, err diff --git a/pkg/server/BUILD.bazel b/pkg/server/BUILD.bazel index 7bfcb1f7045d..e5f3759b1822 100644 --- a/pkg/server/BUILD.bazel +++ b/pkg/server/BUILD.bazel @@ -113,7 +113,6 @@ go_library( "//pkg/kv/kvserver/loqrecovery", "//pkg/kv/kvserver/loqrecovery/loqrecoverypb", "//pkg/kv/kvserver/protectedts", - "//pkg/kv/kvserver/protectedts/ptpb", "//pkg/kv/kvserver/protectedts/ptprovider", "//pkg/kv/kvserver/protectedts/ptreconcile", "//pkg/kv/kvserver/rangefeed", @@ -189,6 +188,7 @@ go_library( "//pkg/sql/gcjob/gcjobnotifier", "//pkg/sql/idxusage", "//pkg/sql/importer", + "//pkg/sql/isql", "//pkg/sql/lexbase", "//pkg/sql/optionalnodeliveness", "//pkg/sql/parser", @@ -220,7 +220,6 @@ go_library( "//pkg/sql/sqlstats/insights", "//pkg/sql/sqlstats/persistedsqlstats", "//pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil", - "//pkg/sql/sqlutil", "//pkg/sql/stats", "//pkg/sql/stmtdiagnostics", "//pkg/sql/syntheticprivilege", diff --git a/pkg/server/admin.go b/pkg/server/admin.go index 68a60edf62dd..f35bead545e9 100644 --- a/pkg/server/admin.go +++ b/pkg/server/admin.go @@ -47,13 +47,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" 
"github.com/cockroachdb/cockroach/pkg/sql/optionalnodeliveness" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/roleoption" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/syntheticprivilege" "github.com/cockroachdb/cockroach/pkg/ts/catalog" "github.com/cockroachdb/cockroach/pkg/util/contextutil" @@ -392,7 +392,7 @@ func (s *adminServer) databasesHelper( } // We have to make sure to close the iterator since we might return from the // for loop early (before Next() returns false). - defer func(it sqlutil.InternalRows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) + defer func(it isql.Rows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) var resp serverpb.DatabasesResponse var hasNext bool @@ -480,7 +480,7 @@ func (s *adminServer) getDatabaseGrants( } // We have to make sure to close the iterator since we might return from the // for loop early (before Next() returns false). - defer func(it sqlutil.InternalRows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) + defer func(it isql.Rows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) { const ( userCol = "grantee" @@ -544,7 +544,7 @@ WHERE table_catalog = $ AND table_type != 'SYSTEM VIEW'`, req.Database) } // We have to make sure to close the iterator since we might return from the // for loop early (before Next() returns false). 
- defer func(it sqlutil.InternalRows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) + defer func(it isql.Rows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) { ok, err := it.Next(ctx) if err != nil { @@ -642,7 +642,9 @@ func (s *adminServer) databaseDetailsHelper( if err != nil { return nil, err } - dbIndexRecommendations, err := getDatabaseIndexRecommendations(ctx, req.Database, s.ie, s.st, s.sqlServer.execCfg) + dbIndexRecommendations, err := getDatabaseIndexRecommendations( + ctx, req.Database, s.ie, s.st, s.sqlServer.execCfg.UnusedIndexRecommendationsKnobs, + ) if err != nil { return nil, err } @@ -834,7 +836,7 @@ func (s *adminServer) tableDetailsHelper( } // We have to make sure to close the iterator since we might return from the // for loop early (before Next() returns false). - defer func(it sqlutil.InternalRows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) + defer func(it isql.Rows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) // TODO(cdo): protobuf v3's default behavior for fields with zero values (e.g. empty strings) // is to suppress them. So, if protobuf field "foo" is an empty string, "foo" won't show // up in the marshaled JSON. I feel that this is counterintuitive, and this should be fixed @@ -907,7 +909,7 @@ func (s *adminServer) tableDetailsHelper( } // We have to make sure to close the iterator since we might return from the // for loop early (before Next() returns false). - defer func(it sqlutil.InternalRows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) + defer func(it isql.Rows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) { const ( nameCol = "index_name" @@ -971,7 +973,7 @@ func (s *adminServer) tableDetailsHelper( } // We have to make sure to close the iterator since we might return from the // for loop early (before Next() returns false). 
- defer func(it sqlutil.InternalRows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) + defer func(it isql.Rows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) { const ( userCol = "grantee" @@ -1467,7 +1469,7 @@ func (s *adminServer) usersHelper( return nil, err } // We have to make sure to close the iterator. - defer func(it sqlutil.InternalRows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) + defer func(it isql.Rows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) var resp serverpb.UsersResponse var ok bool @@ -1558,7 +1560,7 @@ func (s *adminServer) eventsHelper( } // We have to make sure to close the iterator since we might return from the // for loop early (before Next() returns false). - defer func(it sqlutil.InternalRows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) + defer func(it isql.Rows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) // Marshal response. var resp serverpb.EventsResponse @@ -1695,7 +1697,7 @@ func (s *adminServer) rangeLogHelper( } // We have to make sure to close the iterator since we might return from the // for loop early (before Next() returns false). - defer func(it sqlutil.InternalRows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) + defer func(it isql.Rows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) // Marshal response. var resp serverpb.RangeLogResponse @@ -1891,7 +1893,7 @@ func (s *adminServer) SetUIData( for key, val := range req.KeyValues { // Do an upsert of the key. We update each key in a separate transaction to // avoid long-running transactions and possible deadlocks. 
- ie := s.sqlServer.internalExecutorFactory.MakeInternalExecutorWithoutTxn() + ie := s.sqlServer.internalDB.Executor() query := `UPSERT INTO system.ui (key, value, "lastUpdated") VALUES ($1, $2, now())` rowsAffected, err := ie.ExecEx( ctx, "admin-set-ui-data", nil, /* txn */ @@ -2300,7 +2302,7 @@ func jobsHelper( } // We have to make sure to close the iterator since we might return from the // for loop early (before Next() returns false). - defer func(it sqlutil.InternalRows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) + defer func(it isql.Rows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) ok, err := it.Next(ctx) if err != nil { @@ -2507,7 +2509,7 @@ func (s *adminServer) locationsHelper( } // We have to make sure to close the iterator since we might return from the // for loop early (before Next() returns false). - defer func(it sqlutil.InternalRows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) + defer func(it isql.Rows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) ok, err := it.Next(ctx) if err != nil { @@ -2868,7 +2870,7 @@ func (s *adminServer) dataDistributionHelper( } // We have to make sure to close the iterator since we might return from the // for loop early (before Next() returns false). - defer func(it sqlutil.InternalRows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) + defer func(it isql.Rows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) // Used later when we're scanning Meta2 and only have IDs, not names. tableInfosByTableID := map[uint32]serverpb.DataDistributionResponse_TableInfo{} @@ -3000,7 +3002,7 @@ func (s *adminServer) dataDistributionHelper( } // We have to make sure to close the iterator since we might return from the // for loop early (before Next() returns false). 
- defer func(it sqlutil.InternalRows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) + defer func(it isql.Rows) { retErr = errors.CombineErrors(retErr, it.Close()) }(it) for hasNext, err = it.Next(ctx); hasNext; hasNext, err = it.Next(ctx) { row := it.Cur() @@ -3541,7 +3543,7 @@ func (rs resultScanner) Scan(row tree.Datums, colName string, dst interface{}) e // TODO(mrtracy): The following methods, used to look up the zone configuration // for a database or table, use the same algorithm as a set of methods in // cli/zone.go for the same purpose. However, as that code connects to the -// server with a SQL connections, while this code uses the InternalExecutor, the +// server with a SQL connections, while this code uses the Executor, the // code cannot be commonized. // // queryZone retrieves the specific ZoneConfig associated with the supplied ID, @@ -3679,7 +3681,7 @@ func (s *adminServer) dialNode( // adminPrivilegeChecker is a helper struct to check whether given usernames // have admin privileges. type adminPrivilegeChecker struct { - ie *sql.InternalExecutor + ie isql.Executor st *cluster.Settings // makePlanner is a function that calls NewInternalPlanner // to make a planner outside of the sql package. 
This is a hack diff --git a/pkg/server/api_v2_sql.go b/pkg/server/api_v2_sql.go index 6929e66c66d5..0de234773dc6 100644 --- a/pkg/server/api_v2_sql.go +++ b/pkg/server/api_v2_sql.go @@ -19,14 +19,14 @@ import ( "net/http" "time" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" "github.com/cockroachdb/cockroach/pkg/util/contextutil" "github.com/cockroachdb/cockroach/pkg/util/json" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -361,40 +361,16 @@ func (a *apiV2Server) execSQL(w http.ResponseWriter, r *http.Request) { // The SQL username that owns this session. username := getSQLUsername(ctx) - // runner is the function that will execute all the statements as a group. - // If there's just one statement, we execute them with an implicit, - // auto-commit transaction. - - type ( - txnFunc = func(context.Context, *kv.Txn, sqlutil.InternalExecutor) error - runnerFunc = func(ctx context.Context, fn txnFunc) error - ) - var runner runnerFunc - if len(requestPayload.Statements) > 1 { - // We need a transaction to group the statements together. - // We use TxnWithSteppingEnabled here even though we don't - // use stepping below, because that buys us admission control. 
- ief := a.sqlServer.internalExecutorFactory - runner = func(ctx context.Context, fn txnFunc) error { - return ief.TxnWithExecutor(ctx, a.db, nil /* sessionData */, func( - ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, - ) error { - return fn(ctx, txn, ie) - }, sqlutil.SteppingEnabled()) - } - } else { - runner = func(ctx context.Context, fn func(context.Context, *kv.Txn, sqlutil.InternalExecutor) error) error { - return fn(ctx, nil, a.sqlServer.internalExecutor) - } + options := []isql.TxnOption{ + isql.WithPriority(admissionpb.NormalPri), } - result.Execution = &execResult{} result.Execution.TxnResults = make([]txnResult, 0, len(requestPayload.Statements)) err = contextutil.RunWithTimeout(ctx, "run-sql-via-api", timeout, func(ctx context.Context) error { retryNum := 0 - return runner(ctx, func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) error { + return a.sqlServer.internalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { result.Execution.TxnResults = result.Execution.TxnResults[:0] result.Execution.Retries = retryNum retryNum++ @@ -431,7 +407,7 @@ func (a *apiV2Server) execSQL(w http.ResponseWriter, r *http.Request) { } }() - it, err := ie.QueryIteratorEx(ctx, "run-query-via-api", txn, + it, err := txn.QueryIteratorEx(ctx, "run-query-via-api", txn.KV(), sessiondata.InternalExecutorOverride{ User: username, Database: requestPayload.Database, @@ -443,7 +419,7 @@ func (a *apiV2Server) execSQL(w http.ResponseWriter, r *http.Request) { } // We have to make sure to close the iterator since we might return from the // for loop early (before Next() returns false). - defer func(it sqlutil.InternalRows) { + defer func(it isql.Rows) { if returnType == tree.RowsAffected || (returnType != tree.Rows && it.RowsAffected() > 0) { txnRes.RowsAffected = it.RowsAffected() } @@ -472,7 +448,7 @@ func (a *apiV2Server) execSQL(w http.ResponseWriter, r *http.Request) { } } return nil - }) + }, options...) 
}) if err != nil { result.Error = &jsonError{err} diff --git a/pkg/server/authentication.go b/pkg/server/authentication.go index 2e84b8712da2..f3ef7a5423bc 100644 --- a/pkg/server/authentication.go +++ b/pkg/server/authentication.go @@ -267,7 +267,6 @@ func (s *authenticationServer) UserLoginFromSSO( exists, _, canLoginDBConsole, _, _, _, err := sql.GetUserSessionInitInfo( ctx, s.sqlServer.execCfg, - s.sqlServer.execCfg.InternalExecutor, username, "", /* databaseName */ ) @@ -432,7 +431,6 @@ func (s *authenticationServer) verifyPasswordDBConsole( exists, _, canLoginDBConsole, _, _, pwRetrieveFn, err := sql.GetUserSessionInitInfo( ctx, s.sqlServer.execCfg, - s.sqlServer.execCfg.InternalExecutor, userName, "", /* databaseName */ ) diff --git a/pkg/server/external_storage_builder.go b/pkg/server/external_storage_builder.go index dfe700703936..41fe0e8d2bf0 100644 --- a/pkg/server/external_storage_builder.go +++ b/pkg/server/external_storage_builder.go @@ -17,14 +17,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/blobs" "github.com/cockroachdb/cockroach/pkg/cloud" "github.com/cockroachdb/cockroach/pkg/cloud/cloudpb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/multitenant" "github.com/cockroachdb/cockroach/pkg/multitenant/multitenantio" "github.com/cockroachdb/cockroach/pkg/rpc/nodedialer" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/errors" ) @@ -39,9 +37,7 @@ type externalStorageBuilder struct { settings *cluster.Settings blobClientFactory blobs.BlobClientFactory initCalled bool - ie *sql.InternalExecutor - ief sqlutil.InternalExecutorFactory - db *kv.DB + db isql.DB limiters cloud.Limiters recorder multitenant.TenantSideExternalIORecorder 
metrics metric.Struct @@ -54,9 +50,7 @@ func (e *externalStorageBuilder) init( nodeIDContainer *base.NodeIDContainer, nodeDialer *nodedialer.Dialer, testingKnobs base.TestingKnobs, - ie *sql.InternalExecutor, - ief sqlutil.InternalExecutorFactory, - db *kv.DB, + db isql.DB, recorder multitenant.TenantSideExternalIORecorder, registry *metric.Registry, ) { @@ -71,8 +65,6 @@ func (e *externalStorageBuilder) init( e.settings = settings e.blobClientFactory = blobClientFactory e.initCalled = true - e.ie = ie - e.ief = ief e.db = db e.limiters = cloud.MakeLimiters(ctx, &settings.SV) e.recorder = recorder @@ -89,8 +81,10 @@ func (e *externalStorageBuilder) makeExternalStorage( if !e.initCalled { return nil, errors.New("cannot create external storage before init") } - return cloud.MakeExternalStorage(ctx, dest, e.conf, e.settings, e.blobClientFactory, e.ie, e.ief, - e.db, e.limiters, e.metrics, append(e.defaultOptions(), opts...)...) + return cloud.MakeExternalStorage( + ctx, dest, e.conf, e.settings, e.blobClientFactory, e.db, e.limiters, e.metrics, + append(e.defaultOptions(), opts...)..., + ) } func (e *externalStorageBuilder) makeExternalStorageFromURI( @@ -99,8 +93,10 @@ func (e *externalStorageBuilder) makeExternalStorageFromURI( if !e.initCalled { return nil, errors.New("cannot create external storage before init") } - return cloud.ExternalStorageFromURI(ctx, uri, e.conf, e.settings, e.blobClientFactory, - user, e.ie, e.ief, e.db, e.limiters, e.metrics, append(e.defaultOptions(), opts...)...) 
+ return cloud.ExternalStorageFromURI( + ctx, uri, e.conf, e.settings, e.blobClientFactory, user, e.db, e.limiters, e.metrics, + append(e.defaultOptions(), opts...)..., + ) } func (e *externalStorageBuilder) defaultOptions() []cloud.ExternalStorageOption { diff --git a/pkg/server/index_usage_stats.go b/pkg/server/index_usage_stats.go index 9a7501a3d833..0845ad1e639f 100644 --- a/pkg/server/index_usage_stats.go +++ b/pkg/server/index_usage_stats.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/idxusage" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" @@ -223,7 +224,7 @@ func getTableIndexUsageStats( ctx context.Context, req *serverpb.TableIndexStatsRequest, idxUsageStatsProvider *idxusage.LocalIndexUsageStats, - ie *sql.InternalExecutor, + ie isql.Executor, st *cluster.Settings, execConfig *sql.ExecutorConfig, ) (*serverpb.TableIndexStatsResponse, error) { @@ -346,7 +347,7 @@ func getTableIDFromDatabaseAndTableName( ctx context.Context, database string, table string, - ie *sql.InternalExecutor, + ie isql.Executor, userName username.SQLUsername, ) (int, error) { // Fully qualified table name is either database.table or database.schema.table @@ -373,9 +374,9 @@ func getTableIDFromDatabaseAndTableName( func getDatabaseIndexRecommendations( ctx context.Context, dbName string, - ie *sql.InternalExecutor, + ie isql.Executor, st *cluster.Settings, - execConfig *sql.ExecutorConfig, + knobs *idxusage.UnusedIndexRecommendationTestingKnobs, ) ([]*serverpb.IndexRecommendation, error) { // Omit fetching index recommendations for the 'system' database. 
@@ -442,7 +443,7 @@ func getDatabaseIndexRecommendations( CreatedAt: createdAt, LastRead: lastRead, IndexType: string(indexType), - UnusedIndexKnobs: execConfig.UnusedIndexRecommendationsKnobs, + UnusedIndexKnobs: knobs, } recommendations := statsRow.GetRecommendationsFromIndexStats(dbName, st) idxRecommendations = append(idxRecommendations, recommendations...) diff --git a/pkg/server/loss_of_quorum.go b/pkg/server/loss_of_quorum.go index 8d8f336a97d0..c9809893f71d 100644 --- a/pkg/server/loss_of_quorum.go +++ b/pkg/server/loss_of_quorum.go @@ -16,8 +16,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/loqrecovery/loqrecoverypb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/errors" @@ -50,7 +50,7 @@ func logPendingLossOfQuorumRecoveryEvents(ctx context.Context, stores *kvserver. 
} func publishPendingLossOfQuorumRecoveryEvents( - ctx context.Context, ie sqlutil.InternalExecutor, stores *kvserver.Stores, stopper *stop.Stopper, + ctx context.Context, ie isql.Executor, stores *kvserver.Stores, stopper *stop.Stopper, ) { _ = stopper.RunAsyncTask(ctx, "publish-loss-of-quorum-events", func(ctx context.Context) { if err := stores.VisitStores(func(s *kvserver.Store) error { diff --git a/pkg/server/node.go b/pkg/server/node.go index 0dda9a6a1eec..0dcfe1b5001f 100644 --- a/pkg/server/node.go +++ b/pkg/server/node.go @@ -43,7 +43,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/bootstrap" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/admission" @@ -399,7 +399,7 @@ func NewNode( return n } -// InitLogger connects the Node to the InternalExecutor to be used for event +// InitLogger connects the Node to the Executor to be used for event // logging. func (n *Node) InitLogger(execCfg *sql.ExecutorConfig) { n.execCfg = execCfg @@ -1803,7 +1803,7 @@ func (n *Node) TokenBucket( var NewTenantUsageServer = func( settings *cluster.Settings, db *kv.DB, - ief sqlutil.InternalExecutorFactory, + ief isql.DB, ) multitenant.TenantUsageServer { return dummyTenantUsageServer{} } @@ -1824,8 +1824,7 @@ func (dummyTenantUsageServer) TokenBucketRequest( // ReconfigureTokenBucket is defined in the TenantUsageServer interface. 
func (dummyTenantUsageServer) ReconfigureTokenBucket( ctx context.Context, - txn *kv.Txn, - ie sqlutil.InternalExecutor, + txn isql.Txn, tenantID roachpb.TenantID, availableRU float64, refillRate float64, diff --git a/pkg/server/server.go b/pkg/server/server.go index 89a9ecbfe656..cdd00cb313fb 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -501,13 +501,13 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { ctSender := sidetransport.NewSender(stopper, st, clock, nodeDialer) ctReceiver := sidetransport.NewReceiver(nodeIDContainer, stopper, stores, nil /* testingKnobs */) - // The InternalExecutor will be further initialized later, as we create more + // The Executor will be further initialized later, as we create more // of the server's components. There's a circular dependency - many things - // need an InternalExecutor, but the InternalExecutor needs an executorConfig, + // need an Executor, but the Executor needs an executorConfig, // which in turn needs many things. That's why everybody that needs an - // InternalExecutor uses this one instance. + // Executor uses this one instance. internalExecutor := &sql.InternalExecutor{} - internalExecutorFactory := &sql.InternalExecutorFactory{} + insqlDB := sql.NewShimInternalDB(db) jobRegistry := &jobs.Registry{} // ditto // Create an ExternalStorageBuilder. 
This is only usable after Start() where @@ -518,15 +518,16 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { protectedtsKnobs, _ := cfg.TestingKnobs.ProtectedTS.(*protectedts.TestingKnobs) protectedtsProvider, err := ptprovider.New(ptprovider.Config{ - DB: db, - InternalExecutor: internalExecutor, - Settings: st, - Knobs: protectedtsKnobs, + DB: insqlDB, + Settings: st, + Knobs: protectedtsKnobs, ReconcileStatusFuncs: ptreconcile.StatusFuncs{ jobsprotectedts.GetMetaType(jobsprotectedts.Jobs): jobsprotectedts.MakeStatusFunc( - jobRegistry, internalExecutor, jobsprotectedts.Jobs), - jobsprotectedts.GetMetaType(jobsprotectedts.Schedules): jobsprotectedts.MakeStatusFunc(jobRegistry, - internalExecutor, jobsprotectedts.Schedules), + jobRegistry, jobsprotectedts.Jobs, + ), + jobsprotectedts.GetMetaType(jobsprotectedts.Schedules): jobsprotectedts.MakeStatusFunc( + jobRegistry, jobsprotectedts.Schedules, + ), }, }) if err != nil { @@ -737,7 +738,7 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { updates.TestingKnobs = &cfg.TestingKnobs.Server.(*TestingKnobs).DiagnosticsTestingKnobs } - tenantUsage := NewTenantUsageServer(st, db, internalExecutorFactory) + tenantUsage := NewTenantUsageServer(st, db, insqlDB) registry.AddMetricStruct(tenantUsage.Metrics()) tenantSettingsWatcher := tenantsettingswatcher.New( @@ -923,7 +924,7 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { closedSessionCache: closedSessionCache, remoteFlowRunner: remoteFlowRunner, circularInternalExecutor: internalExecutor, - internalExecutorFactory: internalExecutorFactory, + internalDB: insqlDB, circularJobRegistry: jobRegistry, protectedtsProvider: protectedtsProvider, rangeFeedFactory: rangeFeedFactory, @@ -1210,7 +1211,6 @@ func (s *Server) PreStart(ctx context.Context) error { ieMon := sql.MakeInternalExecutorMemMonitor(sql.MemoryMetrics{}, s.ClusterSettings()) ieMon.StartNoReserved(ctx, s.PGServer().SQLServer.GetBytesMonitor()) 
s.stopper.AddCloser(stop.CloserFn(func() { ieMon.Stop(ctx) })) - fileTableInternalExecutor := sql.MakeInternalExecutor(s.PGServer().SQLServer, sql.MemoryMetrics{}, ieMon) s.externalStorageBuilder.init( ctx, s.cfg.ExternalIODirConfig, @@ -1218,9 +1218,7 @@ func (s *Server) PreStart(ctx context.Context) error { s.nodeIDContainer, s.nodeDialer, s.cfg.TestingKnobs, - &fileTableInternalExecutor, - s.sqlServer.execCfg.InternalExecutorFactory, - s.db, + s.sqlServer.execCfg.InternalDB.CloneWithMemoryMonitor(sql.MemoryMetrics{}, ieMon), nil, /* TenantExternalIORecorder */ s.registry, ) @@ -1834,7 +1832,10 @@ func (s *Server) PreStart(ctx context.Context) error { // startup fails, and write to range log once the server is running as we need // to run sql statements to update rangelog. publishPendingLossOfQuorumRecoveryEvents( - workersCtx, s.node.execCfg.InternalExecutor, s.node.stores, s.stopper, + workersCtx, + s.node.execCfg.InternalDB.Executor(), + s.node.stores, + s.stopper, ) log.Event(ctx, "server initialized") diff --git a/pkg/server/server_internal_executor_factory_test.go b/pkg/server/server_internal_executor_factory_test.go index 38b6a9afd5c3..3e4c32580d8f 100644 --- a/pkg/server/server_internal_executor_factory_test.go +++ b/pkg/server/server_internal_executor_factory_test.go @@ -31,8 +31,8 @@ func TestInternalExecutorClearsMonitorMemory(t *testing.T) { s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - mon := s.(*TestServer).sqlServer.internalExecutorFactoryMemMonitor - ief := s.ExecutorConfig().(sql.ExecutorConfig).InternalExecutorFactory + mon := s.(*TestServer).sqlServer.internalDBMemMonitor + ief := s.ExecutorConfig().(sql.ExecutorConfig).InternalDB sessionData := sql.NewFakeSessionData(&s.ClusterSettings().SV) ie := ief.NewInternalExecutor(sessionData) rows, err := ie.QueryIteratorEx(ctx, "test", nil, sessiondata.NodeUserSessionDataOverride, `SELECT 1`) diff --git a/pkg/server/server_sql.go b/pkg/server/server_sql.go 
index c501e13bb4bd..5a6bf9e87dc5 100644 --- a/pkg/server/server_sql.go +++ b/pkg/server/server_sql.go @@ -134,20 +134,20 @@ import ( // standalone SQLServer instances per tenant (the KV layer is shared across all // tenants). type SQLServer struct { - ambientCtx log.AmbientContext - stopper *stop.Stopper - stopTrigger *stopTrigger - sqlIDContainer *base.SQLIDContainer - pgServer *pgwire.Server - distSQLServer *distsql.ServerImpl - execCfg *sql.ExecutorConfig - cfg *BaseConfig - internalExecutor *sql.InternalExecutor - internalExecutorFactory descs.TxnManager - leaseMgr *lease.Manager - blobService *blobs.Service - tracingService *service.Service - tenantConnect kvtenant.Connector + ambientCtx log.AmbientContext + stopper *stop.Stopper + stopTrigger *stopTrigger + sqlIDContainer *base.SQLIDContainer + pgServer *pgwire.Server + distSQLServer *distsql.ServerImpl + execCfg *sql.ExecutorConfig + cfg *BaseConfig + internalExecutor *sql.InternalExecutor + internalDB descs.DB + leaseMgr *lease.Manager + blobService *blobs.Service + tracingService *service.Service + tenantConnect kvtenant.Connector // sessionRegistry can be queried for info on running SQL sessions. It is // shared between the sql.Server and the statusServer. sessionRegistry *sql.SessionRegistry @@ -183,11 +183,11 @@ type SQLServer struct { // This is set to true when the server has started accepting client conns. isReady syncutil.AtomicBool - // internalExecutorFactoryMemMonitor is the memory monitor corresponding to the - // InternalExecutorFactory singleton. It only gets closed when - // Server is closed. Every InternalExecutor created via the factory + // internalDBMemMonitor is the memory monitor corresponding to the + // InternalDB singleton. It only gets closed when + // Server is closed. Every Executor created via the factory // uses this memory monitor. 
- internalExecutorFactoryMemMonitor *mon.BytesMonitor + internalDBMemMonitor *mon.BytesMonitor // upgradeManager deals with cluster version upgrades on bootstrap and on // `set cluster setting version = `. @@ -306,10 +306,14 @@ type sqlServerArgs struct { // struct in this configuration, which newSQLServer fills. // // TODO(tbg): make this less hacky. + // TODO(ajwerner): Replace this entirely with the internalDB which follows. + // it is no less hacky, but at least it removes some redundancy. In some ways + // the internalDB is worse: the Executor() method cannot be used during server + // startup while the internalDB is partially initialized. circularInternalExecutor *sql.InternalExecutor // empty initially - // internalExecutorFactory is to initialize an internal executor. - internalExecutorFactory *sql.InternalExecutorFactory + // internalDB is to initialize an internal executor. + internalDB *sql.InternalDB // Stores and deletes expired liveness sessions. sqlLivenessProvider sqlliveness.Provider @@ -542,9 +546,7 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { cfg.AmbientCtx, cfg.stopper, cfg.clock, - cfg.db, - cfg.circularInternalExecutor, - cfg.internalExecutorFactory, + cfg.internalDB, cfg.rpcContext.LogicalClusterID, cfg.nodeIDContainer, cfg.sqlLivenessProvider, @@ -571,9 +573,8 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { leaseMgr := lease.NewLeaseManager( cfg.AmbientCtx, cfg.nodeIDContainer, - cfg.db, + cfg.internalDB, cfg.clock, - cfg.circularInternalExecutor, cfg.Settings, codec, lmKnobs, @@ -701,8 +702,7 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { NodeID: cfg.nodeIDContainer, Locality: cfg.Locality, Codec: codec, - DB: cfg.db, - Executor: cfg.circularInternalExecutor, + DB: cfg.internalDB, RPCContext: cfg.rpcContext, Stopper: cfg.stopper, @@ -902,7 +902,7 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { 
SyntheticPrivilegeCache: syntheticprivilegecache.New( cfg.Settings, cfg.stopper, cfg.db, serverCacheMemoryMonitor.MakeBoundAccount(), - virtualSchemas, cfg.internalExecutorFactory, + virtualSchemas, cfg.internalDB, ), DistSQLPlanner: sql.NewDistSQLPlanner( ctx, @@ -925,10 +925,8 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { TableStatsCache: stats.NewTableStatisticsCache( cfg.TableStatCacheSize, - cfg.db, - cfg.circularInternalExecutor, cfg.Settings, - cfg.internalExecutorFactory, + cfg.internalDB, ), QueryCache: querycache.New(cfg.QueryCacheSize), @@ -939,7 +937,7 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { GCJobNotifier: gcJobNotifier, RangeFeedFactory: cfg.rangeFeedFactory, CollectionFactory: collectionFactory, - SystemTableIDResolver: descs.MakeSystemTableIDResolver(collectionFactory, cfg.internalExecutorFactory, cfg.db), + SystemTableIDResolver: descs.MakeSystemTableIDResolver(collectionFactory, cfg.internalDB), ConsistencyChecker: consistencychecker.NewConsistencyChecker(cfg.db), RangeProber: rangeprober.NewRangeProber(cfg.db), DescIDGenerator: descidgen.NewGenerator(cfg.Settings, codec, cfg.db), @@ -1052,15 +1050,15 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { distSQLServer.ServerConfig.SchemaTelemetryController = pgServer.SQLServer.GetSchemaTelemetryController() distSQLServer.ServerConfig.IndexUsageStatsController = pgServer.SQLServer.GetIndexUsageStatsController() - // We use one BytesMonitor for all InternalExecutor's created by the - // ieFactory. - // Note that ieFactoryMonitor does not have to be closed, the parent - // monitor comes from server. ieFactoryMonitor is a singleton attached + // We use one BytesMonitor for all Executor's created by the + // internalDB. + // Note that internalDBMonitor does not have to be closed, the parent + // monitor comes from server. 
internalDBMonitor is a singleton attached // to server, if server is closed, we don't have to worry about - // returning the memory allocated to ieFactoryMonitor since the + // returning the memory allocated to internalDBMonitor since the // parent monitor is being closed anyway. - ieFactoryMonitor := mon.NewMonitor( - "internal executor factory", + internalDBMonitor := mon.NewMonitor( + "internal sql executor", mon.MemoryResource, internalMemMetrics.CurBytesCount, internalMemMetrics.MaxBytesHist, @@ -1068,23 +1066,23 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { math.MaxInt64, /* noteworthy */ cfg.Settings, ) - ieFactoryMonitor.StartNoReserved(ctx, pgServer.SQLServer.GetBytesMonitor()) + internalDBMonitor.StartNoReserved(ctx, pgServer.SQLServer.GetBytesMonitor()) // Now that we have a pgwire.Server (which has a sql.Server), we can close a // circular dependency between the rowexec.Server and sql.Server and set - // InternalExecutorFactory. The same applies for setting a + // InternalDB. The same applies for setting a // SessionBoundInternalExecutor on the job registry. 
- ieFactory := sql.NewInternalExecutorFactory( + internalDB := sql.NewInternalDB( pgServer.SQLServer, internalMemMetrics, - ieFactoryMonitor, + internalDBMonitor, ) - - distSQLServer.ServerConfig.InternalExecutorFactory = ieFactory - jobRegistry.SetInternalExecutorFactory(ieFactory) + *cfg.internalDB = *internalDB + execCfg.InternalDB = internalDB + jobRegistry.SetInternalDB(internalDB) execCfg.IndexBackfiller = sql.NewIndexBackfiller(execCfg) execCfg.IndexMerger = sql.NewIndexBackfillerMergePlanner(execCfg) execCfg.ProtectedTimestampManager = jobsprotectedts.NewManager( - execCfg.DB, + execCfg.InternalDB, execCfg.Codec, execCfg.ProtectedTimestampProvider, execCfg.SystemConfig, @@ -1094,27 +1092,23 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { execCfg.DB, execCfg.Codec, execCfg.Settings, - ieFactory, + internalDB, execCfg.ProtectedTimestampManager, sql.ValidateForwardIndexes, sql.ValidateInvertedIndexes, sql.ValidateConstraint, sql.NewFakeSessionData, ) - execCfg.InternalExecutorFactory = ieFactory distSQLServer.ServerConfig.ProtectedTimestampProvider = execCfg.ProtectedTimestampProvider for _, m := range pgServer.Metrics() { cfg.registry.AddMetricStruct(m) } - *cfg.circularInternalExecutor = sql.MakeInternalExecutor(pgServer.SQLServer, internalMemMetrics, ieFactoryMonitor) - *cfg.internalExecutorFactory = *ieFactory - execCfg.InternalExecutor = cfg.circularInternalExecutor + *cfg.circularInternalExecutor = sql.MakeInternalExecutor(pgServer.SQLServer, internalMemMetrics, internalDBMonitor) stmtDiagnosticsRegistry := stmtdiagnostics.NewRegistry( - cfg.circularInternalExecutor, - cfg.db, + cfg.internalDB, cfg.Settings, ) execCfg.StmtDiagnosticsRecorder = stmtDiagnosticsRegistry @@ -1134,24 +1128,22 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { DB: cfg.db, }) systemDeps = upgrade.SystemDeps{ - Cluster: c, - DB: cfg.db, - InternalExecutor: cfg.circularInternalExecutor, - DistSender: 
cfg.distSender, - Stopper: cfg.stopper, + Cluster: c, + DB: cfg.internalDB, + DistSender: cfg.distSender, + Stopper: cfg.stopper, } } else { c = upgradecluster.NewTenantCluster(cfg.db) systemDeps = upgrade.SystemDeps{ - Cluster: c, - DB: cfg.db, - InternalExecutor: cfg.circularInternalExecutor, + Cluster: c, + DB: cfg.internalDB, } } knobs, _ := cfg.TestingKnobs.UpgradeManager.(*upgradebase.TestingKnobs) upgradeMgr = upgrademanager.NewManager( - systemDeps, leaseMgr, cfg.circularInternalExecutor, cfg.internalExecutorFactory, jobRegistry, codec, + systemDeps, leaseMgr, cfg.circularInternalExecutor, jobRegistry, codec, cfg.Settings, clusterIDForSQL.Get(), knobs, ) execCfg.UpgradeJobDeps = upgradeMgr @@ -1187,9 +1179,8 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { spanConfigKnobs, ) spanConfig.manager = spanconfigmanager.New( - cfg.db, + cfg.internalDB, jobRegistry, - cfg.circularInternalExecutor, cfg.stopper, cfg.Settings, spanConfigReconciler, @@ -1208,14 +1199,12 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { } temporaryObjectCleaner := sql.NewTemporaryObjectCleaner( cfg.Settings, - cfg.db, + cfg.internalDB, codec, cfg.registry, cfg.sqlStatusServer, cfg.isMeta1Leaseholder, sqlExecutorTestingKnobs, - ieFactory, - collectionFactory, waitForInstanceReaderStarted, ) @@ -1270,41 +1259,41 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*SQLServer, error) { } return &SQLServer{ - ambientCtx: cfg.BaseConfig.AmbientCtx, - stopper: cfg.stopper, - stopTrigger: cfg.stopTrigger, - sqlIDContainer: cfg.nodeIDContainer, - pgServer: pgServer, - distSQLServer: distSQLServer, - execCfg: execCfg, - internalExecutor: cfg.circularInternalExecutor, - internalExecutorFactory: cfg.internalExecutorFactory, - leaseMgr: leaseMgr, - blobService: blobService, - tracingService: tracingService, - tenantConnect: cfg.tenantConnect, - sessionRegistry: cfg.sessionRegistry, - closedSessionCache: 
cfg.closedSessionCache, - jobRegistry: jobRegistry, - statsRefresher: statsRefresher, - temporaryObjectCleaner: temporaryObjectCleaner, - internalMemMetrics: internalMemMetrics, - sqlMemMetrics: sqlMemMetrics, - stmtDiagnosticsRegistry: stmtDiagnosticsRegistry, - sqlLivenessProvider: cfg.sqlLivenessProvider, - sqlInstanceStorage: cfg.sqlInstanceStorage, - sqlInstanceReader: cfg.sqlInstanceReader, - metricsRegistry: cfg.registry, - diagnosticsReporter: reporter, - spanconfigMgr: spanConfig.manager, - spanconfigSQLTranslatorFactory: spanConfig.sqlTranslatorFactory, - spanconfigSQLWatcher: spanConfig.sqlWatcher, - settingsWatcher: settingsWatcher, - systemConfigWatcher: cfg.systemConfigWatcher, - isMeta1Leaseholder: cfg.isMeta1Leaseholder, - cfg: cfg.BaseConfig, - internalExecutorFactoryMemMonitor: ieFactoryMonitor, - upgradeManager: upgradeMgr, + ambientCtx: cfg.BaseConfig.AmbientCtx, + stopper: cfg.stopper, + stopTrigger: cfg.stopTrigger, + sqlIDContainer: cfg.nodeIDContainer, + pgServer: pgServer, + distSQLServer: distSQLServer, + execCfg: execCfg, + internalExecutor: cfg.circularInternalExecutor, + internalDB: cfg.internalDB, + leaseMgr: leaseMgr, + blobService: blobService, + tracingService: tracingService, + tenantConnect: cfg.tenantConnect, + sessionRegistry: cfg.sessionRegistry, + closedSessionCache: cfg.closedSessionCache, + jobRegistry: jobRegistry, + statsRefresher: statsRefresher, + temporaryObjectCleaner: temporaryObjectCleaner, + internalMemMetrics: internalMemMetrics, + sqlMemMetrics: sqlMemMetrics, + stmtDiagnosticsRegistry: stmtDiagnosticsRegistry, + sqlLivenessProvider: cfg.sqlLivenessProvider, + sqlInstanceStorage: cfg.sqlInstanceStorage, + sqlInstanceReader: cfg.sqlInstanceReader, + metricsRegistry: cfg.registry, + diagnosticsReporter: reporter, + spanconfigMgr: spanConfig.manager, + spanconfigSQLTranslatorFactory: spanConfig.sqlTranslatorFactory, + spanconfigSQLWatcher: spanConfig.sqlWatcher, + settingsWatcher: settingsWatcher, + 
systemConfigWatcher: cfg.systemConfigWatcher, + isMeta1Leaseholder: cfg.isMeta1Leaseholder, + cfg: cfg.BaseConfig, + internalDBMemMonitor: internalDBMonitor, + upgradeManager: upgradeMgr, }, nil } @@ -1339,7 +1328,8 @@ func (s *SQLServer) preStart( // This also serves as a simple check to see if a tenant exist (i.e. by // checking whether the system db has been bootstrapped). regionPhysicalRep, err := sql.GetLocalityRegionEnumPhysicalRepresentation( - ctx, s.internalExecutorFactory, s.execCfg.DB, keys.SystemDatabaseID, s.distSQLServer.Locality) + ctx, s.internalDB, keys.SystemDatabaseID, s.distSQLServer.Locality, + ) if err != nil && !errors.Is(err, sql.ErrNotMultiRegionDatabase) { return err } @@ -1359,7 +1349,7 @@ func (s *SQLServer) preStart( } // Start instance ID reclaim loop. if err := s.sqlInstanceStorage.RunInstanceIDReclaimLoop( - ctx, stopper, timeutil.DefaultTimeSource{}, s.internalExecutorFactory, session.Expiration, + ctx, stopper, timeutil.DefaultTimeSource{}, s.internalDB, session.Expiration, ); err != nil { return err } @@ -1482,10 +1472,9 @@ func (s *SQLServer) preStart( stopper, s.metricsRegistry, &scheduledjobs.JobExecutionConfig{ - Settings: s.execCfg.Settings, - InternalExecutor: s.internalExecutor, - DB: s.execCfg.DB, - TestingKnobs: knobs.JobsTestingKnobs, + Settings: s.execCfg.Settings, + DB: s.execCfg.InternalDB, + TestingKnobs: knobs.JobsTestingKnobs, PlanHookMaker: func(opName string, txn *kv.Txn, user username.SQLUsername) (interface{}, func()) { // This is a hack to get around a Go package dependency cycle. See comment // in sql/jobs/registry.go on planHookMaker. 
@@ -1502,7 +1491,10 @@ func (s *SQLServer) preStart( scheduledjobs.ProdJobSchedulerEnv, ) - scheduledlogging.Start(ctx, stopper, s.execCfg.DB, s.execCfg.Settings, s.internalExecutor, s.execCfg.CaptureIndexUsageStatsKnobs) + scheduledlogging.Start( + ctx, stopper, s.execCfg.InternalDB, s.execCfg.Settings, + s.execCfg.CaptureIndexUsageStatsKnobs, + ) s.execCfg.SyntheticPrivilegeCache.Start(ctx) return nil } diff --git a/pkg/server/status.go b/pkg/server/status.go index 2b9d6b69688c..0f6e5f3579d3 100644 --- a/pkg/server/status.go +++ b/pkg/server/status.go @@ -63,7 +63,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" "github.com/cockroachdb/cockroach/pkg/sql/sqlinstance" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/insights" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/contextutil" "github.com/cockroachdb/cockroach/pkg/util/grpcutil" @@ -2523,9 +2522,10 @@ func (s *systemStatusServer) HotRangesV2( schemaName = meta.(tableMeta).schemaName indexName = meta.(tableMeta).indexName } else { - if err = s.sqlServer.distSQLServer.InternalExecutorFactory.DescsTxnWithExecutor( - ctx, s.db, nil, func(ctx context.Context, txn *kv.Txn, col *descs.Collection, ie sqlutil.InternalExecutor) error { - desc, err := col.ByID(txn).WithoutNonPublic().Get().Table(ctx, descpb.ID(tableID)) + if err = s.sqlServer.distSQLServer.DB.DescsTxn( + ctx, func(ctx context.Context, txn descs.Txn) error { + col := txn.Descriptors() + desc, err := col.ByID(txn.KV()).WithoutNonPublic().Get().Table(ctx, descpb.ID(tableID)) if err != nil { return errors.Wrapf(err, "cannot get table descriptor with tableID: %d, %s", tableID, r.Desc) } @@ -2543,13 +2543,13 @@ func (s *systemStatusServer) HotRangesV2( } } - if dbDesc, err := col.ByID(txn).WithoutNonPublic().Get().Database(ctx, desc.GetParentID()); err != nil { + if dbDesc, err := col.ByID(txn.KV()).WithoutNonPublic().Get().Database(ctx, 
desc.GetParentID()); err != nil { log.Warningf(ctx, "cannot get database by descriptor ID: %s: %v", r.Desc, err) } else { dbName = dbDesc.GetName() } - if schemaDesc, err := col.ByID(txn).WithoutNonPublic().Get().Schema(ctx, desc.GetParentSchemaID()); err != nil { + if schemaDesc, err := col.ByID(txn.KV()).WithoutNonPublic().Get().Schema(ctx, desc.GetParentSchemaID()); err != nil { log.Warningf(ctx, "cannot get schema name for range descriptor: %s: %v", r.Desc, err) } else { schemaName = schemaDesc.GetName() diff --git a/pkg/server/tenant.go b/pkg/server/tenant.go index 8ef0d05d9283..074989592f2b 100644 --- a/pkg/server/tenant.go +++ b/pkg/server/tenant.go @@ -405,7 +405,6 @@ func (s *SQLServerWrapper) PreStart(ctx context.Context) error { ieMon := sql.MakeInternalExecutorMemMonitor(sql.MemoryMetrics{}, s.ClusterSettings()) ieMon.StartNoReserved(ctx, s.PGServer().SQLServer.GetBytesMonitor()) s.stopper.AddCloser(stop.CloserFn(func() { ieMon.Stop(ctx) })) - fileTableInternalExecutor := sql.MakeInternalExecutor(s.PGServer().SQLServer, sql.MemoryMetrics{}, ieMon) s.externalStorageBuilder.init( ctx, s.sqlCfg.ExternalIODirConfig, @@ -413,9 +412,8 @@ func (s *SQLServerWrapper) PreStart(ctx context.Context) error { s.sqlServer.cfg.IDContainer, s.nodeDialer, s.sqlServer.cfg.TestingKnobs, - &fileTableInternalExecutor, - s.sqlServer.execCfg.InternalExecutorFactory, - s.db, + s.sqlServer.execCfg.InternalDB. + CloneWithMemoryMonitor(sql.MemoryMetrics{}, ieMon), s.costController, s.registry, ) @@ -904,30 +902,33 @@ func makeTenantSQLServerArgs( tenantConnect, ) + // Define structures which have circular dependencies. The underlying structures + // will be filled in during the construction of the sql server. circularInternalExecutor := &sql.InternalExecutor{} - internalExecutorFactory := &sql.InternalExecutorFactory{} + internalExecutorFactory := sql.NewShimInternalDB(db) circularJobRegistry := &jobs.Registry{} // Initialize the protectedts subsystem in multi-tenant clusters. 
var protectedTSProvider protectedts.Provider protectedtsKnobs, _ := baseCfg.TestingKnobs.ProtectedTS.(*protectedts.TestingKnobs) pp, err := ptprovider.New(ptprovider.Config{ - DB: db, - InternalExecutor: circularInternalExecutor, - Settings: st, - Knobs: protectedtsKnobs, + DB: internalExecutorFactory, + Settings: st, + Knobs: protectedtsKnobs, ReconcileStatusFuncs: ptreconcile.StatusFuncs{ jobsprotectedts.GetMetaType(jobsprotectedts.Jobs): jobsprotectedts.MakeStatusFunc( - circularJobRegistry, circularInternalExecutor, jobsprotectedts.Jobs), + circularJobRegistry, jobsprotectedts.Jobs, + ), jobsprotectedts.GetMetaType(jobsprotectedts.Schedules): jobsprotectedts.MakeStatusFunc( - circularJobRegistry, circularInternalExecutor, jobsprotectedts.Schedules), + circularJobRegistry, jobsprotectedts.Schedules, + ), }, }) if err != nil { return sqlServerArgs{}, err } registry.AddMetricStruct(pp.Metrics()) - protectedTSProvider = tenantProtectedTSProvider{Provider: pp, st: st} + protectedTSProvider = pp recorder := status.NewMetricsRecorder(clock, nil, rpcContext, nil, st) @@ -1008,7 +1009,7 @@ func makeTenantSQLServerArgs( sessionRegistry: sessionRegistry, remoteFlowRunner: remoteFlowRunner, circularInternalExecutor: circularInternalExecutor, - internalExecutorFactory: internalExecutorFactory, + internalDB: internalExecutorFactory, circularJobRegistry: circularJobRegistry, protectedtsProvider: protectedTSProvider, rangeFeedFactory: rangeFeedFactory, diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go index 770214c46400..009a059a476e 100644 --- a/pkg/server/testserver.go +++ b/pkg/server/testserver.go @@ -36,8 +36,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" - "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" - "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" 
"github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/security/certnames" @@ -615,17 +613,6 @@ func (ts *TestServer) Start(ctx context.Context) error { return nil } -type tenantProtectedTSProvider struct { - protectedts.Provider - st *cluster.Settings -} - -func (d tenantProtectedTSProvider) Protect( - ctx context.Context, txn *kv.Txn, rec *ptpb.Record, -) error { - return d.Provider.Protect(ctx, txn, rec) -} - // TestTenant is an in-memory instantiation of the SQL-only process created for // each active Cockroach tenant. TestTenant provides tests with access to // internal methods and state on SQLServer. It is typically started in tests by @@ -1288,9 +1275,9 @@ func (ts *TestServer) InternalExecutor() interface{} { return ts.sqlServer.internalExecutor } -// InternalExecutorFactory is part of TestServerInterface. -func (ts *TestServer) InternalExecutorFactory() interface{} { - return ts.sqlServer.internalExecutorFactory +// InternalDB is part of TestServerInterface. +func (ts *TestServer) InternalDB() interface{} { + return ts.sqlServer.internalDB } // GetNode exposes the Server's Node. 
diff --git a/pkg/server/tracedumper/BUILD.bazel b/pkg/server/tracedumper/BUILD.bazel index 62d5b755861f..435522979868 100644 --- a/pkg/server/tracedumper/BUILD.bazel +++ b/pkg/server/tracedumper/BUILD.bazel @@ -13,7 +13,7 @@ go_library( "//pkg/server/dumpstore", "//pkg/settings", "//pkg/settings/cluster", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/util/log", "//pkg/util/timeutil", "//pkg/util/tracing/zipper", @@ -34,7 +34,7 @@ go_test( "//pkg/security/securitytest", "//pkg/server", "//pkg/server/dumpstore", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/sql/tests", "//pkg/testutils", "//pkg/testutils/serverutils", diff --git a/pkg/server/tracedumper/tracedumper.go b/pkg/server/tracedumper/tracedumper.go index 41bec1bf80d0..238b4c580d5c 100644 --- a/pkg/server/tracedumper/tracedumper.go +++ b/pkg/server/tracedumper/tracedumper.go @@ -20,7 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/server/dumpstore" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/tracing/zipper" @@ -76,9 +76,7 @@ var _ dumpstore.Dumper = &TraceDumper{} // with traceID, to the configured dir. // The file names are prefixed with the timestamp of when it was written, to // facilitate GC of older trace zips. 
-func (t *TraceDumper) Dump( - ctx context.Context, name string, traceID int64, ie sqlutil.InternalExecutor, -) { +func (t *TraceDumper) Dump(ctx context.Context, name string, traceID int64, ie isql.Executor) { err := func() error { now := t.currentTime() traceZipFile := fmt.Sprintf( diff --git a/pkg/server/tracedumper/tracedumper_test.go b/pkg/server/tracedumper/tracedumper_test.go index cb11791f87b5..55e482e98425 100644 --- a/pkg/server/tracedumper/tracedumper_test.go +++ b/pkg/server/tracedumper/tracedumper_test.go @@ -19,7 +19,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/server/dumpstore" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -50,7 +50,7 @@ func TestTraceDumperZipCreation(t *testing.T) { defer s.Stopper().Stop(ctx) filename := "foo" - td.Dump(ctx, filename, 123, s.InternalExecutor().(sqlutil.InternalExecutor)) + td.Dump(ctx, filename, 123, s.InternalExecutor().(isql.Executor)) expectedFilename := fmt.Sprintf("%s.%s.%s.zip", jobTraceDumpPrefix, baseTime.Format(timeFormat), filename) fullpath := td.store.GetFullPath(expectedFilename) diff --git a/pkg/spanconfig/spanconfig.go b/pkg/spanconfig/spanconfig.go index cfe4bf0eacba..7c987c863b09 100644 --- a/pkg/spanconfig/spanconfig.go +++ b/pkg/spanconfig/spanconfig.go @@ -126,8 +126,7 @@ type SQLTranslator interface { // that don't exist. // Additionally, if `generateSystemSpanConfigurations` is set to true, // Translate will generate all the span configurations that apply to - // `spanconfig.SystemTargets`. The timestamp at which the translation is valid - // is also returned. + // `spanconfig.SystemTargets`. // // For every ID we first descend the zone configuration hierarchy with the // ID as the root to accumulate IDs of all leaf objects. 
Leaf objects are @@ -137,14 +136,17 @@ type SQLTranslator interface { // for each one of these accumulated IDs, we generate tuples // by following up the inheritance chain to fully hydrate the span // configuration. Translate also accounts for and negotiates subzone spans. - Translate(ctx context.Context, ids descpb.IDs, - generateSystemSpanConfigurations bool) ([]Record, hlc.Timestamp, error) + Translate( + ctx context.Context, + ids descpb.IDs, + generateSystemSpanConfigurations bool, + ) ([]Record, error) } // FullTranslate translates the entire SQL zone configuration state to the span // configuration state. The timestamp at which such a translation is valid is // also returned. -func FullTranslate(ctx context.Context, s SQLTranslator) ([]Record, hlc.Timestamp, error) { +func FullTranslate(ctx context.Context, s SQLTranslator) ([]Record, error) { // As RANGE DEFAULT is the root of all zone configurations (including other // named zones for the system tenant), we can construct the entire span // configuration state by starting from RANGE DEFAULT. 
diff --git a/pkg/spanconfig/spanconfigjob/BUILD.bazel b/pkg/spanconfig/spanconfigjob/BUILD.bazel index 76c75637e524..2e4a9d985700 100644 --- a/pkg/spanconfig/spanconfigjob/BUILD.bazel +++ b/pkg/spanconfig/spanconfigjob/BUILD.bazel @@ -9,11 +9,11 @@ go_library( deps = [ "//pkg/jobs", "//pkg/jobs/jobspb", - "//pkg/kv", "//pkg/settings", "//pkg/settings/cluster", "//pkg/spanconfig", "//pkg/sql", + "//pkg/sql/isql", "//pkg/util", "//pkg/util/hlc", "//pkg/util/log", diff --git a/pkg/spanconfig/spanconfigjob/job.go b/pkg/spanconfig/spanconfigjob/job.go index 7602b7ca88df..089be902ea6e 100644 --- a/pkg/spanconfig/spanconfigjob/job.go +++ b/pkg/spanconfig/spanconfigjob/job.go @@ -16,11 +16,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -82,7 +82,7 @@ func (r *resumer) Resume(ctx context.Context, execCtxI interface{}) (jobErr erro // Note that we are doing this before the possible error return below. If // there is a problem starting the reconciler this job will aggressively // restart at the job system level with no backoff. 
- if err := r.job.Update(ctx, nil, func(_ *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + if err := r.job.NoTxn().Update(ctx, func(_ isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { if md.RunStats != nil && md.RunStats.NumRuns > 1 { ju.UpdateRunStats(1, md.RunStats.LastRun) } @@ -182,7 +182,7 @@ func (r *resumer) Resume(ctx context.Context, execCtxI interface{}) (jobErr erro } lastCheckpoint = rc.Checkpoint() - return r.job.SetProgress(ctx, nil, jobspb.AutoSpanConfigReconciliationProgress{ + return r.job.NoTxn().SetProgress(ctx, jobspb.AutoSpanConfigReconciliationProgress{ Checkpoint: rc.Checkpoint(), }) }); err != nil { diff --git a/pkg/spanconfig/spanconfigkvaccessor/BUILD.bazel b/pkg/spanconfig/spanconfigkvaccessor/BUILD.bazel index f4de40ad9dbf..b16181c8f1f3 100644 --- a/pkg/spanconfig/spanconfigkvaccessor/BUILD.bazel +++ b/pkg/spanconfig/spanconfigkvaccessor/BUILD.bazel @@ -15,10 +15,10 @@ go_library( "//pkg/settings", "//pkg/settings/cluster", "//pkg/spanconfig", + "//pkg/sql/isql", "//pkg/sql/parser", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/util/hlc", "//pkg/util/log", "//pkg/util/protoutil", @@ -44,7 +44,7 @@ go_test( "//pkg/server", "//pkg/spanconfig", "//pkg/spanconfig/spanconfigtestutils", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/testutils", "//pkg/testutils/datapathutils", "//pkg/testutils/serverutils", diff --git a/pkg/spanconfig/spanconfigkvaccessor/kvaccessor.go b/pkg/spanconfig/spanconfigkvaccessor/kvaccessor.go index 9ac70383224b..ef035ef81b1f 100644 --- a/pkg/spanconfig/spanconfigkvaccessor/kvaccessor.go +++ b/pkg/spanconfig/spanconfigkvaccessor/kvaccessor.go @@ -22,10 +22,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/spanconfig" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" 
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" @@ -46,7 +46,7 @@ var batchSizeSetting = settings.RegisterIntSetting( // CRDB cluster. It's a concrete implementation of the KVAccessor interface. type KVAccessor struct { db *kv.DB - ie sqlutil.InternalExecutor + ie isql.Executor // optionalTxn captures the transaction we're scoped to; it's allowed to be // nil. If nil, it's unsafe to use multiple times as part of the same // request with any expectation of transactionality -- we're responsible for @@ -67,7 +67,7 @@ var _ spanconfig.KVAccessor = &KVAccessor{} // New constructs a new KVAccessor. func New( db *kv.DB, - ie sqlutil.InternalExecutor, + ie isql.Executor, settings *cluster.Settings, clock *hlc.Clock, configurationsTableFQN string, @@ -181,7 +181,7 @@ func (k *KVAccessor) UpdateSpanConfigRecords( func newKVAccessor( db *kv.DB, - ie sqlutil.InternalExecutor, + ie isql.Executor, settings *cluster.Settings, clock *hlc.Clock, configurationsTableFQN string, diff --git a/pkg/spanconfig/spanconfigkvaccessor/kvaccessor_test.go b/pkg/spanconfig/spanconfigkvaccessor/kvaccessor_test.go index 09ab8d802e60..805817c915d3 100644 --- a/pkg/spanconfig/spanconfigkvaccessor/kvaccessor_test.go +++ b/pkg/spanconfig/spanconfigkvaccessor/kvaccessor_test.go @@ -22,7 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/spanconfig/spanconfigkvaccessor" "github.com/cockroachdb/cockroach/pkg/spanconfig/spanconfigtestutils" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" 
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -80,7 +80,7 @@ func TestDataDriven(t *testing.T) { tdb.Exec(t, fmt.Sprintf("CREATE TABLE %s (LIKE system.span_configurations INCLUDING ALL)", dummySpanConfigurationsFQN)) accessor := spanconfigkvaccessor.New( tc.Server(0).DB(), - tc.Server(0).InternalExecutor().(sqlutil.InternalExecutor), + tc.Server(0).InternalExecutor().(isql.Executor), tc.Server(0).ClusterSettings(), tc.Server(0).Clock(), dummySpanConfigurationsFQN, @@ -162,7 +162,7 @@ func BenchmarkKVAccessorUpdate(b *testing.B) { accessor := spanconfigkvaccessor.New( tc.Server(0).DB(), - tc.Server(0).InternalExecutor().(sqlutil.InternalExecutor), + tc.Server(0).InternalExecutor().(isql.Executor), tc.Server(0).ClusterSettings(), tc.Server(0).Clock(), dummySpanConfigurationsFQN, @@ -205,7 +205,7 @@ func TestKVAccessorPagination(t *testing.T) { var batches, batchSize int accessor := spanconfigkvaccessor.New( tc.Server(0).DB(), - tc.Server(0).InternalExecutor().(sqlutil.InternalExecutor), + tc.Server(0).InternalExecutor().(isql.Executor), tc.Server(0).ClusterSettings(), tc.Server(0).Clock(), dummySpanConfigurationsFQN, @@ -324,7 +324,7 @@ func TestKVAccessorCommitMinTSWaitRespondsToCtxCancellation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) accessor := spanconfigkvaccessor.New( tc.Server(0).DB(), - tc.Server(0).InternalExecutor().(sqlutil.InternalExecutor), + tc.Server(0).InternalExecutor().(isql.Executor), tc.Server(0).ClusterSettings(), tc.Server(0).Clock(), dummySpanConfigurationsFQN, diff --git a/pkg/spanconfig/spanconfigkvsubscriber/BUILD.bazel b/pkg/spanconfig/spanconfigkvsubscriber/BUILD.bazel index 4aa2e24cf664..55ca34d31bbd 100644 --- a/pkg/spanconfig/spanconfigkvsubscriber/BUILD.bazel +++ b/pkg/spanconfig/spanconfigkvsubscriber/BUILD.bazel @@ -63,7 +63,7 @@ go_test( "//pkg/spanconfig", "//pkg/spanconfig/spanconfigkvaccessor", "//pkg/spanconfig/spanconfigtestutils", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", 
"//pkg/testutils", "//pkg/testutils/datapathutils", "//pkg/testutils/serverutils", diff --git a/pkg/spanconfig/spanconfigkvsubscriber/datadriven_test.go b/pkg/spanconfig/spanconfigkvsubscriber/datadriven_test.go index 0188b1022866..d3383bf71a4b 100644 --- a/pkg/spanconfig/spanconfigkvsubscriber/datadriven_test.go +++ b/pkg/spanconfig/spanconfigkvsubscriber/datadriven_test.go @@ -26,7 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/spanconfig/spanconfigkvaccessor" "github.com/cockroachdb/cockroach/pkg/spanconfig/spanconfigkvsubscriber" "github.com/cockroachdb/cockroach/pkg/spanconfig/spanconfigtestutils" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -126,7 +126,7 @@ func TestDataDriven(t *testing.T) { kvAccessor := spanconfigkvaccessor.New( tc.Server(0).DB(), - tc.Server(0).InternalExecutor().(sqlutil.InternalExecutor), + tc.Server(0).InternalExecutor().(isql.Executor), tc.Server(0).ClusterSettings(), tc.Server(0).Clock(), fmt.Sprintf("defaultdb.public.%s", dummyTableName), diff --git a/pkg/spanconfig/spanconfiglimiter/BUILD.bazel b/pkg/spanconfig/spanconfiglimiter/BUILD.bazel index 7c4ee02a0851..1cd761e52e80 100644 --- a/pkg/spanconfig/spanconfiglimiter/BUILD.bazel +++ b/pkg/spanconfig/spanconfiglimiter/BUILD.bazel @@ -14,9 +14,9 @@ go_library( "//pkg/settings", "//pkg/settings/cluster", "//pkg/spanconfig", + "//pkg/sql/isql", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "@com_github_cockroachdb_errors//:errors", ], ) diff --git a/pkg/spanconfig/spanconfiglimiter/limiter.go b/pkg/spanconfig/spanconfiglimiter/limiter.go index 8ea649936bec..fa70c74b52a5 100644 --- a/pkg/spanconfig/spanconfiglimiter/limiter.go +++ b/pkg/spanconfig/spanconfiglimiter/limiter.go @@ -19,9 +19,9 @@ import ( 
"github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/spanconfig" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/errors" ) @@ -39,15 +39,13 @@ var tenantLimitSetting = settings.RegisterIntSetting( // Limiter is used to limit the number of span configs installed by secondary // tenants. It's a concrete implementation of the spanconfig.Limiter interface. type Limiter struct { - ie sqlutil.InternalExecutor + ie isql.Executor settings *cluster.Settings knobs *spanconfig.TestingKnobs } // New constructs and returns a Limiter. -func New( - ie sqlutil.InternalExecutor, settings *cluster.Settings, knobs *spanconfig.TestingKnobs, -) *Limiter { +func New(ie isql.Executor, settings *cluster.Settings, knobs *spanconfig.TestingKnobs) *Limiter { if knobs == nil { knobs = &spanconfig.TestingKnobs{} } diff --git a/pkg/spanconfig/spanconfigmanager/BUILD.bazel b/pkg/spanconfig/spanconfigmanager/BUILD.bazel index 4c7be448f16f..245453aee5e1 100644 --- a/pkg/spanconfig/spanconfigmanager/BUILD.bazel +++ b/pkg/spanconfig/spanconfigmanager/BUILD.bazel @@ -13,12 +13,11 @@ go_library( "//pkg/clusterversion", "//pkg/jobs", "//pkg/jobs/jobspb", - "//pkg/kv", "//pkg/security/username", "//pkg/settings", "//pkg/settings/cluster", "//pkg/spanconfig", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/util/log", "//pkg/util/stop", "//pkg/util/timeutil", @@ -42,8 +41,8 @@ go_test( "//pkg/security/username", "//pkg/server", "//pkg/spanconfig", - "//pkg/sql", "//pkg/sql/catalog", + "//pkg/sql/isql", "//pkg/testutils", "//pkg/testutils/serverutils", "//pkg/testutils/sqlutils", diff --git a/pkg/spanconfig/spanconfigmanager/manager.go b/pkg/spanconfig/spanconfigmanager/manager.go index 39ea03f103e9..de8c293a5f61 100644 --- 
a/pkg/spanconfig/spanconfigmanager/manager.go +++ b/pkg/spanconfig/spanconfigmanager/manager.go @@ -17,12 +17,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/spanconfig" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/timeutil" @@ -60,9 +59,8 @@ var jobEnabledSetting = settings.RegisterBoolSetting( // // configurations with the clusters span configurations. type Manager struct { - db *kv.DB + db isql.DB jr *jobs.Registry - ie sqlutil.InternalExecutor stopper *stop.Stopper settings *cluster.Settings knobs *spanconfig.TestingKnobs @@ -72,9 +70,8 @@ type Manager struct { // New constructs a new Manager. 
func New( - db *kv.DB, + idb isql.DB, jr *jobs.Registry, - ie sqlutil.InternalExecutor, stopper *stop.Stopper, settings *cluster.Settings, reconciler spanconfig.Reconciler, @@ -84,9 +81,8 @@ func New( knobs = &spanconfig.TestingKnobs{} } return &Manager{ - db: db, + db: idb, jr: jr, - ie: ie, stopper: stopper, settings: settings, Reconciler: reconciler, @@ -188,8 +184,8 @@ func (m *Manager) createAndStartJobIfNoneExists(ctx context.Context) (bool, erro } var job *jobs.Job - if err := m.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - exists, err := jobs.RunningJobExists(ctx, jobspb.InvalidJobID, m.ie, txn, + if err := m.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + exists, err := jobs.RunningJobExists(ctx, jobspb.InvalidJobID, txn, func(payload *jobspb.Payload) bool { return payload.Type() == jobspb.TypeAutoSpanConfigReconciliation }, diff --git a/pkg/spanconfig/spanconfigmanager/manager_test.go b/pkg/spanconfig/spanconfigmanager/manager_test.go index 6de529b35a34..1727f4f7c0c5 100644 --- a/pkg/spanconfig/spanconfigmanager/manager_test.go +++ b/pkg/spanconfig/spanconfigmanager/manager_test.go @@ -22,8 +22,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/spanconfig/spanconfigmanager" - "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" @@ -69,9 +69,8 @@ func TestManagerConcurrentJobCreation(t *testing.T) { ts := tc.Server(0) manager := spanconfigmanager.New( - ts.DB(), + ts.InternalDB().(isql.DB), ts.JobRegistry().(*jobs.Registry), - ts.InternalExecutor().(*sql.InternalExecutor), ts.Stopper(), ts.ClusterSettings(), ts.SpanConfigReconciler().(spanconfig.Reconciler), @@ -155,9 
+154,8 @@ func TestManagerStartsJobIfFailed(t *testing.T) { ts := tc.Server(0) manager := spanconfigmanager.New( - ts.DB(), + ts.InternalDB().(isql.DB), ts.JobRegistry().(*jobs.Registry), - ts.InternalExecutor().(*sql.InternalExecutor), ts.Stopper(), ts.ClusterSettings(), ts.SpanConfigReconciler().(spanconfig.Reconciler), @@ -218,9 +216,8 @@ func TestManagerCheckJobConditions(t *testing.T) { return currentCount } manager := spanconfigmanager.New( - ts.DB(), + ts.InternalDB().(isql.DB), ts.JobRegistry().(*jobs.Registry), - ts.InternalExecutor().(*sql.InternalExecutor), ts.Stopper(), ts.ClusterSettings(), ts.SpanConfigReconciler().(spanconfig.Reconciler), @@ -318,9 +315,8 @@ func TestReconciliationJobErrorAndRecovery(t *testing.T) { var jobID jobspb.JobID ts := tc.Server(0) manager := spanconfigmanager.New( - ts.DB(), + ts.InternalDB().(isql.DB), ts.JobRegistry().(*jobs.Registry), - ts.InternalExecutor().(*sql.InternalExecutor), ts.Stopper(), ts.ClusterSettings(), ts.SpanConfigReconciler().(spanconfig.Reconciler), @@ -412,9 +408,8 @@ func TestReconciliationUsesRightCheckpoint(t *testing.T) { ts := tc.Server(0) manager := spanconfigmanager.New( - ts.DB(), + ts.InternalDB().(isql.DB), ts.JobRegistry().(*jobs.Registry), - ts.InternalExecutor().(*sql.InternalExecutor), ts.Stopper(), ts.ClusterSettings(), ts.SpanConfigReconciler().(spanconfig.Reconciler), diff --git a/pkg/spanconfig/spanconfigreconciler/BUILD.bazel b/pkg/spanconfig/spanconfigreconciler/BUILD.bazel index 93e7d0c00a54..986d4ee63a2a 100644 --- a/pkg/spanconfig/spanconfigreconciler/BUILD.bazel +++ b/pkg/spanconfig/spanconfigreconciler/BUILD.bazel @@ -18,8 +18,8 @@ go_library( "//pkg/sql/catalog", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/descs", + "//pkg/sql/isql", "//pkg/sql/sqlliveness", - "//pkg/sql/sqlutil", "//pkg/util/hlc", "//pkg/util/log", "//pkg/util/retry", diff --git a/pkg/spanconfig/spanconfigreconciler/reconciler.go b/pkg/spanconfig/spanconfigreconciler/reconciler.go index 
c290e70cf2c3..af08674f43e9 100644 --- a/pkg/spanconfig/spanconfigreconciler/reconciler.go +++ b/pkg/spanconfig/spanconfigreconciler/reconciler.go @@ -25,8 +25,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/retry" @@ -220,7 +220,7 @@ type fullReconciler struct { // - the timestamp we've reconciled up until. func (f *fullReconciler) reconcile( ctx context.Context, -) (storeWithLatestSpanConfigs *spanconfigstore.Store, reconciledUpUntil hlc.Timestamp, _ error) { +) (storeWithLatestSpanConfigs *spanconfigstore.Store, _ hlc.Timestamp, _ error) { storeWithExistingSpanConfigs, err := f.fetchExistingSpanConfigs(ctx) if err != nil { return nil, hlc.Timestamp{}, err @@ -230,15 +230,18 @@ func (f *fullReconciler) reconcile( // view of things. 
var records []spanconfig.Record - if err := f.execCfg.InternalExecutorFactory.DescsTxnWithExecutor(ctx, f.execCfg.DB, nil /* session data */, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, ie sqlutil.InternalExecutor, + var kvTxn *kv.Txn + if err := f.execCfg.InternalDB.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { - translator := f.sqlTranslatorFactory.NewSQLTranslator(txn, ie, descsCol) - records, reconciledUpUntil, err = spanconfig.FullTranslate(ctx, translator) + kvTxn = txn.KV() + translator := f.sqlTranslatorFactory.NewSQLTranslator(txn) + records, err = spanconfig.FullTranslate(ctx, translator) return err }); err != nil { return nil, hlc.Timestamp{}, err } + readTimestamp := kvTxn.CommitTimestamp() updates := make([]spanconfig.Update, len(records)) for i, record := range records { @@ -295,7 +298,7 @@ func (f *fullReconciler) reconcile( storeWithLatestSpanConfigs.Apply(ctx, false /* dryrun */, del) } - return storeWithLatestSpanConfigs, reconciledUpUntil, nil + return storeWithLatestSpanConfigs, readTimestamp, nil } // fetchExistingSpanConfigs returns a store populated with all span configs @@ -475,8 +478,8 @@ func (r *incrementalReconciler) reconcile( var missingProtectedTimestampTargets []spanconfig.SystemTarget var records []spanconfig.Record - if err := r.execCfg.InternalExecutorFactory.DescsTxnWithExecutor(ctx, r.execCfg.DB, nil /* session data */, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, ie sqlutil.InternalExecutor, + if err := r.execCfg.InternalDB.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { var err error @@ -484,7 +487,7 @@ func (r *incrementalReconciler) reconcile( // tables and system targets that live on the Reconciler, we could // move this to the SQLTranslator instead, now that the SQLTranslator // is transaction scoped. 
- missingTableIDs, err = r.filterForMissingTableIDs(ctx, txn, descsCol, sqlUpdates) + missingTableIDs, err = r.filterForMissingTableIDs(ctx, txn.KV(), txn.Descriptors(), sqlUpdates) if err != nil { return err } @@ -496,8 +499,8 @@ func (r *incrementalReconciler) reconcile( return err } - translator := r.sqlTranslatorFactory.NewSQLTranslator(txn, ie, descsCol) - records, _, err = translator.Translate(ctx, allIDs, generateSystemSpanConfigurations) + translator := r.sqlTranslatorFactory.NewSQLTranslator(txn) + records, err = translator.Translate(ctx, allIDs, generateSystemSpanConfigurations) return err }); err != nil { return err @@ -548,7 +551,7 @@ func (r *incrementalReconciler) reconcile( // correspond to cluster or tenant target protected timestamp records that are // no longer found, because they've been released. func (r *incrementalReconciler) filterForMissingProtectedTimestampSystemTargets( - ctx context.Context, txn *kv.Txn, updates []spanconfig.SQLUpdate, + ctx context.Context, txn isql.Txn, updates []spanconfig.SQLUpdate, ) ([]spanconfig.SystemTarget, error) { seen := make(map[spanconfig.SystemTarget]struct{}) var missingSystemTargets []spanconfig.SystemTarget @@ -567,7 +570,7 @@ func (r *incrementalReconciler) filterForMissingProtectedTimestampSystemTargets( // timestamp subsystem, and the internal limits to limit the size of this // table, there is scope for improvement in the future. One option could be // a rangefeed-backed materialized view of the system table. 
- ptsState, err := r.execCfg.ProtectedTimestampProvider.GetState(ctx, txn) + ptsState, err := r.execCfg.ProtectedTimestampProvider.WithTxn(txn).GetState(ctx) if err != nil { return nil, errors.Wrap(err, "failed to get protected timestamp state") } diff --git a/pkg/spanconfig/spanconfigsqltranslator/BUILD.bazel b/pkg/spanconfig/spanconfigsqltranslator/BUILD.bazel index ac617a79d49d..975d1879c354 100644 --- a/pkg/spanconfig/spanconfigsqltranslator/BUILD.bazel +++ b/pkg/spanconfig/spanconfigsqltranslator/BUILD.bazel @@ -17,8 +17,6 @@ go_library( "//pkg/sql/catalog", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/descs", - "//pkg/sql/sqlutil", - "//pkg/util/hlc", "@com_github_cockroachdb_errors//:errors", ], ) diff --git a/pkg/spanconfig/spanconfigsqltranslator/sqltranslator.go b/pkg/spanconfig/spanconfigsqltranslator/sqltranslator.go index 8a082173ced7..923dc884b5ac 100644 --- a/pkg/spanconfig/spanconfigsqltranslator/sqltranslator.go +++ b/pkg/spanconfig/spanconfigsqltranslator/sqltranslator.go @@ -26,33 +26,18 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" - "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/errors" ) // SQLTranslator implements the spanconfig.SQLTranslator interface. var _ spanconfig.SQLTranslator = &SQLTranslator{} -// txnBundle is created to emphasize that the SQL translator is correspond to -// a certain txn, and all fields here are a whole. It essentially keeps the -// semantics of “translate at a snapshot in time”. This means that this -// txnBundle should be written only in `NewTranslator`. -type txnBundle struct { - txn *kv.Txn - descsCol *descs.Collection - // TODO(janexing): we inject ie here is to replace the executor used in - // s.ptsProvider.GetState() in SQLTranslator.Translate(). 
- ie sqlutil.InternalExecutor -} - // SQLTranslator is the concrete implementation of spanconfig.SQLTranslator. type SQLTranslator struct { - ptsProvider protectedts.Provider - codec keys.SQLCodec - knobs *spanconfig.TestingKnobs - - txnBundle txnBundle + codec keys.SQLCodec + knobs *spanconfig.TestingKnobs + txn descs.Txn + pts protectedts.Storage } // Factory is used to construct transaction-scoped SQLTranslators. @@ -79,40 +64,19 @@ func NewFactory( // NewSQLTranslator constructs and returns a transaction-scoped // spanconfig.SQLTranslator. The caller must ensure that the collection and // internal executor and the transaction are associated with each other. -func (f *Factory) NewSQLTranslator( - txn *kv.Txn, ie sqlutil.InternalExecutor, descsCol *descs.Collection, -) *SQLTranslator { +func (f *Factory) NewSQLTranslator(txn descs.Txn) *SQLTranslator { return &SQLTranslator{ - ptsProvider: f.ptsProvider, - codec: f.codec, - knobs: f.knobs, - txnBundle: txnBundle{ - txn: txn, - descsCol: descsCol, - ie: ie, - }, + codec: f.codec, + knobs: f.knobs, + txn: txn, + pts: f.ptsProvider.WithTxn(txn), } } -// GetTxn returns the txn bound to this sql translator. -func (s *SQLTranslator) GetTxn() *kv.Txn { - return s.txnBundle.txn -} - -// GetDescsCollection returns the descriptor collection bound to this sql translator. -func (s *SQLTranslator) GetDescsCollection() *descs.Collection { - return s.txnBundle.descsCol -} - -// GetInternalExecutor returns the internal executor bound to this sql translator. -func (s *SQLTranslator) GetInternalExecutor() sqlutil.InternalExecutor { - return s.txnBundle.ie -} - // Translate is part of the spanconfig.SQLTranslator interface. 
func (s *SQLTranslator) Translate( ctx context.Context, ids descpb.IDs, generateSystemSpanConfigurations bool, -) (records []spanconfig.Record, _ hlc.Timestamp, _ error) { +) (records []spanconfig.Record, _ error) { // Construct an in-memory view of the system.protected_ts_records table to // populate the protected timestamp field on the emitted span configs. // @@ -122,16 +86,16 @@ func (s *SQLTranslator) Translate( // timestamp subsystem, and the internal limits to limit the size of this // table, there is scope for improvement in the future. One option could be // a rangefeed-backed materialized view of the system table. - ptsState, err := s.ptsProvider.GetState(ctx, s.GetTxn()) + ptsState, err := s.pts.GetState(ctx) if err != nil { - return nil, hlc.Timestamp{}, errors.Wrap(err, "failed to get protected timestamp state") + return nil, errors.Wrap(err, "failed to get protected timestamp state") } ptsStateReader := spanconfig.NewProtectedTimestampStateReader(ctx, ptsState) if generateSystemSpanConfigurations { records, err = s.generateSystemSpanConfigRecords(ptsStateReader) if err != nil { - return nil, hlc.Timestamp{}, errors.Wrap(err, "failed to generate SystemTarget records") + return nil, errors.Wrap(err, "failed to generate SystemTarget records") } } @@ -141,9 +105,9 @@ func (s *SQLTranslator) Translate( seen := make(map[descpb.ID]struct{}) var leafIDs descpb.IDs for _, id := range ids { - descendantLeafIDs, err := s.findDescendantLeafIDs(ctx, id, s.GetTxn(), s.GetDescsCollection()) + descendantLeafIDs, err := s.findDescendantLeafIDs(ctx, id) if err != nil { - return nil, hlc.Timestamp{}, err + return nil, err } for _, descendantLeafID := range descendantLeafIDs { if _, found := seen[descendantLeafID]; !found { @@ -153,15 +117,15 @@ func (s *SQLTranslator) Translate( } } - pseudoTableRecords, err := s.maybeGeneratePseudoTableRecords(ctx, s.GetTxn(), ids) + pseudoTableRecords, err := s.maybeGeneratePseudoTableRecords(ctx, ids) if err != nil { - return nil, 
hlc.Timestamp{}, err + return nil, err } records = append(records, pseudoTableRecords...) - scratchRangeRecord, err := s.maybeGenerateScratchRangeRecord(ctx, s.GetTxn(), ids) + scratchRangeRecord, err := s.maybeGenerateScratchRangeRecord(ctx, ids) if err != nil { - return nil, hlc.Timestamp{}, err + return nil, err } if !scratchRangeRecord.IsEmpty() { records = append(records, scratchRangeRecord) @@ -169,14 +133,14 @@ func (s *SQLTranslator) Translate( // For every unique leaf ID, generate span configurations. for _, leafID := range leafIDs { - translatedRecords, err := s.generateSpanConfigurations(ctx, leafID, s.GetTxn(), s.GetDescsCollection(), ptsStateReader) + translatedRecords, err := s.generateSpanConfigurations(ctx, leafID, ptsStateReader) if err != nil { - return nil, hlc.Timestamp{}, err + return nil, err } records = append(records, translatedRecords...) } - return records, s.GetTxn().CommitTimestamp(), nil + return records, nil } // generateSystemSpanConfigRecords is responsible for generating all the SpanConfigs @@ -237,18 +201,14 @@ func (s *SQLTranslator) generateSystemSpanConfigRecords( // ID. The ID must belong to an object that has a span configuration associated // with it, i.e, it should either belong to a table or a named zone. func (s *SQLTranslator) generateSpanConfigurations( - ctx context.Context, - id descpb.ID, - txn *kv.Txn, - descsCol *descs.Collection, - ptsStateReader *spanconfig.ProtectedTimestampStateReader, + ctx context.Context, id descpb.ID, ptsStateReader *spanconfig.ProtectedTimestampStateReader, ) (_ []spanconfig.Record, err error) { if zonepb.IsNamedZoneID(uint32(id)) { - return s.generateSpanConfigurationsForNamedZone(ctx, txn, id) + return s.generateSpanConfigurationsForNamedZone(ctx, s.txn.KV(), id) } // We're dealing with a SQL object. 
- desc, err := descsCol.ByID(txn).Get().Desc(ctx, id) + desc, err := s.txn.Descriptors().ByID(s.txn.KV()).Get().Desc(ctx, id) if err != nil { if errors.Is(err, catalog.ErrDescriptorNotFound) { return nil, nil // the descriptor has been deleted; nothing to do here @@ -271,7 +231,7 @@ func (s *SQLTranslator) generateSpanConfigurations( "can only generate span configurations for tables, but got %s", desc.DescriptorType(), ) } - return s.generateSpanConfigurationsForTable(ctx, txn, table, ptsStateReader) + return s.generateSpanConfigurationsForTable(ctx, s.txn.KV(), table, ptsStateReader) } // generateSpanConfigurationsForNamedZone expects an ID corresponding to a named @@ -323,14 +283,18 @@ func (s *SQLTranslator) generateSpanConfigurationsForNamedZone( return nil, errors.AssertionFailedf("unknown named zone config %s", name) } - zoneConfig, err := sql.GetHydratedZoneConfigForNamedZone(ctx, txn, s.txnBundle.descsCol, name) + zoneConfig, err := sql.GetHydratedZoneConfigForNamedZone( + ctx, txn, s.txn.Descriptors(), name, + ) if err != nil { return nil, err } spanConfig := zoneConfig.AsSpanConfig() var records []spanconfig.Record for _, span := range spans { - record, err := spanconfig.MakeRecord(spanconfig.MakeTargetFromSpan(span), spanConfig) + record, err := spanconfig.MakeRecord( + spanconfig.MakeTargetFromSpan(span), spanConfig, + ) if err != nil { return nil, err } @@ -355,7 +319,9 @@ func (s *SQLTranslator) generateSpanConfigurationsForTable( return nil, nil } - zone, err := sql.GetHydratedZoneConfigForTable(ctx, txn, s.txnBundle.descsCol, table.GetID()) + zone, err := sql.GetHydratedZoneConfigForTable( + ctx, txn, s.txn.Descriptors(), table.GetID(), + ) if err != nil { return nil, err } @@ -510,13 +476,13 @@ func (s *SQLTranslator) generateSpanConfigurationsForTable( // configuration hierarchy. Leaf IDs are either table IDs or named zone IDs // (other than RANGE DEFAULT). 
func (s *SQLTranslator) findDescendantLeafIDs( - ctx context.Context, id descpb.ID, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, id descpb.ID, ) (descpb.IDs, error) { if zonepb.IsNamedZoneID(uint32(id)) { - return s.findDescendantLeafIDsForNamedZone(ctx, id, txn, descsCol) + return s.findDescendantLeafIDsForNamedZone(ctx, id) } // We're dealing with a SQL Object here. - return s.findDescendantLeafIDsForDescriptor(ctx, id, txn, descsCol) + return s.findDescendantLeafIDsForDescriptor(ctx, id) } // findDescendantLeafIDsForDescriptor finds all leaf object IDs below the given @@ -527,9 +493,9 @@ func (s *SQLTranslator) findDescendantLeafIDs( // - Other: Nothing, as these do not carry zone configurations and // are not part of the zone configuration hierarchy. func (s *SQLTranslator) findDescendantLeafIDsForDescriptor( - ctx context.Context, id descpb.ID, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, id descpb.ID, ) (descpb.IDs, error) { - desc, err := descsCol.ByID(txn).Get().Desc(ctx, id) + desc, err := s.txn.Descriptors().ByID(s.txn.KV()).Get().Desc(ctx, id) if err != nil { if errors.Is(err, catalog.ErrDescriptorNotFound) { return nil, nil // the descriptor has been deleted; nothing to do here @@ -565,7 +531,7 @@ func (s *SQLTranslator) findDescendantLeafIDsForDescriptor( // GetAll is the only way to retrieve dropped descriptors whose IDs are not known // ahead of time. This has unfortunate performance implications tracked by // https://github.com/cockroachdb/cockroach/issues/90655 - all, err := descsCol.GetAll(ctx, txn) + all, err := s.txn.Descriptors().GetAll(ctx, s.txn.KV()) if err != nil { return nil, err } @@ -585,7 +551,7 @@ func (s *SQLTranslator) findDescendantLeafIDsForDescriptor( // - RANGE DEFAULT: All tables (and named zones iff system tenant). // - Any other named zone: ID of the named zone itself. 
func (s *SQLTranslator) findDescendantLeafIDsForNamedZone( - ctx context.Context, id descpb.ID, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, id descpb.ID, ) (descpb.IDs, error) { name, ok := zonepb.NamedZonesByID[uint32(id)] if !ok { @@ -599,14 +565,14 @@ func (s *SQLTranslator) findDescendantLeafIDsForNamedZone( } // A change to RANGE DEFAULT translates to every SQL object of the tenant. - databases, err := descsCol.GetAllDatabaseDescriptors(ctx, txn) + databases, err := s.txn.Descriptors().GetAllDatabaseDescriptors(ctx, s.txn.KV()) if err != nil { return nil, err } var descendantIDs descpb.IDs for _, dbDesc := range databases { tableIDs, err := s.findDescendantLeafIDsForDescriptor( - ctx, dbDesc.GetID(), txn, descsCol, + ctx, dbDesc.GetID(), ) if err != nil { return nil, err @@ -632,7 +598,7 @@ func (s *SQLTranslator) findDescendantLeafIDsForNamedZone( // maybeGeneratePseudoTableRecords generates span configs for // pseudo table ID key spans, if applicable. func (s *SQLTranslator) maybeGeneratePseudoTableRecords( - ctx context.Context, txn *kv.Txn, ids descpb.IDs, + ctx context.Context, ids descpb.IDs, ) ([]spanconfig.Record, error) { if !s.codec.ForSystemTenant() { return nil, nil @@ -664,7 +630,9 @@ func (s *SQLTranslator) maybeGeneratePseudoTableRecords( // emulate. As for what config to apply over said range -- we do as // the system config span does, applying the config for the system // database. 
- zone, err := sql.GetHydratedZoneConfigForDatabase(ctx, txn, s.txnBundle.descsCol, keys.SystemDatabaseID) + zone, err := sql.GetHydratedZoneConfigForDatabase( + ctx, s.txn.KV(), s.txn.Descriptors(), keys.SystemDatabaseID, + ) if err != nil { return nil, err } @@ -690,7 +658,7 @@ func (s *SQLTranslator) maybeGeneratePseudoTableRecords( } func (s *SQLTranslator) maybeGenerateScratchRangeRecord( - ctx context.Context, txn *kv.Txn, ids descpb.IDs, + ctx context.Context, ids descpb.IDs, ) (spanconfig.Record, error) { if !s.knobs.ConfigureScratchRange || !s.codec.ForSystemTenant() { return spanconfig.Record{}, nil // nothing to do @@ -701,7 +669,9 @@ func (s *SQLTranslator) maybeGenerateScratchRangeRecord( continue // nothing to do } - zone, err := sql.GetHydratedZoneConfigForDatabase(ctx, txn, s.txnBundle.descsCol, keys.RootNamespaceID) + zone, err := sql.GetHydratedZoneConfigForDatabase( + ctx, s.txn.KV(), s.txn.Descriptors(), keys.RootNamespaceID, + ) if err != nil { return spanconfig.Record{}, err } diff --git a/pkg/spanconfig/spanconfigsqlwatcher/BUILD.bazel b/pkg/spanconfig/spanconfigsqlwatcher/BUILD.bazel index b9514caee50b..c3c1987e27de 100644 --- a/pkg/spanconfig/spanconfigsqlwatcher/BUILD.bazel +++ b/pkg/spanconfig/spanconfigsqlwatcher/BUILD.bazel @@ -55,8 +55,8 @@ go_test( "//pkg/jobs", "//pkg/jobs/jobsprotectedts", "//pkg/keys", - "//pkg/kv", "//pkg/kv/kvserver/protectedts/ptpb", + "//pkg/kv/kvserver/protectedts/ptstorage", "//pkg/roachpb", "//pkg/security/securityassets", "//pkg/security/securitytest", @@ -65,6 +65,7 @@ go_test( "//pkg/sql/catalog", "//pkg/sql/catalog/descpb", "//pkg/sql/distsql", + "//pkg/sql/isql", "//pkg/testutils/serverutils", "//pkg/testutils/sqlutils", "//pkg/testutils/testcluster", diff --git a/pkg/spanconfig/spanconfigsqlwatcher/protectedtsdecoder_test.go b/pkg/spanconfig/spanconfigsqlwatcher/protectedtsdecoder_test.go index c751bc390594..b92bfe9cd241 100644 --- a/pkg/spanconfig/spanconfigsqlwatcher/protectedtsdecoder_test.go +++ 
b/pkg/spanconfig/spanconfigsqlwatcher/protectedtsdecoder_test.go @@ -18,12 +18,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptstorage" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/spanconfig/spanconfigsqlwatcher" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/distsql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/uuid" @@ -63,15 +64,16 @@ func TestProtectedTimestampDecoder(t *testing.T) { }, } { t.Run(testCase.name, func(t *testing.T) { - var rec *ptpb.Record ts := s0.Clock().Now() jobID := jr.MakeJobID() + pts := ptstorage.WithDatabase(ptp, s0.InternalDB().(isql.DB)) - require.NoError(t, s0.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { - rec = jobsprotectedts.MakeRecord(uuid.MakeV4(), int64(jobID), ts, - nil /* deprecatedSpans */, jobsprotectedts.Jobs, testCase.target) - return ptp.Protect(ctx, txn, rec) - })) + rec := jobsprotectedts.MakeRecord( + uuid.MakeV4(), int64(jobID), ts, + nil, /* deprecatedSpans */ + jobsprotectedts.Jobs, testCase.target, + ) + require.NoError(t, pts.Protect(ctx, rec)) rows, err := tc.Server(0).DB().Scan(ctx, k, k.PrefixEnd(), 0 /* maxRows */) require.NoError(t, err) @@ -88,9 +90,7 @@ func TestProtectedTimestampDecoder(t *testing.T) { require.Truef(t, rec.Target.Equal(got), "expected target=%s, got target=%s", rec.Target.String(), got.String()) - require.NoError(t, s0.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { - return ptp.Release(ctx, txn, 
rec.ID.GetUUID()) - })) + require.NoError(t, pts.Release(ctx, rec.ID.GetUUID())) }) } } diff --git a/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/BUILD.bazel b/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/BUILD.bazel index 888df890cc7d..573b82d9821a 100644 --- a/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/BUILD.bazel +++ b/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/BUILD.bazel @@ -13,7 +13,6 @@ go_library( "//pkg/base", "//pkg/jobs", "//pkg/jobs/jobsprotectedts", - "//pkg/kv", "//pkg/kv/kvserver/protectedts", "//pkg/kv/kvserver/protectedts/ptpb", "//pkg/roachpb", @@ -29,6 +28,7 @@ go_library( "//pkg/sql/catalog/descs", "//pkg/sql/catalog/tabledesc", "//pkg/sql/distsql", + "//pkg/sql/isql", "//pkg/sql/sem/tree", "//pkg/testutils", "//pkg/testutils/serverutils", diff --git a/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/tenant_state.go b/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/tenant_state.go index 923874ed5faa..47d7d5fb2df7 100644 --- a/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/tenant_state.go +++ b/pkg/spanconfig/spanconfigtestutils/spanconfigtestcluster/tenant_state.go @@ -18,7 +18,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/spanconfig" @@ -31,6 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/distsql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -146,20 +146,21 @@ func (s *Tenant) 
KVAccessorRecorder() *spanconfigtestutils.KVAccessorRecorder { func (s *Tenant) WithMutableDatabaseDescriptor( ctx context.Context, dbName string, f func(*dbdesc.Mutable), ) { - execCfg := s.ExecutorConfig().(sql.ExecutorConfig) - require.NoError(s.t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + descsDB := s.ExecutorConfig().(sql.ExecutorConfig).InternalDB + require.NoError(s.t, descsDB.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { - imm, err := descsCol.ByName(txn).WithOffline().Get().Database(ctx, dbName) + imm, err := txn.Descriptors().ByName(txn.KV()).WithOffline().Get().Database(ctx, dbName) if err != nil { return err } - mut, err := descsCol.MutableByID(txn).Database(ctx, imm.GetID()) + mut, err := txn.Descriptors().MutableByID(txn.KV()).Database(ctx, imm.GetID()) if err != nil { return err } f(mut) - return descsCol.WriteDesc(ctx, false, mut, txn) + const kvTrace = false + return txn.Descriptors().WriteDesc(ctx, kvTrace, mut, txn.KV()) })) } @@ -171,20 +172,20 @@ func (s *Tenant) WithMutableTableDescriptor( ) { execCfg := s.ExecutorConfig().(sql.ExecutorConfig) require.NoError(s.t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { - g := descsCol.ByName(txn).WithOffline().Get() + g := descsCol.ByName(txn.KV()).WithOffline().Get() tn := tree.NewTableNameWithSchema(tree.Name(dbName), "public", tree.Name(tbName)) _, imm, err := descs.PrefixAndTable(ctx, g, tn) if err != nil { return err } - mut, err := descsCol.MutableByID(txn).Table(ctx, imm.GetID()) + mut, err := descsCol.MutableByID(txn.KV()).Table(ctx, imm.GetID()) if err != nil { return err } f(mut) - return descsCol.WriteDesc(ctx, false /* kvTrace */, mut, txn) + return descsCol.WriteDesc(ctx, false /* kvTrace */, mut, txn.KV()) })) } @@ -194,10 +195,10 @@ func (s *Tenant) LookupTableDescriptorByID( ) (desc 
catalog.TableDescriptor) { execCfg := s.ExecutorConfig().(sql.ExecutorConfig) require.NoError(s.t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { var err error - desc, err = descsCol.ByID(txn).Get().Table(ctx, id) + desc, err = descsCol.ByID(txn.KV()).Get().Table(ctx, id) return err })) return desc @@ -209,10 +210,10 @@ func (s *Tenant) LookupTableByName( ) (desc catalog.TableDescriptor) { execCfg := s.ExecutorConfig().(sql.ExecutorConfig) require.NoError(s.t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { var err error - g := descsCol.ByName(txn).WithOffline().MaybeGet() + g := descsCol.ByName(txn.KV()).WithOffline().MaybeGet() tn := tree.NewTableNameWithSchema(tree.Name(dbName), "public", tree.Name(tbName)) _, desc, err = descs.PrefixAndTable(ctx, g, tn) return err @@ -227,9 +228,9 @@ func (s *Tenant) LookupDatabaseByName( ) (desc catalog.DatabaseDescriptor) { execCfg := s.ExecutorConfig().(sql.ExecutorConfig) require.NoError(s.t, sql.DescsTxn(ctx, &execCfg, - func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection) error { + func(ctx context.Context, txn isql.Txn, descsCol *descs.Collection) error { var err error - desc, err = descsCol.ByName(txn).WithOffline().Get().Database(ctx, dbName) + desc, err = descsCol.ByName(txn.KV()).WithOffline().Get().Database(ctx, dbName) return err })) return desc @@ -241,8 +242,8 @@ func (s *Tenant) MakeProtectedTimestampRecordAndProtect( ctx context.Context, recordID string, protectTS int, target *ptpb.Target, ) { jobID := s.JobsRegistry().MakeJobID() - require.NoError(s.t, s.ExecCfg().DB.Txn(ctx, - func(ctx context.Context, txn *kv.Txn) (err error) { + require.NoError(s.t, s.ExecCfg().InternalDB.Txn(ctx, + func(ctx context.Context, txn isql.Txn) (err error) { 
require.Len(s.t, recordID, 1, "datadriven test only supports single character record IDs") recID, err := uuid.FromBytes([]byte(strings.Repeat(recordID, 16))) @@ -250,20 +251,20 @@ func (s *Tenant) MakeProtectedTimestampRecordAndProtect( rec := jobsprotectedts.MakeRecord(recID, int64(jobID), hlc.Timestamp{WallTime: int64(protectTS)}, nil, /* deprecatedSpans */ jobsprotectedts.Jobs, target) - return s.ProtectedTimestampProvider().Protect(ctx, txn, rec) + return s.ProtectedTimestampProvider().WithTxn(txn).Protect(ctx, rec) })) s.updateTimestampAfterLastSQLChange() } // ReleaseProtectedTimestampRecord will release a ptpb.Record. func (s *Tenant) ReleaseProtectedTimestampRecord(ctx context.Context, recordID string) { - require.NoError(s.t, s.ExecCfg().DB.Txn(ctx, - func(ctx context.Context, txn *kv.Txn) error { + require.NoError(s.t, s.ExecCfg().InternalDB.Txn(ctx, + func(ctx context.Context, txn isql.Txn) error { require.Len(s.t, recordID, 1, "datadriven test only supports single character record IDs") recID, err := uuid.FromBytes([]byte(strings.Repeat(recordID, 16))) require.NoError(s.t, err) - return s.ProtectedTimestampProvider().Release(ctx, txn, recID) + return s.ProtectedTimestampProvider().WithTxn(txn).Release(ctx, recID) })) s.updateTimestampAfterLastSQLChange() } diff --git a/pkg/sql/BUILD.bazel b/pkg/sql/BUILD.bazel index b0e52da98635..9ceb276c846f 100644 --- a/pkg/sql/BUILD.bazel +++ b/pkg/sql/BUILD.bazel @@ -389,6 +389,7 @@ go_library( "//pkg/sql/idxrecommendations", "//pkg/sql/idxusage", "//pkg/sql/inverted", + "//pkg/sql/isql", "//pkg/sql/lex", "//pkg/sql/lexbase", "//pkg/sql/memsize", @@ -463,7 +464,6 @@ go_library( "//pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil", "//pkg/sql/sqlstats/sslocal", "//pkg/sql/sqltelemetry", - "//pkg/sql/sqlutil", "//pkg/sql/stats", "//pkg/sql/stats/bounds", "//pkg/sql/stmtdiagnostics", @@ -739,6 +739,7 @@ go_test( "//pkg/sql/execstats", "//pkg/sql/flowinfra", "//pkg/sql/gcjob", + "//pkg/sql/isql", "//pkg/sql/lexbase", 
"//pkg/sql/mutations", "//pkg/sql/opt/exec/explain", @@ -772,7 +773,6 @@ go_test( "//pkg/sql/sqlliveness", "//pkg/sql/sqlstats", "//pkg/sql/sqltestutils", - "//pkg/sql/sqlutil", "//pkg/sql/stats", "//pkg/sql/stmtdiagnostics", "//pkg/sql/tests", diff --git a/pkg/sql/alter_database.go b/pkg/sql/alter_database.go index c82ef1431370..82b6f0c30d22 100644 --- a/pkg/sql/alter_database.go +++ b/pkg/sql/alter_database.go @@ -540,7 +540,7 @@ func removeLocalityConfigFromAllTablesInDB( case *catpb.LocalityConfig_Global_: if err := ApplyZoneConfigForMultiRegionTable( ctx, - p.txn, + p.Txn(), p.ExecCfg(), p.extendedEvalCtx.Tracing.KVTracingEnabled(), p.Descriptors(), @@ -608,7 +608,7 @@ func (n *alterDatabaseDropRegionNode) startExec(params runParams) error { if err := discardMultiRegionFieldsForDatabaseZoneConfig( params.ctx, n.desc.ID, - params.p.txn, + params.p.Txn(), params.p.execCfg, params.p.Descriptors(), params.extendedEvalCtx.Tracing.KVTracingEnabled(), @@ -772,7 +772,7 @@ func (n *alterDatabasePrimaryRegionNode) switchPrimaryRegion(params runParams) e params.ctx, n.desc.ID, updatedRegionConfig, - params.p.txn, + params.p.Txn(), params.p.execCfg, params.p.Descriptors(), params.extendedEvalCtx.Tracing.KVTracingEnabled(), @@ -1151,7 +1151,7 @@ func (n *alterDatabaseSurvivalGoalNode) startExec(params runParams) error { params.ctx, n.desc.ID, regionConfig, - params.p.txn, + params.p.Txn(), params.p.execCfg, params.p.Descriptors(), params.extendedEvalCtx.Tracing.KVTracingEnabled(), @@ -1280,7 +1280,7 @@ func (n *alterDatabasePlacementNode) startExec(params runParams) error { params.ctx, n.desc.ID, regionConfig, - params.p.txn, + params.p.Txn(), params.p.execCfg, params.p.Descriptors(), params.extendedEvalCtx.Tracing.KVTracingEnabled(), @@ -1814,7 +1814,7 @@ func (n *alterDatabaseSecondaryRegion) startExec(params runParams) error { params.ctx, n.desc.ID, updatedRegionConfig, - params.p.txn, + params.p.Txn(), params.p.execCfg, params.p.Descriptors(), 
params.extendedEvalCtx.Tracing.KVTracingEnabled(), @@ -1920,7 +1920,7 @@ func (n *alterDatabaseDropSecondaryRegion) startExec(params runParams) error { params.ctx, n.desc.ID, updatedRegionConfig, - params.p.txn, + params.p.Txn(), params.p.execCfg, params.p.Descriptors(), params.extendedEvalCtx.Tracing.KVTracingEnabled(), @@ -2214,7 +2214,7 @@ func (n *alterDatabaseSetZoneConfigExtensionNode) startExec(params runParams) er params.ctx, n.desc.ID, dbZoneConfig, - params.p.txn, + params.p.Txn(), params.p.execCfg, params.p.Descriptors(), params.extendedEvalCtx.Tracing.KVTracingEnabled(), diff --git a/pkg/sql/alter_index.go b/pkg/sql/alter_index.go index db38c0382375..8e95dce4710e 100644 --- a/pkg/sql/alter_index.go +++ b/pkg/sql/alter_index.go @@ -125,7 +125,7 @@ func (n *alterIndexNode) startExec(params runParams) error { descriptorChanged = true if err := deleteRemovedPartitionZoneConfigs( params.ctx, - params.p.txn, + params.p.InternalSQLTxn(), n.tableDesc, params.p.Descriptors(), n.index.GetID(), diff --git a/pkg/sql/alter_role.go b/pkg/sql/alter_role.go index 624ded1da8c1..55a76daeff31 100644 --- a/pkg/sql/alter_role.go +++ b/pkg/sql/alter_role.go @@ -190,7 +190,7 @@ func (n *alterRoleNode) startExec(params runParams) error { } // Check if role exists. - row, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.QueryRowEx( + row, err := params.p.InternalSQLTxn().QueryRowEx( params.ctx, opName, params.p.txn, @@ -225,10 +225,11 @@ func (n *alterRoleNode) startExec(params runParams) error { if hasPasswordOpt { // Updating PASSWORD is a special case since PASSWORD lives in system.users // while the rest of the role options lives in system.role_options. 
- rowAffected, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.Exec( + rowAffected, err := params.p.InternalSQLTxn().ExecEx( params.ctx, opName, params.p.txn, + sessiondata.NodeUserSessionDataOverride, `UPDATE system.users SET "hashedPassword" = $2 WHERE username = $1`, n.roleName, hashedPassword, @@ -435,7 +436,7 @@ func (n *alterRoleSetNode) startExec(params runParams) error { var rowsAffected int var internalExecErr error if newSettings == nil { - rowsAffected, internalExecErr = params.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( + rowsAffected, internalExecErr = params.p.InternalSQLTxn().ExecEx( params.ctx, opName, params.p.txn, @@ -445,7 +446,7 @@ func (n *alterRoleSetNode) startExec(params runParams) error { roleName, ) } else { - rowsAffected, internalExecErr = params.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( + rowsAffected, internalExecErr = params.p.InternalSQLTxn().ExecEx( params.ctx, opName, params.p.txn, @@ -550,7 +551,7 @@ func (n *alterRoleSetNode) getRoleName( return false, username.SQLUsername{}, pgerror.Newf(pgcode.InsufficientPrivilege, "cannot edit public role") } // Check if role exists. 
- row, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.QueryRowEx( + row, err := params.p.InternalSQLTxn().QueryRowEx( params.ctx, opName, params.p.txn, @@ -601,7 +602,7 @@ func (n *alterRoleSetNode) makeNewSettings( `SELECT settings FROM %s WHERE database_id = $1 AND role_name = $2`, sessioninit.DatabaseRoleSettingsTableName, ) - datums, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.QueryRowEx( + datums, err := params.p.InternalSQLTxn().QueryRowEx( params.ctx, opName, params.p.txn, diff --git a/pkg/sql/alter_table.go b/pkg/sql/alter_table.go index dfa09c2d0583..aa1493cf5fa6 100644 --- a/pkg/sql/alter_table.go +++ b/pkg/sql/alter_table.go @@ -21,7 +21,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" @@ -44,7 +43,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/volatility" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/sql/storageparam" "github.com/cockroachdb/cockroach/pkg/sql/storageparam/tablestorageparam" @@ -540,30 +538,28 @@ func (n *alterTableNode) startExec(params runParams) error { "constraint %q in the middle of being dropped", t.Constraint) } if ck := c.AsCheck(); ck != nil { - if err := params.p.WithInternalExecutor(params.ctx, func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) error { - return validateCheckInTxn(ctx, ¶ms.p.semaCtx, params.p.SessionData(), n.tableDesc, txn, ie, ck.GetExpr()) - }); err != nil { + if err := validateCheckInTxn( + params.ctx, params.p.InternalSQLTxn(), 
¶ms.p.semaCtx, + params.p.SessionData(), n.tableDesc, ck.GetExpr(), + ); err != nil { return err } ck.CheckDesc().Validity = descpb.ConstraintValidity_Validated } else if fk := c.AsForeignKey(); fk != nil { - if err := params.p.WithInternalExecutor(params.ctx, func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) error { - return validateFkInTxn(ctx, n.tableDesc, txn, ie, params.p.descCollection, name) - }); err != nil { + if err := validateFkInTxn( + params.ctx, params.p.InternalSQLTxn(), params.p.descCollection, n.tableDesc, name, + ); err != nil { return err } fk.ForeignKeyDesc().Validity = descpb.ConstraintValidity_Validated } else if uwoi := c.AsUniqueWithoutIndex(); uwoi != nil { - if err := params.p.WithInternalExecutor(params.ctx, func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) error { - return validateUniqueWithoutIndexConstraintInTxn( - params.ctx, - n.tableDesc, - txn, - ie, - params.p.User(), - name, - ) - }); err != nil { + if err := validateUniqueWithoutIndexConstraintInTxn( + params.ctx, + params.p.InternalSQLTxn(), + n.tableDesc, + params.p.User(), + name, + ); err != nil { return err } uwoi.UniqueWithoutIndexDesc().Validity = descpb.ConstraintValidity_Validated @@ -653,7 +649,7 @@ func (n *alterTableNode) startExec(params runParams) error { descriptorChanged = true if err := deleteRemovedPartitionZoneConfigs( params.ctx, - params.p.txn, + params.p.InternalSQLTxn(), n.tableDesc, params.p.Descriptors(), n.tableDesc.GetPrimaryIndexID(), @@ -1223,7 +1219,7 @@ func injectTableStats( } // First, delete all statistics for the table. 
- if _ /* rows */, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.Exec( + if _ /* rows */, err := params.p.InternalSQLTxn().Exec( params.ctx, "delete-stats", params.p.Txn(), @@ -1287,8 +1283,7 @@ func insertJSONStatistic( ) error { var ( ctx = params.ctx - ie = params.ExecCfg().InternalExecutor - txn = params.p.Txn() + txn = params.p.InternalSQLTxn() settings = params.ExecCfg().Settings ) @@ -1303,10 +1298,10 @@ func insertJSONStatistic( return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "statistic for columns %v with collection time %s to insert is partial but cluster version is below 23.1", s.Columns, s.CreatedAt) } - _ /* rows */, err := ie.Exec( + _ /* rows */, err := txn.Exec( ctx, "insert-stats", - txn, + txn.KV(), `INSERT INTO system.table_statistics ( "tableID", "name", @@ -1340,10 +1335,10 @@ func insertJSONStatistic( fullStatisticIDValue = s.FullStatisticID } - _ /* rows */, err := ie.Exec( + _ /* rows */, err := txn.Exec( ctx, "insert-stats", - txn, + txn.KV(), `INSERT INTO system.table_statistics ( "tableID", "name", @@ -1770,13 +1765,12 @@ func handleTTLStorageParamChange( // Update cron schedule if required. 
if before.DeletionCron != after.DeletionCron { - env := JobSchedulerEnv(params.ExecCfg()) - s, err := jobs.LoadScheduledJob( + env := JobSchedulerEnv(params.ExecCfg().JobsKnobs()) + schedules := jobs.ScheduledJobTxn(params.p.InternalSQLTxn()) + s, err := schedules.Load( params.ctx, env, after.ScheduleID, - params.ExecCfg().InternalExecutor, - params.p.txn, ) if err != nil { return false, err @@ -1784,7 +1778,7 @@ func handleTTLStorageParamChange( if err := s.SetSchedule(after.DeletionCronOrDefault()); err != nil { return false, err } - if err := s.Update(params.ctx, params.ExecCfg().InternalExecutor, params.p.txn); err != nil { + if err := schedules.Update(params.ctx, s); err != nil { return false, err } } diff --git a/pkg/sql/alter_table_locality.go b/pkg/sql/alter_table_locality.go index 1bf7af7ce9f5..8a74370837d8 100644 --- a/pkg/sql/alter_table_locality.go +++ b/pkg/sql/alter_table_locality.go @@ -579,7 +579,7 @@ func (n *alterTableSetLocalityNode) writeNewTableLocalityAndZoneConfig( // Update the zone configuration. if err := ApplyZoneConfigForMultiRegionTable( params.ctx, - params.p.txn, + params.p.Txn(), params.p.ExecCfg(), params.p.extendedEvalCtx.Tracing.KVTracingEnabled(), params.p.Descriptors(), @@ -640,12 +640,12 @@ func (p *planner) alterTableDescLocalityToGlobal( // if it existed before the locality switch. 
func setNewLocalityConfig( ctx context.Context, - desc *tabledesc.Mutable, txn *kv.Txn, + descsCol *descs.Collection, b *kv.Batch, + desc *tabledesc.Mutable, config catpb.LocalityConfig, kvTrace bool, - descsCol *descs.Collection, ) error { getMultiRegionTypeDesc := func() (*typedesc.Mutable, error) { dbDesc, err := descsCol.ByID(txn).WithoutNonPublic().Get().Database(ctx, desc.GetParentID()) diff --git a/pkg/sql/authorization.go b/pkg/sql/authorization.go index b4ea87d034f7..6be9510f149e 100644 --- a/pkg/sql/authorization.go +++ b/pkg/sql/authorization.go @@ -15,7 +15,6 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/catalog" @@ -27,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/memsize" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -36,7 +36,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sessioninit" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/syntheticprivilege" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -445,9 +444,7 @@ func (p *planner) MemberOfWithAdminOption( return MemberOfWithAdminOption( ctx, p.execCfg, - p.ExecCfg().InternalExecutor, - p.Descriptors(), - p.Txn(), + p.InternalSQLTxn(), member, ) } @@ -457,12 +454,7 @@ func (p *planner) MemberOfWithAdminOption( // The "isAdmin" flag applies to both direct and indirect 
members. // Requires a valid transaction to be open. func MemberOfWithAdminOption( - ctx context.Context, - execCfg *ExecutorConfig, - ie sqlutil.InternalExecutor, - descsCol *descs.Collection, - txn *kv.Txn, - member username.SQLUsername, + ctx context.Context, execCfg *ExecutorConfig, txn descs.Txn, member username.SQLUsername, ) (map[username.SQLUsername]bool, error) { if txn == nil { return nil, errors.AssertionFailedf("cannot use MemberOfWithAdminoption without a txn") @@ -471,14 +463,16 @@ func MemberOfWithAdminOption( roleMembersCache := execCfg.RoleMemberCache // Lookup table version. - _, tableDesc, err := descs.PrefixAndTable(ctx, descsCol.ByNameWithLeased(txn).Get(), &roleMembersTableName) + _, tableDesc, err := descs.PrefixAndTable( + ctx, txn.Descriptors().ByNameWithLeased(txn.KV()).Get(), &roleMembersTableName, + ) if err != nil { return nil, err } tableVersion := tableDesc.GetVersion() if tableDesc.IsUncommittedVersion() { - return resolveMemberOfWithAdminOption(ctx, member, ie, txn, useSingleQueryForRoleMembershipCache.Get(execCfg.SV())) + return resolveMemberOfWithAdminOption(ctx, member, txn, useSingleQueryForRoleMembershipCache.Get(execCfg.SV())) } // Check version and maybe clear cache while holding the mutex. @@ -519,7 +513,7 @@ func MemberOfWithAdminOption( }, func(ctx context.Context) (interface{}, error) { return resolveMemberOfWithAdminOption( - ctx, member, ie, txn, + ctx, member, txn, useSingleQueryForRoleMembershipCache.Get(execCfg.SV()), ) }) @@ -570,11 +564,7 @@ var useSingleQueryForRoleMembershipCache = settings.RegisterBoolSetting( // resolveMemberOfWithAdminOption performs the actual recursive role membership lookup. 
func resolveMemberOfWithAdminOption( - ctx context.Context, - member username.SQLUsername, - ie sqlutil.InternalExecutor, - txn *kv.Txn, - singleQuery bool, + ctx context.Context, member username.SQLUsername, txn isql.Txn, singleQuery bool, ) (map[username.SQLUsername]bool, error) { ret := map[username.SQLUsername]bool{} if singleQuery { @@ -583,7 +573,7 @@ func resolveMemberOfWithAdminOption( isAdmin bool } memberToRoles := make(map[username.SQLUsername][]membership) - if err := forEachRoleMembership(ctx, ie, txn, func(role, member username.SQLUsername, isAdmin bool) error { + if err := forEachRoleMembership(ctx, txn, func(role, member username.SQLUsername, isAdmin bool) error { memberToRoles[member] = append(memberToRoles[member], membership{role, isAdmin}) return nil }); err != nil { @@ -622,8 +612,10 @@ func resolveMemberOfWithAdminOption( } visited[m] = struct{}{} - it, err := ie.QueryIterator( - ctx, "expand-roles", txn, lookupRolesStmt, m.Normalized(), + it, err := txn.QueryIteratorEx( + ctx, "expand-roles", txn.KV(), sessiondata.InternalExecutorOverride{ + User: username.NodeUserName(), + }, lookupRolesStmt, m.Normalized(), ) if err != nil { return nil, err @@ -673,7 +665,7 @@ func (p *planner) HasRoleOption(ctx context.Context, roleOption roleoption.Optio return true, nil } - hasRolePrivilege, err := p.ExecCfg().InternalExecutor.QueryRowEx( + hasRolePrivilege, err := p.InternalSQLTxn().QueryRowEx( ctx, "has-role-option", p.Txn(), sessiondata.RootUserSessionDataOverride, fmt.Sprintf( @@ -784,7 +776,7 @@ func (p *planner) checkCanAlterToNewOwner( ctx context.Context, desc catalog.MutableDescriptor, newOwner username.SQLUsername, ) error { // Make sure the newOwner exists. 
- roleExists, err := RoleExists(ctx, p.ExecCfg().InternalExecutor, p.Txn(), newOwner) + roleExists, err := RoleExists(ctx, p.InternalSQLTxn(), newOwner) if err != nil { return err } diff --git a/pkg/sql/authorization_test.go b/pkg/sql/authorization_test.go index 5e7b357cb0e0..a92c76cba196 100644 --- a/pkg/sql/authorization_test.go +++ b/pkg/sql/authorization_test.go @@ -15,11 +15,9 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -39,44 +37,45 @@ func TestCheckAnyPrivilegeForNodeUser(t *testing.T) { require.NotNil(t, ts.InternalExecutor()) - ief := ts.InternalExecutorFactory().(descs.TxnManager) + ief := ts.InternalDB().(descs.DB) - if err := ief.DescsTxnWithExecutor(ctx, s.DB(), nil /* sessionData */, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor, + if err := ief.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { - row, err := ie.QueryRowEx( - ctx, "get-all-databases", txn, sessiondata.NodeUserSessionDataOverride, + + row, err := txn.QueryRowEx( + ctx, "get-all-databases", txn.KV(), sessiondata.NodeUserSessionDataOverride, "SELECT count(1) FROM crdb_internal.databases", ) require.NoError(t, err) // 3 databases (system, defaultdb, postgres). 
require.Equal(t, row.String(), "(3)") - _, err = ie.ExecEx(ctx, "create-database1", txn, sessiondata.RootUserSessionDataOverride, + _, err = txn.ExecEx(ctx, "create-database1", txn.KV(), sessiondata.RootUserSessionDataOverride, "CREATE DATABASE test1") require.NoError(t, err) - _, err = ie.ExecEx(ctx, "create-database2", txn, sessiondata.RootUserSessionDataOverride, + _, err = txn.ExecEx(ctx, "create-database2", txn.KV(), sessiondata.RootUserSessionDataOverride, "CREATE DATABASE test2") require.NoError(t, err) // Revoke CONNECT on all non-system databases and ensure that when querying // with node, we can still see all the databases. - _, err = ie.ExecEx(ctx, "revoke-privileges", txn, sessiondata.RootUserSessionDataOverride, + _, err = txn.ExecEx(ctx, "revoke-privileges", txn.KV(), sessiondata.RootUserSessionDataOverride, "REVOKE CONNECT ON DATABASE test1 FROM public") require.NoError(t, err) - _, err = ie.ExecEx(ctx, "revoke-privileges", txn, sessiondata.RootUserSessionDataOverride, + _, err = txn.ExecEx(ctx, "revoke-privileges", txn.KV(), sessiondata.RootUserSessionDataOverride, "REVOKE CONNECT ON DATABASE test2 FROM public") require.NoError(t, err) - _, err = ie.ExecEx(ctx, "revoke-privileges", txn, sessiondata.RootUserSessionDataOverride, + _, err = txn.ExecEx(ctx, "revoke-privileges", txn.KV(), sessiondata.RootUserSessionDataOverride, "REVOKE CONNECT ON DATABASE defaultdb FROM public") require.NoError(t, err) - _, err = ie.ExecEx(ctx, "revoke-privileges", txn, sessiondata.RootUserSessionDataOverride, + _, err = txn.ExecEx(ctx, "revoke-privileges", txn.KV(), sessiondata.RootUserSessionDataOverride, "REVOKE CONNECT ON DATABASE postgres FROM public") require.NoError(t, err) - row, err = ie.QueryRowEx( - ctx, "get-all-databases", txn, sessiondata.NodeUserSessionDataOverride, + row, err = txn.QueryRowEx( + ctx, "get-all-databases", txn.KV(), sessiondata.NodeUserSessionDataOverride, "SELECT count(1) FROM crdb_internal.databases", ) require.NoError(t, err) diff --git 
a/pkg/sql/backfill.go b/pkg/sql/backfill.go index 5062fd67578d..c7218c34c516 100644 --- a/pkg/sql/backfill.go +++ b/pkg/sql/backfill.go @@ -33,6 +33,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/row" @@ -43,7 +44,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -139,7 +139,7 @@ func (sc *SchemaChanger) getChunkSize(chunkSize int64) int64 { } // scTxnFn is the type of functions that operates using transactions in the backfiller. -type scTxnFn func(ctx context.Context, txn *kv.Txn, evalCtx *extendedEvalContext, ie sqlutil.InternalExecutor) error +type scTxnFn func(ctx context.Context, txn isql.Txn, evalCtx *extendedEvalContext) error // historicalTxnRunner is the type of the callback used by the various // helper functions to run checks at a fixed timestamp (logically, at @@ -151,14 +151,13 @@ func (sc *SchemaChanger) makeFixedTimestampRunner(readAsOf hlc.Timestamp) histor runner := func(ctx context.Context, retryable scTxnFn) error { return sc.fixedTimestampTxnWithExecutor(ctx, readAsOf, func( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, sd *sessiondata.SessionData, descriptors *descs.Collection, - ie sqlutil.InternalExecutor, ) error { // We need to re-create the evalCtx since the txn may retry. 
evalCtx := createSchemaChangeEvalCtx(ctx, sc.execCfg, sd, readAsOf, descriptors) - return retryable(ctx, txn, &evalCtx, ie) + return retryable(ctx, txn, &evalCtx) }) } return runner @@ -173,12 +172,11 @@ func (sc *SchemaChanger) makeFixedTimestampInternalExecRunner( ) error { return sc.fixedTimestampTxnWithExecutor(ctx, readAsOf, func( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, _ *sessiondata.SessionData, descriptors *descs.Collection, - ie sqlutil.InternalExecutor, ) error { - return retryable(ctx, txn, ie, descriptors) + return retryable(ctx, txn, descriptors) }) }) } @@ -186,11 +184,11 @@ func (sc *SchemaChanger) makeFixedTimestampInternalExecRunner( func (sc *SchemaChanger) fixedTimestampTxn( ctx context.Context, readAsOf hlc.Timestamp, - retryable func(ctx context.Context, txn *kv.Txn, descriptors *descs.Collection) error, + retryable func(ctx context.Context, txn isql.Txn, descriptors *descs.Collection) error, ) error { return sc.fixedTimestampTxnWithExecutor(ctx, readAsOf, func( - ctx context.Context, txn *kv.Txn, _ *sessiondata.SessionData, - descriptors *descs.Collection, _ sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, _ *sessiondata.SessionData, + descriptors *descs.Collection, ) error { return retryable(ctx, txn, descriptors) }) @@ -201,20 +199,18 @@ func (sc *SchemaChanger) fixedTimestampTxnWithExecutor( readAsOf hlc.Timestamp, retryable func( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, sd *sessiondata.SessionData, descriptors *descs.Collection, - ie sqlutil.InternalExecutor, ) error, ) error { - return sc.txnWithExecutor(ctx, func( - ctx context.Context, txn *kv.Txn, sd *sessiondata.SessionData, - descriptors *descs.Collection, ie sqlutil.InternalExecutor, + return sc.txn(ctx, func( + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { - if err := txn.SetFixedTimestamp(ctx, readAsOf); err != nil { + if err := txn.KV().SetFixedTimestamp(ctx, readAsOf); err != nil { return err } - return 
retryable(ctx, txn, sd, descriptors, ie) + return retryable(ctx, txn, txn.SessionData(), descriptors) }) } @@ -449,12 +445,12 @@ func (sc *SchemaChanger) dropConstraints( } // Create update closure for the table and all other tables with backreferences. - if err := sc.txn(ctx, func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection) error { - scTable, err := descsCol.MutableByID(txn).Table(ctx, sc.descID) + if err := sc.txn(ctx, func(ctx context.Context, txn isql.Txn, descsCol *descs.Collection) error { + scTable, err := descsCol.MutableByID(txn.KV()).Table(ctx, sc.descID) if err != nil { return err } - b := txn.NewBatch() + b := txn.KV().NewBatch() for _, constraint := range constraints { if constraint.AsCheck() != nil { found := false @@ -480,7 +476,7 @@ func (sc *SchemaChanger) dropConstraints( if def.Name != constraint.GetName() { continue } - backrefTable, err := descsCol.MutableByID(txn).Table(ctx, fk.GetReferencedTableID()) + backrefTable, err := descsCol.MutableByID(txn.KV()).Table(ctx, fk.GetReferencedTableID()) if err != nil { return err } @@ -533,7 +529,7 @@ func (sc *SchemaChanger) dropConstraints( ); err != nil { return err } - return txn.Run(ctx, b) + return txn.KV().Run(ctx, b) }); err != nil { return nil, err } @@ -541,13 +537,13 @@ func (sc *SchemaChanger) dropConstraints( log.Info(ctx, "finished dropping constraints") tableDescs := make(map[descpb.ID]catalog.TableDescriptor, len(fksByBackrefTable)+1) if err := sc.txn(ctx, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) (err error) { - if tableDescs[sc.descID], err = descsCol.ByIDWithLeased(txn).WithoutNonPublic().Get().Table(ctx, sc.descID); err != nil { + if tableDescs[sc.descID], err = descsCol.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Table(ctx, sc.descID); err != nil { return err } for id := range fksByBackrefTable { - desc, err := 
descsCol.ByIDWithLeased(txn).WithoutOffline().Get().Table(ctx, id) + desc, err := descsCol.ByIDWithLeased(txn.KV()).WithoutOffline().Get().Table(ctx, id) if err != nil { return err } @@ -584,14 +580,14 @@ func (sc *SchemaChanger) addConstraints( // Create update closure for the table and all other tables with backreferences if err := sc.txn(ctx, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { - scTable, err := descsCol.MutableByID(txn).Table(ctx, sc.descID) + scTable, err := descsCol.MutableByID(txn.KV()).Table(ctx, sc.descID) if err != nil { return err } - b := txn.NewBatch() + b := txn.KV().NewBatch() for _, constraint := range constraints { if ck := constraint.AsCheck(); ck != nil { found := false @@ -635,7 +631,7 @@ func (sc *SchemaChanger) addConstraints( } if !foundExisting { scTable.OutboundFKs = append(scTable.OutboundFKs, *fk.ForeignKeyDesc()) - backrefTable, err := descsCol.MutableByID(txn).Table(ctx, fk.GetReferencedTableID()) + backrefTable, err := descsCol.MutableByID(txn.KV()).Table(ctx, fk.GetReferencedTableID()) if err != nil { return err } @@ -694,7 +690,7 @@ func (sc *SchemaChanger) addConstraints( ); err != nil { return err } - return txn.Run(ctx, b) + return txn.KV().Run(ctx, b) }); err != nil { return err } @@ -730,9 +726,9 @@ func (sc *SchemaChanger) validateConstraints( var tableDesc catalog.TableDescriptor if err := sc.fixedTimestampTxn(ctx, readAsOf, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { - tableDesc, err = descriptors.ByID(txn).WithoutNonPublic().Get().Table(ctx, sc.descID) + tableDesc, err = descriptors.ByID(txn.KV()).WithoutNonPublic().Get().Table(ctx, sc.descID) return err }); err != nil { return err @@ -759,14 +755,14 @@ func (sc *SchemaChanger) validateConstraints( } desc := descI.(*tabledesc.Mutable) // Each check operates at 
the historical timestamp. - return runHistoricalTxn(ctx, func(ctx context.Context, txn *kv.Txn, evalCtx *extendedEvalContext, ie sqlutil.InternalExecutor) error { + return runHistoricalTxn(ctx, func(ctx context.Context, txn isql.Txn, evalCtx *extendedEvalContext) error { // If the constraint is a check constraint that fails validation, we // need a semaContext set up that can resolve types in order to pretty // print the check expression back to the user. - evalCtx.Txn = txn + evalCtx.Txn = txn.KV() // Use the DistSQLTypeResolver because we need to resolve types by ID. collection := evalCtx.Descs - resolver := descs.NewDistSQLTypeResolver(collection, txn) + resolver := descs.NewDistSQLTypeResolver(collection, txn.KV()) semaCtx := tree.MakeSemaContext() semaCtx.TypeResolver = &resolver // TODO (rohany): When to release this? As of now this is only going to get released @@ -774,7 +770,7 @@ func (sc *SchemaChanger) validateConstraints( defer func() { collection.ReleaseAll(ctx) }() if ck := c.AsCheck(); ck != nil { if err := validateCheckInTxn( - ctx, &semaCtx, evalCtx.SessionData(), desc, txn, ie, ck.GetExpr(), + ctx, txn, &semaCtx, evalCtx.SessionData(), desc, ck.GetExpr(), ); err != nil { if ck.IsNotNullColumnConstraint() { // TODO (lucy): This should distinguish between constraint @@ -785,11 +781,11 @@ func (sc *SchemaChanger) validateConstraints( return err } } else if c.AsForeignKey() != nil { - if err := validateFkInTxn(ctx, desc, txn, ie, collection, c.GetName()); err != nil { + if err := validateFkInTxn(ctx, txn, collection, desc, c.GetName()); err != nil { return err } } else if c.AsUniqueWithoutIndex() != nil { - if err := validateUniqueWithoutIndexConstraintInTxn(ctx, desc, txn, ie, evalCtx.SessionData().User(), c.GetName()); err != nil { + if err := validateUniqueWithoutIndexConstraintInTxn(ctx, txn, desc, evalCtx.SessionData().User(), c.GetName()); err != nil { return err } } else { @@ -924,9 +920,13 @@ func (sc *SchemaChanger) distIndexBackfill( var 
todoSpans []roachpb.Span var mutationIdx int - if err := DescsTxn(ctx, sc.execCfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { + if err := sc.txn(ctx, func( + ctx context.Context, txn isql.Txn, col *descs.Collection, + ) (err error) { todoSpans, _, mutationIdx, err = rowexec.GetResumeSpans( - ctx, sc.jobRegistry, txn, sc.execCfg.Codec, col, sc.descID, sc.mutationID, filter) + ctx, sc.jobRegistry, txn, sc.execCfg.Codec, col, sc.descID, + sc.mutationID, filter, + ) return err }); err != nil { return err @@ -940,7 +940,9 @@ func (sc *SchemaChanger) distIndexBackfill( writeAsOf := sc.job.Details().(jobspb.SchemaChangeDetails).WriteTimestamp if writeAsOf.IsEmpty() { - if err := sc.job.RunningStatus(ctx, nil /* txn */, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) { + if err := sc.job.NoTxn().RunningStatus(ctx, func( + _ context.Context, _ jobspb.Details, + ) (jobs.RunningStatus, error) { return jobs.RunningStatus("scanning target index for in-progress transactions"), nil }); err != nil { return errors.Wrapf(err, "failed to update running status of job %d", errors.Safe(sc.job.ID())) @@ -957,13 +959,13 @@ func (sc *SchemaChanger) distIndexBackfill( const pageSize = 10000 noop := func(_ []kv.KeyValue) error { return nil } if err := sc.fixedTimestampTxn(ctx, writeAsOf, func( - ctx context.Context, txn *kv.Txn, _ *descs.Collection, + ctx context.Context, txn isql.Txn, _ *descs.Collection, ) error { for _, span := range targetSpans { // TODO(dt): a Count() request would be nice here if the target isn't // empty, since we don't need to drag all the results back just to // then ignore them -- we just need the iteration on the far end. 
- if err := txn.Iterate(ctx, span.Key, span.EndKey, pageSize, noop); err != nil { + if err := txn.KV().Iterate(ctx, span.Key, span.EndKey, pageSize, noop); err != nil { return err } } @@ -972,14 +974,18 @@ func (sc *SchemaChanger) distIndexBackfill( return err } log.Infof(ctx, "persisting target safe write time %v...", writeAsOf) - if err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := sc.txn(ctx, func( + ctx context.Context, txn isql.Txn, _ *descs.Collection, + ) error { details := sc.job.Details().(jobspb.SchemaChangeDetails) details.WriteTimestamp = writeAsOf - return sc.job.SetDetails(ctx, txn, details) + return sc.job.WithTxn(txn).SetDetails(ctx, details) }); err != nil { return err } - if err := sc.job.RunningStatus(ctx, nil /* txn */, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) { + if err := sc.job.NoTxn().RunningStatus(ctx, func( + _ context.Context, _ jobspb.Details, + ) (jobs.RunningStatus, error) { return RunningStatusBackfill, nil }); err != nil { return errors.Wrapf(err, "failed to update running status of job %d", errors.Safe(sc.job.ID())) @@ -996,7 +1002,7 @@ func (sc *SchemaChanger) distIndexBackfill( // The txn is used to fetch a tableDesc, partition the spans and set the // evalCtx ts all of which is during planning of the DistSQL flow. if err := sc.txn(ctx, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { // It is okay to release the lease on the descriptor before running the @@ -1010,14 +1016,14 @@ func (sc *SchemaChanger) distIndexBackfill( // clear what this buys us in terms of checking the descriptors validity. // Thus, in favor of simpler code and no correctness concerns we release // the lease once the flow is planned. 
- tableDesc, err := sc.getTableVersion(ctx, txn, descriptors, version) + tableDesc, err := sc.getTableVersion(ctx, txn.KV(), descriptors, version) if err != nil { return err } sd := NewFakeSessionData(sc.execCfg.SV()) - evalCtx = createSchemaChangeEvalCtx(ctx, sc.execCfg, sd, txn.ReadTimestamp(), descriptors) + evalCtx = createSchemaChangeEvalCtx(ctx, sc.execCfg, sd, txn.KV().ReadTimestamp(), descriptors) planCtx = sc.distSQLPlanner.NewPlanningCtx(ctx, &evalCtx, nil, /* planner */ - txn, DistributionTypeSystemTenantOnly) + txn.KV(), DistributionTypeSystemTenantOnly) indexBatchSize := indexBackfillBatchSize.Get(&sc.execCfg.Settings.SV) chunkSize := sc.getChunkSize(indexBatchSize) spec, err := initIndexBackfillerSpec(*tableDesc.TableDesc(), writeAsOf, readAsOf, writeAtRequestTimestamp, chunkSize, addedIndexes) @@ -1100,14 +1106,14 @@ func (sc *SchemaChanger) distIndexBackfill( if updatedTodoSpans == nil { return nil } - nRanges, err := numRangesInSpans(ctx, sc.db, sc.distSQLPlanner, updatedTodoSpans) + nRanges, err := numRangesInSpans(ctx, sc.db.KV(), sc.distSQLPlanner, updatedTodoSpans) if err != nil { return err } if origNRanges == -1 { origNRanges = nRanges } - return sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return sc.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { // No processor has returned completed spans yet. 
if nRanges < origNRanges { fractionRangesFinished := float32(origNRanges-nRanges) / float32(origNRanges) @@ -1115,8 +1121,9 @@ func (sc *SchemaChanger) distIndexBackfill( if err != nil { return err } - if err := sc.job.FractionProgressed(ctx, txn, - jobs.FractionUpdater(fractionCompleted)); err != nil { + if err := sc.job.WithTxn(txn).FractionProgressed( + ctx, jobs.FractionUpdater(fractionCompleted), + ); err != nil { return jobs.SimplifyInvalidStatusError(err) } } @@ -1131,7 +1138,7 @@ func (sc *SchemaChanger) distIndexBackfill( var updateJobMu syncutil.Mutex updateJobDetails = func() error { updatedTodoSpans := getTodoSpansForUpdate() - return sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return sc.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { updateJobMu.Lock() defer updateJobMu.Unlock() // No processor has returned completed spans yet. @@ -1253,7 +1260,7 @@ func (sc *SchemaChanger) distColumnBackfill( // schema change state machine or from a previous backfill attempt, // we scale that fraction of ranges completed by the remaining fraction // of the job's progress bar. - nRanges, err := numRangesInSpans(ctx, sc.db, sc.distSQLPlanner, todoSpans) + nRanges, err := numRangesInSpans(ctx, sc.db.KV(), sc.distSQLPlanner, todoSpans) if err != nil { return err } @@ -1269,14 +1276,14 @@ func (sc *SchemaChanger) distColumnBackfill( // transaction being created as a part of this update. We want this // update operation to be short and to not be coupled to any other // backfill work, which may take much longer. 
- return sc.job.FractionProgressed( - ctx, nil /* txn */, jobs.FractionUpdater(fractionCompleted), + return sc.job.NoTxn().FractionProgressed( + ctx, jobs.FractionUpdater(fractionCompleted), ) } readAsOf := sc.clock.Now() var mutationIdx int - if err := DescsTxn(ctx, sc.execCfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { + if err := DescsTxn(ctx, sc.execCfg, func(ctx context.Context, txn isql.Txn, col *descs.Collection) (err error) { todoSpans, _, mutationIdx, err = rowexec.GetResumeSpans( ctx, sc.jobRegistry, txn, sc.execCfg.Codec, col, sc.descID, sc.mutationID, filter) return err @@ -1294,10 +1301,10 @@ func (sc *SchemaChanger) distColumnBackfill( // variable and assign to todoSpans after committing. var updatedTodoSpans []roachpb.Span if err := sc.txn(ctx, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { updatedTodoSpans = todoSpans - tableDesc, err := sc.getTableVersion(ctx, txn, descriptors, version) + tableDesc, err := sc.getTableVersion(ctx, txn.KV(), descriptors, version) if err != nil { return err } @@ -1310,7 +1317,7 @@ func (sc *SchemaChanger) distColumnBackfill( } cbw := MetadataCallbackWriter{rowResultWriter: &errOnlyResultWriter{}, fn: metaFn} sd := NewFakeSessionData(sc.execCfg.SV()) - evalCtx := createSchemaChangeEvalCtx(ctx, sc.execCfg, sd, txn.ReadTimestamp(), descriptors) + evalCtx := createSchemaChangeEvalCtx(ctx, sc.execCfg, sd, txn.KV().ReadTimestamp(), descriptors) recv := MakeDistSQLReceiver( ctx, &cbw, @@ -1322,7 +1329,7 @@ func (sc *SchemaChanger) distColumnBackfill( ) defer recv.Release() - planCtx := sc.distSQLPlanner.NewPlanningCtx(ctx, &evalCtx, nil /* planner */, txn, + planCtx := sc.distSQLPlanner.NewPlanningCtx(ctx, &evalCtx, nil /* planner */, txn.KV(), DistributionTypeSystemTenantOnly) spec, err := initColumnBackfillerSpec(tableDesc, duration, chunkSize, backfillUpdateChunkSizeThresholdBytes, 
readAsOf) if err != nil { @@ -1347,7 +1354,7 @@ func (sc *SchemaChanger) distColumnBackfill( // Record what is left to do for the job. // TODO(spaskob): Execute this at a regular cadence. - if err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := sc.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { return rowexec.SetResumeSpansInJob(ctx, todoSpans, mutationIdx, txn, sc.job) }); err != nil { return err @@ -1365,9 +1372,9 @@ func (sc *SchemaChanger) distColumnBackfill( func (sc *SchemaChanger) updateJobRunningStatus( ctx context.Context, status jobs.RunningStatus, ) (tableDesc catalog.TableDescriptor, err error) { - err = DescsTxn(ctx, sc.execCfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { + err = DescsTxn(ctx, sc.execCfg, func(ctx context.Context, txn isql.Txn, col *descs.Collection) (err error) { // Read table descriptor without holding a lease. - tableDesc, err = col.ByID(txn).Get().Table(ctx, sc.descID) + tableDesc, err = col.ByID(txn.KV()).Get().Table(ctx, sc.descID) if err != nil { return err } @@ -1388,8 +1395,9 @@ func (sc *SchemaChanger) updateJobRunningStatus( } } if updateJobRunningProgress && !tableDesc.Dropped() { - if err := sc.job.RunningStatus(ctx, txn, func( - ctx context.Context, details jobspb.Details) (jobs.RunningStatus, error) { + if err := sc.job.WithTxn(txn).RunningStatus(ctx, func( + ctx context.Context, details jobspb.Details, + ) (jobs.RunningStatus, error) { return status, nil }); err != nil { return errors.Wrapf(err, "failed to update running status of job %d", errors.Safe(sc.job.ID())) @@ -1423,9 +1431,9 @@ func (sc *SchemaChanger) validateIndexes(ctx context.Context) error { readAsOf := sc.clock.Now() var tableDesc catalog.TableDescriptor if err := sc.fixedTimestampTxn(ctx, readAsOf, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) (err error) { - tableDesc, err = 
descriptors.ByID(txn).WithoutNonPublic().Get().Table(ctx, sc.descID) + tableDesc, err = descriptors.ByID(txn.KV()).WithoutNonPublic().Get().Table(ctx, sc.descID) return err }); err != nil { return err @@ -1521,10 +1529,10 @@ func ValidateConstraint( // The check operates at the historical timestamp. return runHistoricalTxn.Exec(ctx, func( - ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { // Use a schema resolver because we need to resolve types by ID and table by name. - resolver := NewSkippingCacheSchemaResolver(descriptors, sessiondata.NewStack(sessionData), txn, nil /* authAccessor */) + resolver := NewSkippingCacheSchemaResolver(descriptors, sessiondata.NewStack(sessionData), txn.KV(), nil /* authAccessor */) semaCtx := tree.MakeSemaContext() semaCtx.TypeResolver = resolver semaCtx.TableNameResolver = resolver @@ -1533,36 +1541,43 @@ func ValidateConstraint( switch catalog.GetConstraintType(constraint) { case catconstants.ConstraintTypeCheck: ck := constraint.AsCheck() - return ie.WithSyntheticDescriptors( + return txn.WithSyntheticDescriptors( []catalog.Descriptor{tableDesc}, func() error { return validateCheckExpr(ctx, &semaCtx, txn, sessionData, ck.GetExpr(), - tableDesc.(*tabledesc.Mutable), ie, indexIDForValidation) + tableDesc.(*tabledesc.Mutable), indexIDForValidation) }, ) case catconstants.ConstraintTypeFK: fk := constraint.AsForeignKey() - targetTable, err := descriptors.ByID(txn).Get().Table(ctx, fk.GetReferencedTableID()) + targetTable, err := descriptors.ByID(txn.KV()).Get().Table(ctx, fk.GetReferencedTableID()) if err != nil { return err } if targetTable.GetID() == tableDesc.GetID() { targetTable = tableDesc } - return ie.WithSyntheticDescriptors( + return txn.WithSyntheticDescriptors( []catalog.Descriptor{tableDesc}, func() error { - return validateForeignKey(ctx, tableDesc.(*tabledesc.Mutable), targetTable, 
fk.ForeignKeyDesc(), - indexIDForValidation, txn, ie) + return validateForeignKey(ctx, txn, tableDesc.(*tabledesc.Mutable), targetTable, fk.ForeignKeyDesc(), + indexIDForValidation) }, ) case catconstants.ConstraintTypeUniqueWithoutIndex: uwi := constraint.AsUniqueWithoutIndex() - return ie.WithSyntheticDescriptors( + return txn.WithSyntheticDescriptors( []catalog.Descriptor{tableDesc}, func() error { - return validateUniqueConstraint(ctx, tableDesc, uwi.GetName(), uwi.CollectKeyColumnIDs().Ordered(), - uwi.GetPredicate(), indexIDForValidation, ie, txn, sessionData.User(), false) + return validateUniqueConstraint( + ctx, tableDesc, uwi.GetName(), + uwi.CollectKeyColumnIDs().Ordered(), + uwi.GetPredicate(), + indexIDForValidation, + txn, + sessionData.User(), + false, /* preExisting */ + ) }, ) default: @@ -1621,10 +1636,10 @@ func ValidateInvertedIndexes( key := span.Key endKey := span.EndKey if err := runHistoricalTxn.Exec(ctx, func( - ctx context.Context, txn *kv.Txn, _ sqlutil.InternalExecutor, _ *descs.Collection, + ctx context.Context, txn isql.Txn, _ *descs.Collection, ) error { for { - kvs, err := txn.Scan(ctx, key, endKey, 1000000) + kvs, err := txn.KV().Scan(ctx, key, endKey, 1000000) if err != nil { return err } @@ -1730,7 +1745,7 @@ func countExpectedRowsForInvertedIndex( var expectedCount int64 if err := runHistoricalTxn.Exec(ctx, func( - ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, _ *descs.Collection, + ctx context.Context, txn isql.Txn, _ *descs.Collection, ) error { var stmt string geoConfig := idx.GetGeoConfig() @@ -1750,8 +1765,8 @@ func countExpectedRowsForInvertedIndex( if idx.IsPartial() { stmt = fmt.Sprintf(`%s WHERE %s`, stmt, idx.GetPredicate()) } - return ie.WithSyntheticDescriptors([]catalog.Descriptor{desc}, func() error { - row, err := ie.QueryRowEx(ctx, "verify-inverted-idx-count", txn, execOverride, stmt) + return txn.WithSyntheticDescriptors([]catalog.Descriptor{desc}, func() error { + row, err := txn.QueryRowEx(ctx, 
"verify-inverted-idx-count", txn.KV(), execOverride, stmt) if err != nil { return err } @@ -1916,7 +1931,7 @@ func populateExpectedCounts( } var tableRowCount int64 if err := runHistoricalTxn.Exec(ctx, func( - ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, _ *descs.Collection, + ctx context.Context, txn isql.Txn, _ *descs.Collection, ) error { var s strings.Builder for _, idx := range indexes { @@ -1932,8 +1947,8 @@ func populateExpectedCounts( // query plan that uses the indexes being backfilled. query := fmt.Sprintf(`SELECT count(1)%s FROM [%d AS t]@[%d]`, partialIndexCounts, desc.GetID(), desc.GetPrimaryIndexID()) - return ie.WithSyntheticDescriptors([]catalog.Descriptor{desc}, func() error { - cnt, err := ie.QueryRowEx(ctx, "VERIFY INDEX", txn, execOverride, query) + return txn.WithSyntheticDescriptors([]catalog.Descriptor{desc}, func() error { + cnt, err := txn.QueryRowEx(ctx, "VERIFY INDEX", txn.KV(), execOverride, query) if err != nil { return err } @@ -2029,7 +2044,7 @@ func countIndexRowsAndMaybeCheckUniqueness( // Retrieve the row count in the index. 
var idxLen int64 if err := runHistoricalTxn.Exec(ctx, func( - ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, _ *descs.Collection, + ctx context.Context, txn isql.Txn, _ *descs.Collection, ) error { query := fmt.Sprintf(`SELECT count(1) FROM [%d AS t]@[%d]`, desc.GetID(), idx.GetID()) // If the index is a partial index the predicate must be added @@ -2037,8 +2052,8 @@ func countIndexRowsAndMaybeCheckUniqueness( if idx.IsPartial() { query = fmt.Sprintf(`%s WHERE %s`, query, idx.GetPredicate()) } - return ie.WithSyntheticDescriptors([]catalog.Descriptor{desc}, func() error { - row, err := ie.QueryRowEx(ctx, "verify-idx-count", txn, execOverride, query) + return txn.WithSyntheticDescriptors([]catalog.Descriptor{desc}, func() error { + row, err := txn.QueryRowEx(ctx, "verify-idx-count", txn.KV(), execOverride, query) if err != nil { return err } @@ -2057,7 +2072,6 @@ func countIndexRowsAndMaybeCheckUniqueness( idx.IndexDesc().KeyColumnIDs[idx.ImplicitPartitioningColumnCount():], idx.GetPredicate(), 0, /* indexIDForValidation */ - ie, txn, username.NodeUserName(), false, /* preExisting */ @@ -2167,9 +2181,9 @@ func (sc *SchemaChanger) backfillIndexes( // system tenant. Secondary tenants do not have mandatory split points // between tables or indexes. 
if sc.execCfg.Codec.ForSystemTenant() { - expirationTime := sc.db.Clock().Now().Add(time.Hour.Nanoseconds(), 0) + expirationTime := sc.db.KV().Clock().Now().Add(time.Hour.Nanoseconds(), 0) for _, span := range addingSpans { - if err := sc.db.AdminSplit(ctx, span.Key, expirationTime); err != nil { + if err := sc.db.KV().AdminSplit(ctx, span.Key, expirationTime); err != nil { return err } } @@ -2237,10 +2251,10 @@ func (sc *SchemaChanger) mergeFromTemporaryIndex( ) error { var tbl *tabledesc.Mutable if err := sc.txn(ctx, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { var err error - tbl, err = descsCol.MutableByID(txn).Table(ctx, sc.descID) + tbl, err = descsCol.MutableByID(txn.KV()).Table(ctx, sc.descID) return err }); err != nil { return err @@ -2259,9 +2273,9 @@ func (sc *SchemaChanger) mergeFromTemporaryIndex( func (sc *SchemaChanger) runStateMachineAfterTempIndexMerge(ctx context.Context) error { var runStatus jobs.RunningStatus return sc.txn(ctx, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { - tbl, err := descsCol.MutableByID(txn).Table(ctx, sc.descID) + tbl, err := descsCol.MutableByID(txn.KV()).Table(ctx, sc.descID) if err != nil { return err } @@ -2292,12 +2306,12 @@ func (sc *SchemaChanger) runStateMachineAfterTempIndexMerge(ctx context.Context) return nil } if err := descsCol.WriteDesc( - ctx, true /* kvTrace */, tbl, txn, + ctx, true /* kvTrace */, tbl, txn.KV(), ); err != nil { return err } if sc.job != nil { - if err := sc.job.RunningStatus(ctx, txn, func( + if err := sc.job.WithTxn(txn).RunningStatus(ctx, func( ctx context.Context, details jobspb.Details, ) (jobs.RunningStatus, error) { return runStatus, nil @@ -2428,7 +2442,7 @@ func runSchemaChangesInTxn( } } else if idx := m.AsIndex(); idx != nil { if err := indexTruncateInTxn( - ctx, planner.Txn(), 
planner.ExecCfg(), planner.Descriptors(), planner.EvalContext(), immutDesc, idx, traceKV, + ctx, planner.InternalSQLTxn(), planner.ExecCfg(), planner.Descriptors(), planner.EvalContext(), immutDesc, idx, traceKV, ); err != nil { return err } @@ -2510,9 +2524,9 @@ func runSchemaChangesInTxn( for _, c := range constraintAdditionMutations { if check := c.AsCheck(); check != nil { if check.GetConstraintValidity() == descpb.ConstraintValidity_Validating { - if err := planner.WithInternalExecutor(ctx, func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) error { - return validateCheckInTxn(ctx, &planner.semaCtx, planner.SessionData(), tableDesc, txn, ie, check.GetExpr()) - }); err != nil { + if err := validateCheckInTxn( + ctx, planner.InternalSQLTxn(), &planner.semaCtx, planner.SessionData(), tableDesc, check.GetExpr(), + ); err != nil { return err } check.CheckDesc().Validity = descpb.ConstraintValidity_Validated @@ -2533,16 +2547,13 @@ func runSchemaChangesInTxn( fk.ForeignKeyDesc().Validity = descpb.ConstraintValidity_Unvalidated } else if uwi := c.AsUniqueWithoutIndex(); uwi != nil { if uwi.GetConstraintValidity() == descpb.ConstraintValidity_Validating { - if err := planner.WithInternalExecutor(ctx, func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) error { - return validateUniqueWithoutIndexConstraintInTxn( - ctx, - tableDesc, - txn, - ie, - planner.User(), - c.GetName(), - ) - }); err != nil { + if err := validateUniqueWithoutIndexConstraintInTxn( + ctx, + planner.InternalSQLTxn(), + tableDesc, + planner.User(), + c.GetName(), + ); err != nil { return err } uwi.UniqueWithoutIndexDesc().Validity = descpb.ConstraintValidity_Validated @@ -2610,11 +2621,10 @@ func runSchemaChangesInTxn( // reuse an existing kv.Txn safely. 
func validateCheckInTxn( ctx context.Context, + txn isql.Txn, semaCtx *tree.SemaContext, sessionData *sessiondata.SessionData, tableDesc *tabledesc.Mutable, - txn *kv.Txn, - ie sqlutil.InternalExecutor, checkExpr string, ) error { var syntheticDescs []catalog.Descriptor @@ -2622,10 +2632,10 @@ func validateCheckInTxn( syntheticDescs = append(syntheticDescs, tableDesc) } - return ie.WithSyntheticDescriptors( + return txn.WithSyntheticDescriptors( syntheticDescs, func() error { - return validateCheckExpr(ctx, semaCtx, txn, sessionData, checkExpr, tableDesc, ie, 0 /* indexIDForValidation */) + return validateCheckExpr(ctx, semaCtx, txn, sessionData, checkExpr, tableDesc, 0 /* indexIDForValidation */) }) } @@ -2682,21 +2692,20 @@ func getTargetTablesAndFk( // reuse an existing kv.Txn safely. func validateFkInTxn( ctx context.Context, - srcTable *tabledesc.Mutable, - txn *kv.Txn, - ie sqlutil.InternalExecutor, + txn isql.Txn, descsCol *descs.Collection, + srcTable *tabledesc.Mutable, fkName string, ) error { - syntheticDescs, fk, targetTable, err := getTargetTablesAndFk(ctx, srcTable, txn, descsCol, fkName) + syntheticDescs, fk, targetTable, err := getTargetTablesAndFk(ctx, srcTable, txn.KV(), descsCol, fkName) if err != nil { return err } - return ie.WithSyntheticDescriptors( + return txn.WithSyntheticDescriptors( syntheticDescs, func() error { - return validateForeignKey(ctx, srcTable, targetTable, fk, 0 /* indexIDForValidation */, txn, ie) + return validateForeignKey(ctx, txn, srcTable, targetTable, fk, 0 /* indexIDForValidation */) }) } @@ -2714,9 +2723,8 @@ func validateFkInTxn( // reuse an existing kv.Txn safely. 
func validateUniqueWithoutIndexConstraintInTxn( ctx context.Context, + txn isql.Txn, tableDesc *tabledesc.Mutable, - txn *kv.Txn, - ie sqlutil.InternalExecutor, user username.SQLUsername, constraintName string, ) error { @@ -2736,7 +2744,7 @@ func validateUniqueWithoutIndexConstraintInTxn( return errors.AssertionFailedf("unique constraint %s does not exist", constraintName) } - return ie.WithSyntheticDescriptors( + return txn.WithSyntheticDescriptors( syntheticDescs, func() error { return validateUniqueConstraint( @@ -2746,7 +2754,6 @@ func validateUniqueWithoutIndexConstraintInTxn( uc.ColumnIDs, uc.Predicate, 0, /* indexIDForValidation */ - ie, txn, user, false, /* preExisting */ @@ -2847,7 +2854,7 @@ func indexBackfillInTxn( // reuse an existing kv.Txn safely. func indexTruncateInTxn( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, execCfg *ExecutorConfig, descriptors *descs.Collection, evalCtx *eval.Context, @@ -2864,7 +2871,7 @@ func indexTruncateInTxn( execCfg.GetRowMetrics(internal), ) td := tableDeleter{rd: rd, alloc: alloc} - if err := td.init(ctx, txn, evalCtx, &evalCtx.Settings.SV); err != nil { + if err := td.init(ctx, txn.KV(), evalCtx, &evalCtx.Settings.SV); err != nil { return err } var err error @@ -2925,7 +2932,7 @@ func (sc *SchemaChanger) distIndexMerge( // TODO(rui): these can be initialized along with other new schema changer dependencies. 
planner := NewIndexBackfillerMergePlanner(sc.execCfg) rc := func(ctx context.Context, spans []roachpb.Span) (int, error) { - return numRangesInSpans(ctx, sc.db, sc.distSQLPlanner, spans) + return numRangesInSpans(ctx, sc.db.KV(), sc.distSQLPlanner, spans) } tracker := NewIndexMergeTracker(progress, sc.job, rc, fractionScaler) periodicFlusher := newPeriodicProgressFlusher(sc.settings) diff --git a/pkg/sql/backfill/BUILD.bazel b/pkg/sql/backfill/BUILD.bazel index 680f8fb3cd5d..355923f21329 100644 --- a/pkg/sql/backfill/BUILD.bazel +++ b/pkg/sql/backfill/BUILD.bazel @@ -25,6 +25,7 @@ go_library( "//pkg/sql/catalog/typedesc", "//pkg/sql/execinfra", "//pkg/sql/execinfrapb", + "//pkg/sql/isql", "//pkg/sql/row", "//pkg/sql/rowenc", "//pkg/sql/rowinfra", diff --git a/pkg/sql/backfill/backfill.go b/pkg/sql/backfill/backfill.go index fc648accba06..4e1b0da02b2b 100644 --- a/pkg/sql/backfill/backfill.go +++ b/pkg/sql/backfill/backfill.go @@ -26,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" @@ -222,8 +223,8 @@ func (cb *ColumnBackfiller) InitForDistributedUse( var defaultExprs, computedExprs []tree.TypedExpr // Install type metadata in the target descriptors, as well as resolve any // user defined types in the column expressions. - if err := flowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - resolver := flowCtx.NewTypeResolver(txn) + if err := flowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + resolver := flowCtx.NewTypeResolver(txn.KV()) // Hydrate all the types present in the table. 
if err := typedesc.HydrateTypesInDescriptor(ctx, desc, &resolver); err != nil { return err @@ -650,8 +651,8 @@ func (ib *IndexBackfiller) InitForDistributedUse( // Install type metadata in the target descriptors, as well as resolve any // user defined types in partial index predicate expressions. - if err := flowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { - resolver := flowCtx.NewTypeResolver(txn) + if err := flowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { + resolver := flowCtx.NewTypeResolver(txn.KV()) // Hydrate all the types present in the table. if err = typedesc.HydrateTypesInDescriptor(ctx, desc, &resolver); err != nil { return err diff --git a/pkg/sql/backfill/mvcc_index_merger.go b/pkg/sql/backfill/mvcc_index_merger.go index e3758ebf2707..2d82970efed4 100644 --- a/pkg/sql/backfill/mvcc_index_merger.go +++ b/pkg/sql/backfill/mvcc_index_merger.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -264,35 +265,34 @@ func (ibm *IndexBackfillMerger) scan( var nextStart roachpb.Key var br *roachpb.BatchResponse - if err := ibm.flowCtx.Cfg.DB.TxnWithAdmissionControl(ctx, roachpb.AdmissionHeader_FROM_SQL, admissionpb.BulkNormalPri, - func(ctx context.Context, txn *kv.Txn) error { - if err := txn.SetFixedTimestamp(ctx, readAsOf); err != nil { - return err - } - // For now just grab all of the destination KVs and merge the corresponding entries. 
- log.VInfof(ctx, 2, "scanning batch [%s, %s) at %v to merge", startKey, endKey, readAsOf) - ba := &roachpb.BatchRequest{} - ba.TargetBytes = chunkBytes - if err := ibm.growBoundAccount(ctx, chunkBytes); err != nil { - return errors.Wrap(err, "failed to fetch keys to merge from temp index") - } - defer ibm.shrinkBoundAccount(ctx, chunkBytes) - - ba.MaxSpanRequestKeys = chunkSize - ba.Add(&roachpb.ScanRequest{ - RequestHeader: roachpb.RequestHeader{ - Key: startKey, - EndKey: endKey, - }, - ScanFormat: roachpb.KEY_VALUES, - }) - var pErr *roachpb.Error - br, pErr = txn.Send(ctx, ba) - if pErr != nil { - return pErr.GoError() - } - return nil - }); err != nil { + if err := ibm.flowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + if err := txn.KV().SetFixedTimestamp(ctx, readAsOf); err != nil { + return err + } + // For now just grab all of the destination KVs and merge the corresponding entries. + log.VInfof(ctx, 2, "scanning batch [%s, %s) at %v to merge", startKey, endKey, readAsOf) + ba := &roachpb.BatchRequest{} + ba.TargetBytes = chunkBytes + if err := ibm.growBoundAccount(ctx, chunkBytes); err != nil { + return errors.Wrap(err, "failed to fetch keys to merge from temp index") + } + defer ibm.shrinkBoundAccount(ctx, chunkBytes) + + ba.MaxSpanRequestKeys = chunkSize + ba.Add(&roachpb.ScanRequest{ + RequestHeader: roachpb.RequestHeader{ + Key: startKey, + EndKey: endKey, + }, + ScanFormat: roachpb.KEY_VALUES, + }) + var pErr *roachpb.Error + br, pErr = txn.KV().Send(ctx, ba) + if pErr != nil { + return pErr.GoError() + } + return nil + }, isql.WithPriority(admissionpb.BulkNormalPri)); err != nil { return mergeChunk{}, nil, err } @@ -340,41 +340,44 @@ func (ibm *IndexBackfillMerger) merge( sourcePrefix := rowenc.MakeIndexKeyPrefix(codec, table.GetID(), sourceID) destPrefix := rowenc.MakeIndexKeyPrefix(codec, table.GetID(), destinationID) - err := ibm.flowCtx.Cfg.DB.TxnWithAdmissionControl(ctx, roachpb.AdmissionHeader_FROM_SQL, 
admissionpb.BulkNormalPri, - func(ctx context.Context, txn *kv.Txn) error { - var deletedCount int - txn.AddCommitTrigger(func(ctx context.Context) { - log.VInfof(ctx, 2, "merged batch of %d keys (%d deletes) (span: %s) (commit timestamp: %s)", - len(sourceKeys), - deletedCount, - sourceSpan, - txn.CommitTimestamp(), - ) - }) - if len(sourceKeys) == 0 { - return nil - } + err := ibm.flowCtx.Cfg.DB.Txn(ctx, func( + ctx context.Context, txn isql.Txn, + ) error { + var deletedCount int + txn.KV().AddCommitTrigger(func(ctx context.Context) { + log.VInfof(ctx, 2, "merged batch of %d keys (%d deletes) (span: %s) (commit timestamp: %s)", + len(sourceKeys), + deletedCount, + sourceSpan, + txn.KV().CommitTimestamp(), + ) + }) + if len(sourceKeys) == 0 { + return nil + } - wb, memUsedInMerge, deletedKeys, err := ibm.constructMergeBatch(ctx, txn, sourceKeys, sourcePrefix, destPrefix) - if err != nil { - return err - } + wb, memUsedInMerge, deletedKeys, err := ibm.constructMergeBatch( + ctx, txn.KV(), sourceKeys, sourcePrefix, destPrefix, + ) + if err != nil { + return err + } - defer ibm.shrinkBoundAccount(ctx, memUsedInMerge) - deletedCount = deletedKeys - if err := txn.Run(ctx, wb); err != nil { - return err - } + defer ibm.shrinkBoundAccount(ctx, memUsedInMerge) + deletedCount = deletedKeys + if err := txn.KV().Run(ctx, wb); err != nil { + return err + } - if knobs, ok := ibm.flowCtx.Cfg.TestingKnobs.IndexBackfillMergerTestingKnobs.(*IndexBackfillMergerTestingKnobs); ok { - if knobs != nil && knobs.RunDuringMergeTxn != nil { - if err := knobs.RunDuringMergeTxn(ctx, txn, sourceSpan.Key, sourceSpan.EndKey); err != nil { - return err - } + if knobs, ok := ibm.flowCtx.Cfg.TestingKnobs.IndexBackfillMergerTestingKnobs.(*IndexBackfillMergerTestingKnobs); ok { + if knobs != nil && knobs.RunDuringMergeTxn != nil { + if err := knobs.RunDuringMergeTxn(ctx, txn.KV(), sourceSpan.Key, sourceSpan.EndKey); err != nil { + return err } } - return nil - }) + } + return nil + }) return err } 
diff --git a/pkg/sql/catalog/descidgen/generate_id.go b/pkg/sql/catalog/descidgen/generate_id.go index c256d8340c0a..f043226b80d9 100644 --- a/pkg/sql/catalog/descidgen/generate_id.go +++ b/pkg/sql/catalog/descidgen/generate_id.go @@ -28,6 +28,7 @@ import ( type generator struct { settings *cluster.Settings codec keys.SQLCodec + key func(context.Context) (roachpb.Key, error) getOrInc func(ctx context.Context, key roachpb.Key, inc int64) (int64, error) } @@ -56,16 +57,9 @@ func (g *generator) PeekNextUniqueDescID(ctx context.Context) (descpb.ID, error) // run is a convenience method for accessing the descriptor ID counter. func (g *generator) run(ctx context.Context, inc int64) (catid.DescID, error) { - key := g.codec.SequenceKey(keys.DescIDSequenceID) - if cv := g.settings.Version; g.codec.ForSystemTenant() && - !cv.IsActive(ctx, clusterversion.V23_1DescIDSequenceForSystemTenant) { - // At this point, the system tenant may still be using a legacy non-SQL key, - // or may be in the process of undergoing the migration away from it, in - // which case descriptor ID generation is made unavailable. 
- if cv.IsActive(ctx, clusterversion.V23_1DescIDSequenceForSystemTenant-1) { - return catid.InvalidDescID, ErrDescIDSequenceMigrationInProgress - } - key = keys.LegacyDescIDGenerator + key, err := g.key(ctx) + if err != nil { + return 0, err } nextID, err := g.getOrInc(ctx, key, inc) return catid.DescID(nextID), err @@ -97,6 +91,9 @@ func NewGenerator(settings *cluster.Settings, codec keys.SQLCodec, db *kv.DB) ev return &generator{ settings: settings, codec: codec, + key: func(ctx context.Context) (roachpb.Key, error) { + return key(ctx, codec, settings) + }, getOrInc: func(ctx context.Context, key roachpb.Key, inc int64) (int64, error) { if inc == 0 { ret, err := db.Get(ctx, key) @@ -107,6 +104,23 @@ func NewGenerator(settings *cluster.Settings, codec keys.SQLCodec, db *kv.DB) ev } } +func key( + ctx context.Context, codec keys.SQLCodec, settings *cluster.Settings, +) (roachpb.Key, error) { + key := codec.SequenceKey(keys.DescIDSequenceID) + if cv := settings.Version; codec.ForSystemTenant() && + !cv.IsActive(ctx, clusterversion.V23_1DescIDSequenceForSystemTenant) { + // At this point, the system tenant may still be using a legacy non-SQL key, + // or may be in the process of undergoing the migration away from it, in + // which case descriptor ID generation is made unavailable. + if cv.IsActive(ctx, clusterversion.V23_1DescIDSequenceForSystemTenant-1) { + return nil, ErrDescIDSequenceMigrationInProgress + } + key = keys.LegacyDescIDGenerator + } + return key, nil +} + // NewTransactionalGenerator constructs a transactional eval.DescIDGenerator. 
func NewTransactionalGenerator( settings *cluster.Settings, codec keys.SQLCodec, txn *kv.Txn, @@ -114,6 +128,9 @@ func NewTransactionalGenerator( return &generator{ settings: settings, codec: codec, + key: func(ctx context.Context) (roachpb.Key, error) { + return key(ctx, codec, settings) + }, getOrInc: func(ctx context.Context, key roachpb.Key, inc int64) (_ int64, err error) { var ret kv.KeyValue if inc == 0 { @@ -135,6 +152,22 @@ func GenerateUniqueRoleID( return IncrementUniqueRoleID(ctx, db, codec, 1) } +// GenerateUniqueRoleIDInTxn is like GenerateUniqueRoleID but performs the +// operation in the provided transaction. +func GenerateUniqueRoleIDInTxn( + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, +) (catid.RoleID, error) { + res, err := txn.Inc(ctx, codec.SequenceKey(keys.RoleIDSequenceID), 1) + if err != nil { + return 0, err + } + newVal, err := res.Value.GetInt() + if err != nil { + return 0, errors.NewAssertionErrorWithWrappedErrf(err, "failed to get int from role_id sequence") + } + return catid.RoleID(newVal - 1), nil +} + // IncrementUniqueRoleID returns the next available Role ID and increments // the counter by inc. The incrementing is non-transactional, and the counter // could be incremented multiple times because of retries. 
diff --git a/pkg/sql/catalog/descs/BUILD.bazel b/pkg/sql/catalog/descs/BUILD.bazel index 933c21930773..c09938ec54c0 100644 --- a/pkg/sql/catalog/descs/BUILD.bazel +++ b/pkg/sql/catalog/descs/BUILD.bazel @@ -52,15 +52,14 @@ go_library( "//pkg/sql/catalog/tabledesc", "//pkg/sql/catalog/typedesc", "//pkg/sql/catalog/zone", + "//pkg/sql/isql", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/sem/catconstants", "//pkg/sql/sem/catid", "//pkg/sql/sem/tree", - "//pkg/sql/sessiondata", "//pkg/sql/sqlerrors", "//pkg/sql/sqlliveness", - "//pkg/sql/sqlutil", "//pkg/sql/types", "//pkg/util/hlc", "//pkg/util/intsets", @@ -111,12 +110,12 @@ go_test( "//pkg/sql/catalog/nstree", "//pkg/sql/catalog/tabledesc", "//pkg/sql/catalog/typedesc", + "//pkg/sql/isql", "//pkg/sql/parser", "//pkg/sql/privilege", "//pkg/sql/sem/catid", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/sql/tests", "//pkg/sql/types", "//pkg/testutils/datapathutils", diff --git a/pkg/sql/catalog/descs/collection.go b/pkg/sql/catalog/descs/collection.go index 081f41e678b0..10ee77bc25c8 100644 --- a/pkg/sql/catalog/descs/collection.go +++ b/pkg/sql/catalog/descs/collection.go @@ -32,11 +32,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/nstree" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/iterutil" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -48,6 +48,9 @@ import ( // collection is cleared using ReleaseAll() which is called at the // end of each transaction on the 
session, or on hitting conditions such // as errors, or retries that result in transaction timestamp changes. +// +// TODO(ajwerner): Remove the txn argument from the Collection by more tightly +// binding a collection to a *kv.Txn. type Collection struct { // settings dictate whether we validate descriptors on write. @@ -136,6 +139,15 @@ type Collection struct { sqlLivenessSession sqlliveness.Session } +// FromTxn is a convenience function to extract a descs.Collection which is +// being interface-smuggled through an isql.Txn. It may return nil. +func FromTxn(txn isql.Txn) *Collection { + if g, ok := txn.(Txn); ok { + return g.Descriptors() + } + return nil +} + // GetDeletedDescs returns the deleted descriptors of the collection. func (tc *Collection) GetDeletedDescs() catalog.DescriptorIDSet { return tc.deletedDescs @@ -204,6 +216,12 @@ func (tc *Collection) HasUncommittedTables() (has bool) { return has } +// HasUncommittedDescriptors returns true if the collection contains any +// uncommitted descriptors. +func (tc *Collection) HasUncommittedDescriptors() bool { + return tc.uncommitted.uncommitted.Len() > 0 +} + // HasUncommittedTypes returns true if the Collection contains uncommitted // types. func (tc *Collection) HasUncommittedTypes() (has bool) { @@ -1183,14 +1201,14 @@ func MakeTestCollection(ctx context.Context, leaseManager *lease.Manager) Collec } // InternalExecFn is the type of functions that operates using an internalExecutor. -type InternalExecFn func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, descriptors *Collection) error +type InternalExecFn func(ctx context.Context, txn isql.Txn, descriptors *Collection) error // HistoricalInternalExecTxnRunnerFn callback for executing with the internal executor // at a fixed timestamp. 
type HistoricalInternalExecTxnRunnerFn = func(ctx context.Context, fn InternalExecFn) error // HistoricalInternalExecTxnRunner is like historicalTxnRunner except it only -// passes the fn the exported InternalExecutor instead of the whole unexported +// passes the fn the exported Executor instead of the whole unexported // extendedEvalContext, so it can be implemented outside pkg/sql. type HistoricalInternalExecTxnRunner interface { // Exec executes the callback at a given timestamp. diff --git a/pkg/sql/catalog/descs/collection_test.go b/pkg/sql/catalog/descs/collection_test.go index 580451b0357e..1ef039c5ffe9 100644 --- a/pkg/sql/catalog/descs/collection_test.go +++ b/pkg/sql/catalog/descs/collection_test.go @@ -39,10 +39,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/nstree" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -183,15 +183,15 @@ func TestTxnClearsCollectionOnRetry(t *testing.T) { execCfg := s.ExecutorConfig().(sql.ExecutorConfig) err := sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { - txn.SetDebugName(txnName) - _, mut, err := descs.PrefixAndMutableTable(ctx, descriptors.MutableByName(txn), &tn) + txn.KV().SetDebugName(txnName) + _, mut, err := descs.PrefixAndMutableTable(ctx, descriptors.MutableByName(txn.KV()), &tn) require.NoError(t, err) // Verify that the descriptor version is always 1 prior to the write and 2 
// after the write even after a retry. require.Equal(t, descpb.DescriptorVersion(1), mut.Version) - require.NoError(t, descriptors.WriteDesc(ctx, false /* kvTrace */, mut, txn)) + require.NoError(t, descriptors.WriteDesc(ctx, false /* kvTrace */, mut, txn.KV())) require.Equal(t, descpb.DescriptorVersion(2), mut.Version) return nil }, @@ -222,13 +222,13 @@ func TestAddUncommittedDescriptorAndMutableResolution(t *testing.T) { execCfg := s0.ExecutorConfig().(sql.ExecutorConfig) t.Run("database descriptors", func(t *testing.T) { require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { - mut, err := descriptors.MutableByName(txn).Database(ctx, "db") + mut, err := descriptors.MutableByName(txn.KV()).Database(ctx, "db") require.NoError(t, err) dbID := mut.GetID() - byID, err := descriptors.MutableByID(txn).Desc(ctx, dbID) + byID, err := descriptors.MutableByID(txn.KV()).Desc(ctx, dbID) require.NoError(t, err) require.Same(t, mut, byID) @@ -237,12 +237,12 @@ func TestAddUncommittedDescriptorAndMutableResolution(t *testing.T) { // Check that changes to the mutable descriptor don't impact the // collection until they're added as uncommitted - immByName, err := descriptors.ByNameWithLeased(txn).Get().Database(ctx, "db") + immByName, err := descriptors.ByNameWithLeased(txn.KV()).Get().Database(ctx, "db") require.NoError(t, err) require.Equal(t, dbID, immByName.GetID()) require.Equal(t, mut.OriginalVersion(), immByName.GetVersion()) - immByID, err := descriptors.ByIDWithLeased(txn).WithoutNonPublic().Get().Database(ctx, dbID) + immByID, err := descriptors.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Database(ctx, dbID) require.NoError(t, err) require.Same(t, immByName, immByID) @@ -250,29 +250,29 @@ func TestAddUncommittedDescriptorAndMutableResolution(t *testing.T) { b := &kv.Batch{} err = descriptors.InsertNamespaceEntryToBatch(ctx, 
false /* kvTrace */, mut, b) require.NoError(t, err) - err = txn.Run(ctx, b) + err = txn.KV().Run(ctx, b) require.NoError(t, err) // Should be able to get the database descriptor by the new name. - resolved, err := descriptors.ByNameWithLeased(txn).Get().Database(ctx, "new_name") + resolved, err := descriptors.ByNameWithLeased(txn.KV()).Get().Database(ctx, "new_name") require.Nil(t, err) require.Equal(t, dbID, resolved.GetID()) // Try to get the database descriptor by the old name and succeed but get // the old version with the old name because the new version has not yet // been written. - immResolvedWithNewNameButHasOldName, err := descriptors.ByNameWithLeased(txn).Get().Database(ctx, "db") + immResolvedWithNewNameButHasOldName, err := descriptors.ByNameWithLeased(txn.KV()).Get().Database(ctx, "db") require.NoError(t, err) require.Same(t, immByID, immResolvedWithNewNameButHasOldName) require.NoError(t, descriptors.AddUncommittedDescriptor(ctx, mut)) - immByNameAfter, err := descriptors.ByNameWithLeased(txn).Get().Database(ctx, "new_name") + immByNameAfter, err := descriptors.ByNameWithLeased(txn.KV()).Get().Database(ctx, "new_name") require.NoError(t, err) require.Equal(t, mut.GetVersion(), immByNameAfter.GetVersion()) require.Equal(t, mut.ImmutableCopy().DescriptorProto(), immByNameAfter.DescriptorProto()) - immByIDAfter, err := descriptors.ByIDWithLeased(txn).WithoutNonPublic().Get().Database(ctx, dbID) + immByIDAfter, err := descriptors.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Database(ctx, dbID) require.NoError(t, err) require.Same(t, immByNameAfter, immByIDAfter) @@ -281,22 +281,22 @@ func TestAddUncommittedDescriptorAndMutableResolution(t *testing.T) { }) t.Run("schema descriptors", func(t *testing.T) { require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { - db, err := 
descriptors.MutableByName(txn).Database(ctx, "db") + db, err := descriptors.MutableByName(txn.KV()).Database(ctx, "db") require.NoError(t, err) - schema, err := descriptors.MutableByName(txn).Schema(ctx, db, "sc") + schema, err := descriptors.MutableByName(txn.KV()).Schema(ctx, db, "sc") require.NoError(t, err) require.NotNil(t, schema) - resolved, err := descriptors.MutableByName(txn).Schema(ctx, db, "sc") + resolved, err := descriptors.MutableByName(txn.KV()).Schema(ctx, db, "sc") require.NoError(t, err) require.NotNil(t, schema) require.Same(t, schema, resolved) - byID, err := descriptors.MutableByID(txn).Desc(ctx, schema.GetID()) + byID, err := descriptors.MutableByID(txn.KV()).Desc(ctx, schema.GetID()) require.NoError(t, err) require.Same(t, schema, byID) @@ -305,19 +305,19 @@ func TestAddUncommittedDescriptorAndMutableResolution(t *testing.T) { }) t.Run("table descriptors", func(t *testing.T) { require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { tn := tree.MakeTableNameWithSchema("db", "sc", "tab") - _, tab, err := descs.PrefixAndMutableTable(ctx, descriptors.MutableByName(txn), &tn) + _, tab, err := descs.PrefixAndMutableTable(ctx, descriptors.MutableByName(txn.KV()), &tn) require.NoError(t, err) - _, resolved, err := descs.PrefixAndMutableTable(ctx, descriptors.MutableByName(txn), &tn) + _, resolved, err := descs.PrefixAndMutableTable(ctx, descriptors.MutableByName(txn.KV()), &tn) require.NoError(t, err) require.Same(t, tab, resolved) - byID, err := descriptors.MutableByID(txn).Desc(ctx, tab.GetID()) + byID, err := descriptors.MutableByID(txn.KV()).Desc(ctx, tab.GetID()) require.NoError(t, err) require.Same(t, tab, byID) @@ -326,18 +326,18 @@ func TestAddUncommittedDescriptorAndMutableResolution(t *testing.T) { }) t.Run("type descriptors", func(t *testing.T) { require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - 
ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { tn := tree.MakeQualifiedTypeName("db", "sc", "typ") - _, typ, err := descs.PrefixAndMutableType(ctx, descriptors.MutableByName(txn), &tn) + _, typ, err := descs.PrefixAndMutableType(ctx, descriptors.MutableByName(txn.KV()), &tn) require.NoError(t, err) - _, resolved, err := descs.PrefixAndMutableType(ctx, descriptors.MutableByName(txn), &tn) + _, resolved, err := descs.PrefixAndMutableType(ctx, descriptors.MutableByName(txn.KV()), &tn) require.NoError(t, err) require.Same(t, typ, resolved) - byID, err := descriptors.MutableByID(txn).Type(ctx, typ.GetID()) + byID, err := descriptors.MutableByID(txn.KV()).Type(ctx, typ.GetID()) require.NoError(t, err) require.Same(t, typ, byID) @@ -369,11 +369,11 @@ func TestSyntheticDescriptorResolution(t *testing.T) { execCfg := s0.ExecutorConfig().(sql.ExecutorConfig) require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { // Resolve the descriptor so we can mutate it. tn := tree.MakeTableNameWithSchema("db", tree.PublicSchemaName, "tbl") - _, desc, err := descs.PrefixAndTable(ctx, descriptors.ByNameWithLeased(txn).MaybeGet(), &tn) + _, desc, err := descs.PrefixAndTable(ctx, descriptors.ByNameWithLeased(txn.KV()).MaybeGet(), &tn) require.NotNil(t, desc) require.NoError(t, err) @@ -382,23 +382,23 @@ func TestSyntheticDescriptorResolution(t *testing.T) { descriptors.SetSyntheticDescriptors([]catalog.Descriptor{desc}) // Resolve the table by name again. 
- _, desc, err = descs.PrefixAndTable(ctx, descriptors.ByNameWithLeased(txn).MaybeGet(), &tn) + _, desc, err = descs.PrefixAndTable(ctx, descriptors.ByNameWithLeased(txn.KV()).MaybeGet(), &tn) require.NotNil(t, desc) require.NoError(t, err) require.Equal(t, "bar", desc.PublicColumns()[0].GetName()) // Attempting to resolve the table mutably is not allowed. - _, _, err = descs.PrefixAndMutableTable(ctx, descriptors.MutableByName(txn), &tn) + _, _, err = descs.PrefixAndMutableTable(ctx, descriptors.MutableByName(txn.KV()), &tn) require.EqualError(t, err, fmt.Sprintf("attempted mutable access of synthetic descriptor %d", tableID)) // Resolution by ID. - desc, err = descriptors.ByIDWithLeased(txn).WithoutNonPublic().Get().Table(ctx, tableID) + desc, err = descriptors.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Table(ctx, tableID) require.NoError(t, err) require.Equal(t, "bar", desc.PublicColumns()[0].GetName()) // Attempting to resolve the table mutably is not allowed. - _, err = descriptors.MutableByID(txn).Table(ctx, tableID) + _, err = descriptors.MutableByID(txn.KV()).Table(ctx, tableID) require.EqualError(t, err, fmt.Sprintf("attempted mutable access of synthetic descriptor %d", tableID)) return nil @@ -427,9 +427,9 @@ func TestDistSQLTypeResolver_GetTypeDescriptor_FromTable(t *testing.T) { var name tree.TypeName var tdesc catalog.TypeDescriptor err := sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { - tr := descs.NewDistSQLTypeResolver(descriptors, txn) + tr := descs.NewDistSQLTypeResolver(descriptors, txn.KV()) var err error name, tdesc, err = tr.GetTypeDescriptor(ctx, id) return err @@ -472,13 +472,13 @@ CREATE TABLE test.schema.t(x INT); execCfg := s.ExecutorConfig().(sql.ExecutorConfig) require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, 
txn isql.Txn, descsCol *descs.Collection, ) error { - dbDesc, err := descsCol.ByNameWithLeased(txn).Get().Database(ctx, "test") + dbDesc, err := descsCol.ByNameWithLeased(txn.KV()).Get().Database(ctx, "test") if err != nil { return err } - schemaDesc, err := descsCol.MutableByName(txn).Schema(ctx, dbDesc, "schema") + schemaDesc, err := descsCol.MutableByName(txn.KV()).Schema(ctx, dbDesc, "schema") if err != nil { return err } @@ -496,7 +496,7 @@ CREATE TABLE test.schema.t(x INT); } descsCol.SkipValidationOnWrite() - return descsCol.WriteDesc(ctx, false, schemaDesc, txn) + return descsCol.WriteDesc(ctx, false, schemaDesc, txn.KV()) }), ) @@ -534,14 +534,14 @@ func TestCollectionPreservesPostDeserializationChanges(t *testing.T) { // they'd need post-deserialization changes. execCfg := s.ExecutorConfig().(sql.ExecutorConfig) require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, col *descs.Collection, + ctx context.Context, txn isql.Txn, col *descs.Collection, ) error { - descs, err := col.MutableByID(txn).Descs(ctx, []descpb.ID{dbID, scID, typID, tabID}) + descs, err := col.MutableByID(txn.KV()).Descs(ctx, []descpb.ID{dbID, scID, typID, tabID}) if err != nil { return err } // Set version lower than minimum to force post-deserialization change. 
- b := txn.NewBatch() + b := txn.KV().NewBatch() for _, d := range descs { p := d.GetPrivileges() p.SetVersion(catpb.Version21_2 - 1) @@ -549,12 +549,12 @@ func TestCollectionPreservesPostDeserializationChanges(t *testing.T) { return err } } - return txn.Run(ctx, b) + return txn.KV().Run(ctx, b) })) require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, col *descs.Collection, + ctx context.Context, txn isql.Txn, col *descs.Collection, ) error { - immuts, err := col.ByID(txn).WithoutNonPublic().Get().Descs(ctx, []descpb.ID{dbID, scID, typID, tabID}) + immuts, err := col.ByID(txn.KV()).WithoutNonPublic().Get().Descs(ctx, []descpb.ID{dbID, scID, typID, tabID}) if err != nil { return err } @@ -569,9 +569,9 @@ func TestCollectionPreservesPostDeserializationChanges(t *testing.T) { return nil })) require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, col *descs.Collection, + ctx context.Context, txn isql.Txn, col *descs.Collection, ) error { - muts, err := col.MutableByID(txn).Descs(ctx, []descpb.ID{dbID, scID, typID, tabID}) + muts, err := col.MutableByID(txn.KV()).Descs(ctx, []descpb.ID{dbID, scID, typID, tabID}) if err != nil { return err } @@ -676,17 +676,17 @@ func TestDescriptorCache(t *testing.T) { execCfg := s0.ExecutorConfig().(sql.ExecutorConfig) t.Run("all descriptors", func(t *testing.T) { require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { descriptors.SkipValidationOnWrite() // Warm up cache. - _, err := descriptors.GetAllDescriptors(ctx, txn) + _, err := descriptors.GetAllDescriptors(ctx, txn.KV()) if err != nil { return err } // Modify table descriptor. 
tn := tree.MakeTableNameWithSchema("db", "schema", "table") - _, mut, err := descs.PrefixAndMutableTable(ctx, descriptors.MutableByName(txn), &tn) + _, mut, err := descs.PrefixAndMutableTable(ctx, descriptors.MutableByName(txn.KV()), &tn) if err != nil { return err } @@ -698,7 +698,7 @@ func TestDescriptorCache(t *testing.T) { return err } // The collection's all descriptors should include the modification. - cat, err := descriptors.GetAllDescriptors(ctx, txn) + cat, err := descriptors.GetAllDescriptors(ctx, txn.KV()) if err != nil { return err } @@ -710,17 +710,17 @@ func TestDescriptorCache(t *testing.T) { }) t.Run("all db descriptors", func(t *testing.T) { require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { descriptors.SkipValidationOnWrite() // Warm up cache. - dbDescs, err := descriptors.GetAllDatabaseDescriptors(ctx, txn) + dbDescs, err := descriptors.GetAllDatabaseDescriptors(ctx, txn.KV()) if err != nil { return err } require.Len(t, dbDescs, 4) // Modify database descriptor. - mut, err := descriptors.MutableByName(txn).Database(ctx, "db") + mut, err := descriptors.MutableByName(txn.KV()).Database(ctx, "db") if err != nil { return err } @@ -732,7 +732,7 @@ func TestDescriptorCache(t *testing.T) { } // The collection's all database descriptors should reflect the // modification. - dbDescs, err = descriptors.GetAllDatabaseDescriptors(ctx, txn) + dbDescs, err = descriptors.GetAllDatabaseDescriptors(ctx, txn.KV()) if err != nil { return err } @@ -743,21 +743,21 @@ func TestDescriptorCache(t *testing.T) { }) t.Run("schemas for database", func(t *testing.T) { require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { descriptors.SkipValidationOnWrite() // Warm up cache. 
- dbDesc, err := descriptors.MutableByName(txn).Database(ctx, "db") + dbDesc, err := descriptors.MutableByName(txn.KV()).Database(ctx, "db") if err != nil { return err } - _, err = descriptors.GetSchemasForDatabase(ctx, txn, dbDesc) + _, err = descriptors.GetSchemasForDatabase(ctx, txn.KV(), dbDesc) if err != nil { return err } // Modify schema name. var db catalog.DatabaseDescriptor = dbDesc - schemaDesc, err := descriptors.MutableByName(txn).Schema(ctx, db, "schema") + schemaDesc, err := descriptors.MutableByName(txn.KV()).Schema(ctx, db, "schema") if err != nil { return err } @@ -775,7 +775,7 @@ func TestDescriptorCache(t *testing.T) { return err } // The collection's schemas for database should reflect the modification. - schemas, err := descriptors.GetSchemasForDatabase(ctx, txn, dbDesc) + schemas, err := descriptors.GetSchemasForDatabase(ctx, txn.KV(), dbDesc) if err != nil { return err } @@ -804,18 +804,17 @@ func TestGetAllDescriptorsInDatabase(t *testing.T) { tdb.Exec(t, `CREATE TABLE db.schema.table()`) s0 := tc.Server(0) - tm := s0.InternalExecutorFactory().(descs.TxnManager) + tm := s0.InternalDB().(descs.DB) - sd := sql.NewFakeSessionData(&s0.ClusterSettings().SV) - sd.Database = "db" - require.NoError(t, tm.DescsTxnWithExecutor(ctx, s0.DB(), sd, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor, + run := func( + ctx context.Context, txn descs.Txn, ) error { - dbDesc, err := descriptors.ByName(txn).Get().Database(ctx, "db") + descriptors := txn.Descriptors() + dbDesc, err := descriptors.ByName(txn.KV()).Get().Database(ctx, "db") if err != nil { return err } - allDescs, err := descriptors.GetAllDescriptorsForDatabase(ctx, txn, dbDesc) + allDescs, err := txn.Descriptors().GetAllDescriptorsForDatabase(ctx, txn.KV(), dbDesc) if err != nil { return err } @@ -835,11 +834,11 @@ parent schema name id kind version dropped public `CREATE FUNCTION f() RETURNS INT LANGUAGE SQL IMMUTABLE AS $$ SELECT 1 $$`, `CREATE 
FUNCTION sc_foo.f() RETURNS INT LANGUAGE SQL IMMUTABLE AS $$ SELECT 1 $$`, } { - if _, err = ie.Exec(ctx, "test", txn, stmt); err != nil { + if _, err = txn.Exec(ctx, "test", txn.KV(), stmt); err != nil { return err } } - allDescs, err = descriptors.GetAllDescriptorsForDatabase(ctx, txn, dbDesc) + allDescs, err = txn.Descriptors().GetAllDescriptorsForDatabase(ctx, txn.KV(), dbDesc) if err != nil { return err } @@ -856,7 +855,10 @@ parent schema name id kind version dropped public 104 109 f 112 function 1 false true `, formatCatalog(allDescs.OrderedDescriptors())) return nil - })) + } + sd := sql.NewFakeSessionData(&s0.ClusterSettings().SV) + sd.Database = "db" + require.NoError(t, tm.DescsTxn(ctx, run, isql.WithSessionData(sd))) } // formatDescriptors formats descriptors into a text string @@ -920,12 +922,12 @@ func TestCollectionTimeTravelLookingTooFarBack(t *testing.T) { execCfg := s0.ExecutorConfig().(sql.ExecutorConfig) goFarBackInTime := func(fn func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error) error { - return sql.DescsTxn(ctx, &execCfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { + return sql.DescsTxn(ctx, &execCfg, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { veryFarBack := execCfg.Clock.Now().Add(-1000*time.Hour.Nanoseconds(), 0) - if err := txn.SetFixedTimestamp(ctx, veryFarBack); err != nil { + if err := txn.KV().SetFixedTimestamp(ctx, veryFarBack); err != nil { return err } - return fn(ctx, txn, col) + return fn(ctx, txn.KV(), col) }) } @@ -1017,9 +1019,9 @@ func TestHydrateCatalog(t *testing.T) { {replaceTypeDescWithNonTypeDesc("ctyp"), "referenced type ID 109: descriptor is a *dbdesc." 
+ "" + "immutable: unexpected descriptor type"}, } { require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { - cat, err := descriptors.GetAllFromStorageUnvalidated(ctx, txn) + cat, err := descriptors.GetAllFromStorageUnvalidated(ctx, txn.KV()) if err != nil { return err } @@ -1033,15 +1035,15 @@ func TestHydrateCatalog(t *testing.T) { }) t.Run("valid catalog", func(t *testing.T) { require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { - cat, err := descriptors.GetAllFromStorageUnvalidated(ctx, txn) + cat, err := descriptors.GetAllFromStorageUnvalidated(ctx, txn.KV()) if err != nil { return err } mc := nstree.MutableCatalog{Catalog: cat} require.NoError(t, descs.HydrateCatalog(ctx, mc)) - tbl := desctestutils.TestingGetTableDescriptor(txn.DB(), keys.SystemSQLCodec, "db", "schema", "table") + tbl := desctestutils.TestingGetTableDescriptor(txn.KV().DB(), keys.SystemSQLCodec, "db", "schema", "table") tblDesc := cat.LookupDescriptor(tbl.GetID()).(catalog.TableDescriptor) expectedEnum := types.UserDefinedTypeMetadata{ Name: &types.UserDefinedTypeName{ @@ -1114,15 +1116,16 @@ SELECT id ec := s.ExecutorConfig().(sql.ExecutorConfig) codec := ec.Codec descIDGen := ec.DescIDGenerator - require.NoError(t, ec.InternalExecutorFactory.DescsTxnWithExecutor(ctx, s.DB(), nil /* sessionData */, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor, + require.NoError(t, ec.InternalDB.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { + descriptors := txn.Descriptors() checkImmutableDescriptor := func(id descpb.ID, expName string, f func(t *testing.T, desc catalog.Descriptor)) error { - tabImm, err := 
descriptors.ByIDWithLeased(txn).WithoutOffline().Get().Table(ctx, id) + tabImm, err := descriptors.ByIDWithLeased(txn.KV()).WithoutOffline().Get().Table(ctx, id) require.NoError(t, err) require.Equal(t, expName, tabImm.GetName()) f(t, tabImm) - all, err := descriptors.GetAllDescriptors(ctx, txn) + all, err := txn.Descriptors().GetAllDescriptors(ctx, txn.KV()) if err != nil { return err } @@ -1132,14 +1135,14 @@ SELECT id // Modify the table to have the name "bar", synthetically { - tab, err := descriptors.MutableByID(txn).Table(ctx, tabID) + tab, err := descriptors.MutableByID(txn.KV()).Table(ctx, tabID) if err != nil { return err } tab = tabledesc.NewBuilder(tab.TableDesc()).BuildCreatedMutableTable() tab.Name = "bar" tab.SetDropped() - descriptors.AddSyntheticDescriptor(tab) + txn.Descriptors().AddSyntheticDescriptor(tab) } // Retrieve the immutable descriptor, find the name "bar" if err := checkImmutableDescriptor(tabID, "bar", func(t *testing.T, desc catalog.Descriptor) { @@ -1149,28 +1152,28 @@ SELECT id return err } // Attempt to retrieve the mutable descriptor, validate the error. - _, err := descriptors.MutableByID(txn).Table(ctx, tabID) + _, err := descriptors.MutableByID(txn.KV()).Table(ctx, tabID) require.Regexp(t, `attempted mutable access of synthetic descriptor \d+`, err) - descriptors.ResetSyntheticDescriptors() + txn.Descriptors().ResetSyntheticDescriptors() // Retrieve the mutable descriptor, find the unmodified "foo". // Then modify the name to "baz" and write it. 
{ - tabMut, err := descriptors.MutableByID(txn).Table(ctx, tabID) + tabMut, err := descriptors.MutableByID(txn.KV()).Table(ctx, tabID) require.NoError(t, err) require.Equal(t, "foo", tabMut.GetName()) tabMut.Name = "baz" - if _, err := txn.Del(ctx, catalogkeys.EncodeNameKey(codec, &descpb.NameInfo{ + if _, err := txn.KV().Del(ctx, catalogkeys.EncodeNameKey(codec, &descpb.NameInfo{ ParentID: tabMut.GetParentID(), ParentSchemaID: tabMut.GetParentSchemaID(), Name: tabMut.OriginalName(), })); err != nil { return err } - if err := txn.Put(ctx, catalogkeys.EncodeNameKey(codec, tabMut), int64(tabMut.ID)); err != nil { + if err := txn.KV().Put(ctx, catalogkeys.EncodeNameKey(codec, tabMut), int64(tabMut.ID)); err != nil { return err } const kvTrace = false - if err := descriptors.WriteDesc(ctx, kvTrace, tabMut, txn); err != nil { + if err := txn.Descriptors().WriteDesc(ctx, kvTrace, tabMut, txn.KV()); err != nil { return err } } @@ -1186,16 +1189,16 @@ SELECT id newDBID, err := descIDGen.GenerateUniqueDescID(ctx) require.NoError(t, err) newDB := dbdesc.NewInitial(newDBID, "newDB", username.RootUserName()) - descriptors.AddSyntheticDescriptor(newDB) + txn.Descriptors().AddSyntheticDescriptor(newDB) - curDatabase, err := descriptors.ByIDWithLeased(txn).WithoutNonPublic().Get().Database(ctx, curDatabaseID) + curDatabase, err := descriptors.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Database(ctx, curDatabaseID) if err != nil { return err } // Check that AllDatabaseDescriptors includes the synthetic database. 
{ - allDBs, err := descriptors.GetAllDatabaseDescriptors(ctx, txn) + allDBs, err := txn.Descriptors().GetAllDatabaseDescriptors(ctx, txn.KV()) require.NoError(t, err) require.ElementsMatch(t, []string{ "system", "postgres", "defaultdb", "newDB", "otherDB", @@ -1203,7 +1206,7 @@ SELECT id } { - defaultDBSchemaNames, err := descriptors.GetSchemasForDatabase(ctx, txn, curDatabase) + defaultDBSchemaNames, err := txn.Descriptors().GetSchemasForDatabase(ctx, txn.KV(), curDatabase) if err != nil { return err } @@ -1212,20 +1215,20 @@ SELECT id } // Rename a schema synthetically, make sure that that propagates. - scDesc, err := descriptors.MutableByID(txn).Schema(ctx, scID) + scDesc, err := descriptors.MutableByID(txn.KV()).Schema(ctx, scID) if err != nil { return err } scDesc.SetName("sc2") scDesc.Version++ - require.NoError(t, descriptors.AddUncommittedDescriptor(ctx, scDesc)) + require.NoError(t, txn.Descriptors().AddUncommittedDescriptor(ctx, scDesc)) newSchema, _, err := sql.CreateSchemaDescriptorWithPrivileges(ctx, descIDGen, curDatabase, "newSC", username.RootUserName(), username.RootUserName(), true) require.NoError(t, err) - descriptors.AddSyntheticDescriptor(newSchema) + txn.Descriptors().AddSyntheticDescriptor(newSchema) { - defaultDBSchemaNames, err := descriptors.GetSchemasForDatabase(ctx, txn, curDatabase) + defaultDBSchemaNames, err := txn.Descriptors().GetSchemasForDatabase(ctx, txn.KV(), curDatabase) if err != nil { return err } @@ -1235,7 +1238,7 @@ SELECT id // Rename schema back to old name to prevent validation failure on commit. 
scDesc.SetName("sc") - require.NoError(t, descriptors.AddUncommittedDescriptor(ctx, scDesc)) + require.NoError(t, txn.Descriptors().AddUncommittedDescriptor(ctx, scDesc)) return nil })) } diff --git a/pkg/sql/catalog/descs/factory.go b/pkg/sql/catalog/descs/factory.go index 6c2eab2d77b5..efb0f1817465 100644 --- a/pkg/sql/catalog/descs/factory.go +++ b/pkg/sql/catalog/descs/factory.go @@ -14,15 +14,13 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/hydrateddesccache" "github.com/cockroachdb/cockroach/pkg/sql/catalog/internal/catkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" - "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/mon" ) @@ -45,24 +43,15 @@ func (cf *CollectionFactory) GetClusterSettings() *cluster.Settings { return cf.settings } -// TxnManager is used to enable running multiple queries with an internal -// executor in a transactional manner. -type TxnManager interface { - sqlutil.InternalExecutorFactory +type Txn interface { + isql.Txn + Descriptors() *Collection +} - // DescsTxnWithExecutor enables using an internal executor to run sql - // statements in a transactional manner. It creates a descriptor collection - // that lives within the scope of the passed in TxnWithExecutorFunc, and - // also ensures that the internal executor also share the same descriptor - // collection. Please use this interface if you want to run multiple sql - // statement with an internal executor in a txn. 
- DescsTxnWithExecutor( - ctx context.Context, - db *kv.DB, - sd *sessiondata.SessionData, - f TxnWithExecutorFunc, - opts ...sqlutil.TxnOption, - ) error +// DB is used to enable running multiple queries with an internal +// executor in a transactional manner. +type DB interface { + isql.DB // DescsTxn is similar to DescsTxnWithExecutor but without an internal executor. // It creates a descriptor collection that lives within the scope of the given @@ -70,16 +59,11 @@ type TxnManager interface { // them. DescsTxn( ctx context.Context, - db *kv.DB, - f func(context.Context, *kv.Txn, *Collection) error, - opts ...sqlutil.TxnOption, + f func(context.Context, Txn) error, + opts ...isql.TxnOption, ) error } -// InternalExecutorCommitTxnFunc is to commit the txn associated with an -// internal executor. -type InternalExecutorCommitTxnFunc func(ctx context.Context) error - // NewCollectionFactory constructs a new CollectionFactory which holds onto // the node-level dependencies needed to construct a Collection. func NewCollectionFactory( diff --git a/pkg/sql/catalog/descs/system_table.go b/pkg/sql/catalog/descs/system_table.go index 20af97e6a2cd..0c57e55838f9 100644 --- a/pkg/sql/catalog/descs/system_table.go +++ b/pkg/sql/catalog/descs/system_table.go @@ -14,28 +14,25 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" ) // systemTableIDResolver is the implementation for catalog.SystemTableIDResolver. type systemTableIDResolver struct { - collectionFactory *CollectionFactory - internalExecutorFactory TxnManager - db *kv.DB + collectionFactory *CollectionFactory + db DB } var _ catalog.SystemTableIDResolver = (*systemTableIDResolver)(nil) // MakeSystemTableIDResolver creates an object that implements catalog.SystemTableIDResolver. 
func MakeSystemTableIDResolver( - collectionFactory *CollectionFactory, internalExecutorFactory TxnManager, db *kv.DB, + collectionFactory *CollectionFactory, db DB, ) catalog.SystemTableIDResolver { return &systemTableIDResolver{ - collectionFactory: collectionFactory, - internalExecutorFactory: internalExecutorFactory, - db: db, + collectionFactory: collectionFactory, + db: db, } } @@ -45,15 +42,17 @@ func (r *systemTableIDResolver) LookupSystemTableID( ) (descpb.ID, error) { var id descpb.ID - if err := r.internalExecutorFactory.DescsTxn(ctx, r.db, func( - ctx context.Context, txn *kv.Txn, descriptors *Collection, + if err := r.db.DescsTxn(ctx, func( + ctx context.Context, txn Txn, ) (err error) { ni := descpb.NameInfo{ ParentID: keys.SystemDatabaseID, ParentSchemaID: keys.SystemPublicSchemaID, Name: tableName, } - read, err := descriptors.cr.GetByNames(ctx, txn, []descpb.NameInfo{ni}) + read, err := txn.Descriptors().cr.GetByNames( + ctx, txn.KV(), []descpb.NameInfo{ni}, + ) if err != nil { return err } diff --git a/pkg/sql/catalog/descs/txn.go b/pkg/sql/catalog/descs/txn.go index 559ba6524d8f..0173cd336abd 100644 --- a/pkg/sql/catalog/descs/txn.go +++ b/pkg/sql/catalog/descs/txn.go @@ -17,23 +17,14 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/retry" "github.com/cockroachdb/errors" ) -// TxnWithExecutorFunc is used to run a transaction in the context of a -// Collection and an InternalExecutor. 
-type TxnWithExecutorFunc = func( - ctx context.Context, - txn *kv.Txn, - descriptors *Collection, - ie sqlutil.InternalExecutor, -) error - // CheckTwoVersionInvariant checks whether any new schema being modified written // at a version V has only valid leases at version = V - 1. A transaction retry // error as well as a boolean is returned whenever the invariant is violated. @@ -62,7 +53,7 @@ type TxnWithExecutorFunc = func( func CheckTwoVersionInvariant( ctx context.Context, clock *hlc.Clock, - ie sqlutil.InternalExecutor, + noTxnExec isql.Executor, descsCol *Collection, txn *kv.Txn, onRetryBackoff func(), @@ -97,7 +88,7 @@ func CheckTwoVersionInvariant( // transaction ends up committing then there won't have been any created // in the meantime. count, err := lease.CountLeases( - ctx, ie, withNewVersion, txn.ProvisionalCommitTimestamp(), + ctx, noTxnExec, withNewVersion, txn.ProvisionalCommitTimestamp(), ) if err != nil { return err @@ -124,7 +115,7 @@ func CheckTwoVersionInvariant( for r := retry.StartWithCtx(ctx, base.DefaultRetryOptions()); r.Next(); { // Use the current clock time. 
now := clock.Now() - count, err := lease.CountLeases(ctx, ie, withNewVersion, now) + count, err := lease.CountLeases(ctx, noTxnExec, withNewVersion, now) if err != nil { return err } diff --git a/pkg/sql/catalog/descs/txn_external_test.go b/pkg/sql/catalog/descs/txn_external_test.go index f9cc296d50aa..89b00e29dccd 100644 --- a/pkg/sql/catalog/descs/txn_external_test.go +++ b/pkg/sql/catalog/descs/txn_external_test.go @@ -15,9 +15,8 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -32,22 +31,22 @@ func TestTxnWithStepping(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - ief := s.InternalExecutorFactory().(descs.TxnManager) + db := s.InternalDB().(descs.DB) scratchKey, err := s.ScratchRange() require.NoError(t, err) // Write a key, read in the transaction without stepping, ensure we // do not see the value, step the transaction, then ensure that we do. 
- require.NoError(t, ief.DescsTxn(ctx, kvDB, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + require.NoError(t, db.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { - if err := txn.Put(ctx, scratchKey, 1); err != nil { + if err := txn.KV().Put(ctx, scratchKey, 1); err != nil { return err } { - got, err := txn.Get(ctx, scratchKey) + got, err := txn.KV().Get(ctx, scratchKey) if err != nil { return err } @@ -55,11 +54,11 @@ func TestTxnWithStepping(t *testing.T) { return errors.AssertionFailedf("expected no value, got %v", got) } } - if err := txn.Step(ctx); err != nil { + if err := txn.KV().Step(ctx); err != nil { return err } { - got, err := txn.Get(ctx, scratchKey) + got, err := txn.KV().Get(ctx, scratchKey) if err != nil { return err } @@ -68,5 +67,5 @@ func TestTxnWithStepping(t *testing.T) { } } return nil - }, sqlutil.SteppingEnabled())) + }, isql.SteppingEnabled())) } diff --git a/pkg/sql/catalog/descs/txn_with_executor_datadriven_test.go b/pkg/sql/catalog/descs/txn_with_executor_datadriven_test.go index 24196eaee7da..8657ef8cce48 100644 --- a/pkg/sql/catalog/descs/txn_with_executor_datadriven_test.go +++ b/pkg/sql/catalog/descs/txn_with_executor_datadriven_test.go @@ -20,12 +20,10 @@ import ( "text/tabwriter" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/testutils/datapathutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -49,7 +47,7 @@ func TestTxnWithExecutorDataDriven(t *testing.T) { ctx := context.Background() datadriven.Walk(t, datapathutils.TestDataPath(t, ""), func(t *testing.T, path string) 
{ - s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string { stmts, err := parser.Parse(d.Input) @@ -69,21 +67,19 @@ func TestTxnWithExecutorDataDriven(t *testing.T) { searchPath = sessiondata.MakeSearchPath(strings.Split(sp, ",")) } sd.SearchPath = &searchPath - ief := s.InternalExecutorFactory().(descs.TxnManager) - err = ief.DescsTxnWithExecutor(ctx, kvDB, nil /* sessionData */, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor, - ) error { + ief := s.InternalDB().(descs.DB) + err = ief.DescsTxn(ctx, func(ctx context.Context, txn descs.Txn) error { for _, stmt := range stmts { switch d.Cmd { case "exec": - n, err := ie.ExecEx(ctx, "test", txn, sd, stmt.SQL) + n, err := txn.ExecEx(ctx, "test", txn.KV(), sd, stmt.SQL) if err != nil { return err } fmt.Fprintf(&out, "%d\t", n) case "query": - rows, cols, err := ie.QueryBufferedExWithCols( - ctx, "test", txn, sd, stmt.SQL, + rows, cols, err := txn.QueryBufferedExWithCols( + ctx, "test", txn.KV(), sd, stmt.SQL, ) if err != nil { return err diff --git a/pkg/sql/catalog/lease/BUILD.bazel b/pkg/sql/catalog/lease/BUILD.bazel index 6ed7e97a6f4f..7885be8f4603 100644 --- a/pkg/sql/catalog/lease/BUILD.bazel +++ b/pkg/sql/catalog/lease/BUILD.bazel @@ -37,11 +37,11 @@ go_library( "//pkg/sql/catalog/systemschema", "//pkg/sql/catalog/tabledesc", "//pkg/sql/enum", + "//pkg/sql/isql", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/storage", "//pkg/util/grpcutil", "//pkg/util/hlc", @@ -96,12 +96,12 @@ go_test( "//pkg/sql/catalog/systemschema", "//pkg/sql/catalog/tabledesc", "//pkg/sql/enum", + "//pkg/sql/isql", "//pkg/sql/pgwire/pgcode", "//pkg/sql/rowenc/keyside", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", 
"//pkg/sql/sqltestutils", - "//pkg/sql/sqlutil", "//pkg/sql/tests", "//pkg/sql/types", "//pkg/storage", diff --git a/pkg/sql/catalog/lease/count.go b/pkg/sql/catalog/lease/count.go index 3eb31c691319..e5355021acf4 100644 --- a/pkg/sql/catalog/lease/count.go +++ b/pkg/sql/catalog/lease/count.go @@ -15,9 +15,9 @@ import ( "fmt" "strings" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/errors" ) @@ -25,7 +25,7 @@ import ( // CountLeases returns the number of unexpired leases for a number of descriptors // each at a particular version at a particular time. func CountLeases( - ctx context.Context, executor sqlutil.InternalExecutor, versions []IDVersion, at hlc.Timestamp, + ctx context.Context, executor isql.Executor, versions []IDVersion, at hlc.Timestamp, ) (int, error) { var whereClauses []string for _, t := range versions { diff --git a/pkg/sql/catalog/lease/helpers_test.go b/pkg/sql/catalog/lease/helpers_test.go index 2bc869a9d696..b833ad0eece1 100644 --- a/pkg/sql/catalog/lease/helpers_test.go +++ b/pkg/sql/catalog/lease/helpers_test.go @@ -167,7 +167,7 @@ func (m *Manager) PublishMultiple( descs := make(map[descpb.ID]catalog.MutableDescriptor) // There should be only one version of the descriptor, but it's // a race now to update to the next version. 
- err := m.storage.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + err := m.storage.db.KV().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { versions := make(map[descpb.ID]descpb.DescriptorVersion) descsToUpdate := make(map[descpb.ID]catalog.MutableDescriptor) for _, id := range ids { diff --git a/pkg/sql/catalog/lease/ie_writer_test.go b/pkg/sql/catalog/lease/ie_writer_test.go index 52912a0a9072..3e4f69a0d80a 100644 --- a/pkg/sql/catalog/lease/ie_writer_test.go +++ b/pkg/sql/catalog/lease/ie_writer_test.go @@ -16,17 +16,17 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/errors" ) type ieWriter struct { insertQuery string deleteQuery string - ie sqlutil.InternalExecutor + ie isql.Executor } -func newInternalExecutorWriter(ie sqlutil.InternalExecutor, tableName string) *ieWriter { +func newInternalExecutorWriter(ie isql.Executor, tableName string) *ieWriter { if systemschema.TestSupportMultiRegion() { const ( deleteLease = ` diff --git a/pkg/sql/catalog/lease/kv_writer_test.go b/pkg/sql/catalog/lease/kv_writer_test.go index 898afed9c167..e512905123c2 100644 --- a/pkg/sql/catalog/lease/kv_writer_test.go +++ b/pkg/sql/catalog/lease/kv_writer_test.go @@ -24,8 +24,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" "github.com/cockroachdb/cockroach/pkg/sql/enum" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" kvstorage "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -76,7 +76,7 @@ func TestKVWriterMatchesIEWriter(t *testing.T) { lease1ID := 
makeTable("lease1") lease2ID := makeTable("lease2") - ie := s.InternalExecutor().(sqlutil.InternalExecutor) + ie := s.InternalExecutor().(isql.Executor) codec := s.LeaseManager().(*Manager).Codec() w := teeWriter{ a: newInternalExecutorWriter(ie, "defaultdb.public.lease1"), diff --git a/pkg/sql/catalog/lease/lease.go b/pkg/sql/catalog/lease/lease.go index d94a2394e7e6..323873503ca7 100644 --- a/pkg/sql/catalog/lease/lease.go +++ b/pkg/sql/catalog/lease/lease.go @@ -34,9 +34,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/internal/catkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" "github.com/cockroachdb/cockroach/pkg/sql/enum" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" kvstorage "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -90,7 +90,7 @@ func (m *Manager) WaitForNoVersion( stmt := fmt.Sprintf(`SELECT count(1) FROM system.public.lease AS OF SYSTEM TIME '%s' WHERE ("descID" = %d AND expiration > $1)`, now.AsOfSystemTime(), id) - values, err := m.storage.internalExecutor.QueryRowEx( + values, err := m.storage.db.Executor().QueryRowEx( ctx, "count-leases", nil, /* txn */ sessiondata.RootUserSessionDataOverride, stmt, now.GoTime(), @@ -128,7 +128,7 @@ func (m *Manager) WaitForOneVersion( ctx context.Context, id descpb.ID, retryOpts retry.Options, ) (desc catalog.Descriptor, _ error) { for lastCount, r := 0, retry.Start(retryOpts); r.Next(); { - if err := m.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + if err := m.storage.db.KV().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { // Use the lower-level MaybeGetDescriptorByIDUnvalidated to avoid // performing validation while waiting for leases to drain. 
// Validation is somewhat expensive but more importantly, is not @@ -155,7 +155,7 @@ func (m *Manager) WaitForOneVersion( // version of the descriptor. now := m.storage.clock.Now() descs := []IDVersion{NewIDVersionPrev(desc.GetName(), desc.GetID(), desc.GetVersion())} - count, err := CountLeases(ctx, m.storage.internalExecutor, descs, now) + count, err := CountLeases(ctx, m.storage.db.Executor(), descs, now) if err != nil { return nil, err } @@ -398,7 +398,9 @@ func (m *Manager) readOlderVersionForTimestamp( // Retrieve descriptors in range [timestamp, endTimestamp) in decreasing // modification time order. - descs, err := getDescriptorsFromStoreForInterval(ctx, m.DB(), m.Codec(), id, timestamp, endTimestamp) + descs, err := getDescriptorsFromStoreForInterval( + ctx, m.storage.db.KV(), m.Codec(), id, timestamp, endTimestamp, + ) if err != nil { return nil, err } @@ -704,9 +706,8 @@ const leaseConcurrencyLimit = 5 func NewLeaseManager( ambientCtx log.AmbientContext, nodeIDContainer *base.SQLIDContainer, - db *kv.DB, + db isql.DB, clock *hlc.Clock, - internalExecutor sqlutil.InternalExecutor, settings *cluster.Settings, codec keys.SQLCodec, testingKnobs ManagerTestingKnobs, @@ -715,16 +716,15 @@ func NewLeaseManager( ) *Manager { lm := &Manager{ storage: storage{ - nodeIDContainer: nodeIDContainer, - writer: newKVWriter(codec, db, keys.LeaseTableID), - db: db, - clock: clock, - internalExecutor: internalExecutor, - settings: settings, - codec: codec, - sysDBCache: catkv.NewSystemDatabaseCache(codec, settings), - group: singleflight.NewGroup("acquire-lease", "descriptor ID"), - testingKnobs: testingKnobs.LeaseStoreTestingKnobs, + nodeIDContainer: nodeIDContainer, + writer: newKVWriter(codec, db.KV(), keys.LeaseTableID), + db: db, + clock: clock, + settings: settings, + codec: codec, + sysDBCache: catkv.NewSystemDatabaseCache(codec, settings), + group: singleflight.NewGroup("acquire-lease", "descriptor ID"), + testingKnobs: testingKnobs.LeaseStoreTestingKnobs, 
outstandingLeases: metric.NewGauge(metric.Metadata{ Name: "sql.leases.active", Help: "The number of outstanding SQL schema leases.", @@ -743,7 +743,7 @@ func NewLeaseManager( lm.storage.regionPrefix.Store(enum.One) lm.stopper.AddCloser(lm.sem.Closer("stopper")) lm.mu.descriptors = make(map[descpb.ID]*descriptorState) - lm.mu.updatesResolvedTimestamp = db.Clock().Now() + lm.mu.updatesResolvedTimestamp = clock.Now() lm.draining.Store(false) return lm @@ -928,7 +928,7 @@ func (m *Manager) resolveName( name string, ) (id descpb.ID, _ error) { req := []descpb.NameInfo{{ParentID: parentID, ParentSchemaID: parentSchemaID, Name: name}} - if err := m.storage.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := m.storage.db.KV().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { // Run the name lookup as high-priority, thereby pushing any intents out of // its way. We don't want schema changes to prevent name resolution/lease // acquisitions; we'd rather force them to refresh. Also this prevents @@ -1273,7 +1273,9 @@ func (m *Manager) refreshSomeLeases(ctx context.Context) { if errors.Is(err, catalog.ErrDescriptorNotFound) { // Lease renewal failed due to removed descriptor; Remove this descriptor from cache. - if err := purgeOldVersions(ctx, m.DB(), id, true /* dropped */, 0 /* minVersion */, m); err != nil { + if err := purgeOldVersions( + ctx, m.storage.db.KV(), id, true /* dropped */, 0 /* minVersion */, m, + ); err != nil { log.Warningf(ctx, "error purging leases for descriptor %d: %s", id, err) } @@ -1334,7 +1336,7 @@ SELECT "descID", version, expiration FROM system.public.lease AS OF SYSTEM TIME // The retry is required because of errors caused by node restarts. Retry 30 times. 
if err := retry.WithMaxAttempts(ctx, retryOptions, 30, func() error { var err error - rows, err = m.storage.internalExecutor.QueryBuffered( + rows, err = m.storage.db.Executor().QueryBuffered( ctx, "read orphaned leases", nil /*txn*/, sqlQuery, ) return err @@ -1379,11 +1381,6 @@ SELECT "descID", version, expiration FROM system.public.lease AS OF SYSTEM TIME }) } -// DB returns the Manager's handle to a kv.DB. -func (m *Manager) DB() *kv.DB { - return m.storage.db -} - // Codec returns the Manager's SQLCodec. func (m *Manager) Codec() keys.SQLCodec { return m.storage.codec diff --git a/pkg/sql/catalog/lease/lease_test.go b/pkg/sql/catalog/lease/lease_test.go index d7378a1d7ad5..bbfe65b599f8 100644 --- a/pkg/sql/catalog/lease/lease_test.go +++ b/pkg/sql/catalog/lease/lease_test.go @@ -43,12 +43,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqltestutils" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -85,14 +85,14 @@ func init() { lease.MoveTablePrimaryIndexIDto2 = func( ctx context.Context, t *testing.T, s serverutils.TestServerInterface, id descpb.ID, ) { - require.NoError(t, sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - t, err := col.MutableByID(txn).Table(ctx, id) + require.NoError(t, sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + t, err := 
col.MutableByID(txn.KV()).Table(ctx, id) if err != nil { return err } t.PrimaryIndex.ID = 2 t.NextIndexID++ - return col.WriteDesc(ctx, false /* kvTrace */, t, txn) + return col.WriteDesc(ctx, false /* kvTrace */, t, txn.KV()) })) } @@ -249,9 +249,8 @@ func (t *leaseTest) node(nodeID uint32) *lease.Manager { mgr = lease.NewLeaseManager( ambientCtx, nc, - cfgCpy.DB, + cfgCpy.InternalDB, cfgCpy.Clock, - cfgCpy.InternalExecutor, cfgCpy.Settings, cfgCpy.Codec, t.leaseManagerTestingKnobs, @@ -1336,7 +1335,6 @@ func TestLeaseRenewedAutomatically(testingT *testing.T) { var testAcquiredCount int32 var testAcquisitionBlockCount int32 - params := createTestServerParams() params.Knobs = base.TestingKnobs{ SQLLeaseManager: &lease.ManagerTestingKnobs{ @@ -1347,7 +1345,7 @@ func TestLeaseRenewedAutomatically(testingT *testing.T) { if err != nil { return } - if !catalog.IsSystemDescriptor(desc) { + if _, isTable := desc.(catalog.TableDescriptor); isTable && !catalog.IsSystemDescriptor(desc) { atomic.AddInt32(&testAcquiredCount, 1) } }, @@ -1791,10 +1789,10 @@ func TestLeaseRenewedPeriodically(testingT *testing.T) { ctx := context.Background() var mu syncutil.Mutex - releasedIDs := make(map[descpb.ID]struct{}) - + releasedIDs := catalog.DescriptorIDSet{} var testAcquiredCount int32 var testAcquisitionBlockCount int32 + var expected catalog.DescriptorIDSet params := createTestServerParams() params.Knobs = base.TestingKnobs{ @@ -1807,16 +1805,18 @@ func TestLeaseRenewedPeriodically(testingT *testing.T) { atomic.AddInt32(&testAcquiredCount, 1) } }, - LeaseReleasedEvent: func(id descpb.ID, _ descpb.DescriptorVersion, _ error) { - if uint32(id) < bootstrap.TestingMinUserDescID() { - return - } + LeaseReleasedEvent: func(id descpb.ID, v descpb.DescriptorVersion, err error) { mu.Lock() defer mu.Unlock() - releasedIDs[id] = struct{}{} + if !expected.Contains(id) { + return + } + releasedIDs.Add(id) }, LeaseAcquireResultBlockEvent: func(typ lease.AcquireType, id descpb.ID) { - if 
uint32(id) < bootstrap.TestingMinUserDescID() || typ == lease.AcquireBackground { + mu.Lock() + defer mu.Unlock() + if !expected.Contains(id) || typ == lease.AcquireBackground { return } atomic.AddInt32(&testAcquisitionBlockCount, 1) @@ -1848,19 +1848,23 @@ CREATE TABLE t.test2 (); t.Fatal(err) } - test1Desc := desctestutils.TestingGetPublicTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "test2") + test1Desc := desctestutils.TestingGetPublicTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "test1") test2Desc := desctestutils.TestingGetPublicTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "test2") dbID := test2Desc.GetParentID() + func() { + mu.Lock() + defer mu.Unlock() + expected = catalog.MakeDescriptorIDSet(test1Desc.GetID(), test2Desc.GetID()) + atomic.StoreInt32(&testAcquisitionBlockCount, 0) + }() - atomic.StoreInt32(&testAcquisitionBlockCount, 0) - - numReleasedLeases := func() int { + releasedLeases := func() catalog.DescriptorIDSet { mu.Lock() defer mu.Unlock() - return len(releasedIDs) + return catalog.MakeDescriptorIDSet(releasedIDs.Ordered()...) } - if count := numReleasedLeases(); count != 0 { - t.Fatalf("expected no leases to be releases, released %d", count) + if released := releasedLeases(); released.Len() != 0 { + t.Fatalf("expected no leases to be released, released %v", released.Ordered()) } // Acquire a lease on test1 by name. 
@@ -1902,9 +1906,9 @@ CREATE TABLE t.test2 (); if count := atomic.LoadInt32(&testAcquiredCount); count <= 4 { return errors.Errorf("expected more than 4 leases to be acquired, but acquired %d times", count) } - - if count := numReleasedLeases(); count != 2 { - return errors.Errorf("expected 2 leases to be releases, released %d", count) + released := releasedLeases() + if notYetReleased := expected.Difference(released); notYetReleased.Len() != 0 { + return errors.Errorf("expected %v to be released, released %v", expected.Ordered(), released.Ordered()) } return nil }) @@ -2432,13 +2436,13 @@ func TestLeaseWithOfflineTables(t *testing.T) { setTableState := func(expected descpb.DescriptorState, next descpb.DescriptorState) { execCfg := s.ExecutorConfig().(sql.ExecutorConfig) require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { - desc, err := descsCol.MutableByID(txn).Table(ctx, testTableID()) + desc, err := descsCol.MutableByID(txn.KV()).Table(ctx, testTableID()) require.NoError(t, err) require.Equal(t, desc.State, expected) desc.State = next - return descsCol.WriteDesc(ctx, false /* kvTrace */, desc, txn) + return descsCol.WriteDesc(ctx, false /* kvTrace */, desc, txn.KV()) })) // Wait for the lease manager's refresh worker to have processed the @@ -2806,16 +2810,16 @@ CREATE TABLE d1.t2 (name int); cfg := s.ExecutorConfig().(sql.ExecutorConfig) var tableID descpb.ID require.NoError(t, sql.DescsTxn(ctx, &cfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { tn := tree.NewTableNameWithSchema("d1", "public", "t1") - _, tableDesc, err := descs.PrefixAndMutableTable(ctx, descriptors.MutableByName(txn), tn) + _, tableDesc, err := descs.PrefixAndMutableTable(ctx, descriptors.MutableByName(txn.KV()), tn) if err != nil { return err } 
tableID = tableDesc.GetID() tableDesc.SetOffline("For unit test") - err = descriptors.WriteDesc(ctx, false, tableDesc, txn) + err = descriptors.WriteDesc(ctx, false, tableDesc, txn.KV()) if err != nil { return err } @@ -2824,21 +2828,21 @@ CREATE TABLE d1.t2 (name int); go func() { err := sql.DescsTxn(ctx, &cfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { close(waitForRqstFilter) mu.Lock() waitForRqstFilter = make(chan chan struct{}) - txnID = txn.ID() + txnID = txn.KV().ID() mu.Unlock() // Online the descriptor by making it public - tableDesc, err := descriptors.MutableByID(txn).Table(ctx, tableID) + tableDesc, err := descriptors.MutableByID(txn.KV()).Table(ctx, tableID) if err != nil { return err } tableDesc.SetPublic() - err = descriptors.WriteDesc(ctx, false, tableDesc, txn) + err = descriptors.WriteDesc(ctx, false, tableDesc, txn.KV()) if err != nil { return err } @@ -2850,7 +2854,7 @@ CREATE TABLE d1.t2 (name int); <-notify // Select from an unrelated table - _, err = s.InternalExecutor().(sqlutil.InternalExecutor).ExecEx(ctx, "inline-exec", txn, + _, err = txn.ExecEx(ctx, "inline-exec", txn.KV(), sessiondata.RootUserSessionDataOverride, "insert into d1.t2 values (10);") return err diff --git a/pkg/sql/catalog/lease/storage.go b/pkg/sql/catalog/lease/storage.go index 10d4dd7a3298..16fceab5eac2 100644 --- a/pkg/sql/catalog/lease/storage.go +++ b/pkg/sql/catalog/lease/storage.go @@ -27,10 +27,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/internal/catkv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/internal/validate" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - 
"github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/grpcutil" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -47,14 +47,13 @@ import ( // the manager. Some of these fields belong on the manager, in any case, since // they're only used by the manager and not by the store itself. type storage struct { - nodeIDContainer *base.SQLIDContainer - db *kv.DB - clock *hlc.Clock - internalExecutor sqlutil.InternalExecutor - settings *cluster.Settings - codec keys.SQLCodec - regionPrefix *atomic.Value - sysDBCache *catkv.SystemDatabaseCache + nodeIDContainer *base.SQLIDContainer + db isql.DB + clock *hlc.Clock + settings *cluster.Settings + codec keys.SQLCodec + regionPrefix *atomic.Value + sysDBCache *catkv.SystemDatabaseCache // group is used for all calls made to acquireNodeLease to prevent // concurrent lease acquisitions from the store. @@ -175,7 +174,7 @@ func (s storage) acquire( // Run a retry loop to deal with AmbiguousResultErrors. All other error types // are propagated up to the caller. 
for r := retry.StartWithCtx(ctx, retry.Options{}); r.Next(); { - err := s.db.Txn(ctx, acquireInTxn) + err := s.db.KV().Txn(ctx, acquireInTxn) var pErr *roachpb.AmbiguousResultError switch { case errors.As(err, &pErr): @@ -251,7 +250,7 @@ func (s storage) getForExpiration( ctx context.Context, expiration hlc.Timestamp, id descpb.ID, ) (catalog.Descriptor, error) { var desc catalog.Descriptor - err := s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + err := s.db.KV().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { prevTimestamp := expiration.Prev() err := txn.SetFixedTimestamp(ctx, prevTimestamp) if err != nil { diff --git a/pkg/sql/catalog/resolver/BUILD.bazel b/pkg/sql/catalog/resolver/BUILD.bazel index bbbb5ecd86bb..9dc8d921e06e 100644 --- a/pkg/sql/catalog/resolver/BUILD.bazel +++ b/pkg/sql/catalog/resolver/BUILD.bazel @@ -33,7 +33,6 @@ go_test( deps = [ ":resolver", "//pkg/base", - "//pkg/kv", "//pkg/security/securityassets", "//pkg/security/securitytest", "//pkg/security/username", @@ -45,6 +44,7 @@ go_test( "//pkg/sql/catalog/descs", "//pkg/sql/catalog/schemadesc", "//pkg/sql/catalog/tabledesc", + "//pkg/sql/isql", "//pkg/sql/parser", "//pkg/sql/schemachanger/scbuild", "//pkg/sql/sem/catconstants", diff --git a/pkg/sql/catalog/resolver/resolver_test.go b/pkg/sql/catalog/resolver/resolver_test.go index 650bbaba1c00..63502c675376 100644 --- a/pkg/sql/catalog/resolver/resolver_test.go +++ b/pkg/sql/catalog/resolver/resolver_test.go @@ -16,7 +16,6 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" @@ -26,6 +25,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + 
"github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scbuild" "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" @@ -613,10 +613,10 @@ CREATE TABLE c (a INT, INDEX idx2(a));`, require.NoError(t, protoutil.Unmarshal(sessionSerialized, &sessionData)) } - err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { + err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { execCfg := s.ExecutorConfig().(sql.ExecutorConfig) planner, cleanup := sql.NewInternalPlanner( - "resolve-index", txn, username.RootUserName(), &sql.MemoryMetrics{}, &execCfg, sessionData, + "resolve-index", txn.KV(), username.RootUserName(), &sql.MemoryMetrics{}, &execCfg, sessionData, ) defer cleanup() @@ -627,7 +627,7 @@ CREATE TABLE c (a INT, INDEX idx2(a));`, searchPath := ec.SessionData().SearchPath.GetPathArray() ec.SessionData().SearchPath = ec.SessionData().SearchPath.UpdatePaths(append([]string{"test_sc"}, searchPath...)) schemaResolver := sql.NewSkippingCacheSchemaResolver( - col, ec.SessionDataStack, txn, planner.(scbuild.AuthorizationAccessor), + col, ec.SessionDataStack, txn.KV(), planner.(scbuild.AuthorizationAccessor), ) // Make sure we're looking at correct default db and search path. 
@@ -676,12 +676,12 @@ CREATE TABLE baz (i INT PRIMARY KEY, s STRING); CREATE INDEX baz_idx ON baz (s); `) - err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { + err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { tn := tree.NewTableNameWithSchema("defaultdb", "public", "baz") - _, tbl, err := descs.PrefixAndMutableTable(ctx, col.MutableByName(txn), tn) + _, tbl, err := descs.PrefixAndMutableTable(ctx, col.MutableByName(txn.KV()), tn) require.NoError(t, err) tbl.SetOffline("testing-index-resolving") - err = col.WriteDesc(ctx, false, tbl, txn) + err = col.WriteDesc(ctx, false, tbl, txn.KV()) require.NoError(t, err) return nil }) @@ -694,10 +694,10 @@ CREATE INDEX baz_idx ON baz (s); require.NoError(t, protoutil.Unmarshal(sessionSerialized, &sessionData)) } - err = sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { + err = sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { execCfg := s.ExecutorConfig().(sql.ExecutorConfig) planner, cleanup := sql.NewInternalPlanner( - "resolve-index", txn, username.RootUserName(), &sql.MemoryMetrics{}, &execCfg, sessionData, + "resolve-index", txn.KV(), username.RootUserName(), &sql.MemoryMetrics{}, &execCfg, sessionData, ) defer cleanup() @@ -705,7 +705,7 @@ CREATE INDEX baz_idx ON baz (s); // Set "defaultdb" as current database. ec.SessionData().Database = "defaultdb" schemaResolver := sql.NewSkippingCacheSchemaResolver( - col, ec.SessionDataStack, txn, planner.(scbuild.AuthorizationAccessor), + col, ec.SessionDataStack, txn.KV(), planner.(scbuild.AuthorizationAccessor), ) // Make sure we're looking at correct default db and search path. 
require.Equal(t, "defaultdb", schemaResolver.CurrentDatabase()) diff --git a/pkg/sql/catalog/schematelemetry/BUILD.bazel b/pkg/sql/catalog/schematelemetry/BUILD.bazel index 4beb7bc68995..faf2ec26f4b0 100644 --- a/pkg/sql/catalog/schematelemetry/BUILD.bazel +++ b/pkg/sql/catalog/schematelemetry/BUILD.bazel @@ -13,7 +13,6 @@ go_library( deps = [ "//pkg/jobs", "//pkg/jobs/jobspb", - "//pkg/kv", "//pkg/scheduledjobs", "//pkg/security/username", "//pkg/server/telemetry", @@ -25,11 +24,11 @@ go_library( "//pkg/sql/catalog/nstree", "//pkg/sql/catalog/redact", "//pkg/sql/catalog/schematelemetry/schematelemetrycontroller", + "//pkg/sql/isql", "//pkg/sql/sem/builtins", "//pkg/sql/sem/builtins/builtinconstants", "//pkg/sql/sem/tree", "//pkg/sql/sqltelemetry", - "//pkg/sql/sqlutil", "//pkg/util/hlc", "//pkg/util/log", "//pkg/util/log/eventpb", diff --git a/pkg/sql/catalog/schematelemetry/scheduled_job_executor.go b/pkg/sql/catalog/schematelemetry/scheduled_job_executor.go index 4625e3e88939..4b937c646950 100644 --- a/pkg/sql/catalog/schematelemetry/scheduled_job_executor.go +++ b/pkg/sql/catalog/schematelemetry/scheduled_job_executor.go @@ -15,14 +15,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schematelemetry/schematelemetrycontroller" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/errors" ) @@ -49,7 +48,7 @@ func (s schemaTelemetryExecutor) OnDrop( scheduleControllerEnv scheduledjobs.ScheduleControllerEnv, env 
scheduledjobs.JobSchedulerEnv, schedule *jobs.ScheduledJob, - txn *kv.Txn, + txn isql.Txn, descsCol *descs.Collection, ) (int, error) { return 0, errScheduleUndroppable @@ -60,10 +59,10 @@ var errScheduleUndroppable = errors.New("SQL schema telemetry schedule cannot be // ExecuteJob is part of the jobs.ScheduledJobExecutor interface. func (s schemaTelemetryExecutor) ExecuteJob( ctx context.Context, + txn isql.Txn, cfg *scheduledjobs.JobExecutionConfig, env scheduledjobs.JobSchedulerEnv, sj *jobs.ScheduledJob, - txn *kv.Txn, ) (err error) { defer func() { if err == nil { @@ -72,7 +71,7 @@ func (s schemaTelemetryExecutor) ExecuteJob( s.metrics.NumFailed.Inc(1) } }() - p, cleanup := cfg.PlanHookMaker("invoke-schema-telemetry", txn, username.NodeUserName()) + p, cleanup := cfg.PlanHookMaker("invoke-schema-telemetry", txn.KV(), username.NodeUserName()) defer cleanup() jr := p.(sql.PlanHookState).ExecCfg().JobRegistry r := schematelemetrycontroller.CreateSchemaTelemetryJobRecord(jobs.CreatedByScheduledJobs, sj.ScheduleID()) @@ -83,13 +82,12 @@ func (s schemaTelemetryExecutor) ExecuteJob( // NotifyJobTermination is part of the jobs.ScheduledJobExecutor interface. func (s schemaTelemetryExecutor) NotifyJobTermination( ctx context.Context, + txn isql.Txn, jobID jobspb.JobID, jobStatus jobs.Status, details jobspb.Details, env scheduledjobs.JobSchedulerEnv, sj *jobs.ScheduledJob, - ex sqlutil.InternalExecutor, - txn *kv.Txn, ) error { switch jobStatus { case jobs.StatusFailed: @@ -110,12 +108,7 @@ func (s schemaTelemetryExecutor) Metrics() metric.Struct { // GetCreateScheduleStatement is part of the jobs.ScheduledJobExecutor interface. 
func (s schemaTelemetryExecutor) GetCreateScheduleStatement( - ctx context.Context, - env scheduledjobs.JobSchedulerEnv, - txn *kv.Txn, - descsCol *descs.Collection, - sj *jobs.ScheduledJob, - ex sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, env scheduledjobs.JobSchedulerEnv, sj *jobs.ScheduledJob, ) (string, error) { // This schedule cannot be created manually. return "", nil diff --git a/pkg/sql/catalog/schematelemetry/schema_telemetry_event.go b/pkg/sql/catalog/schematelemetry/schema_telemetry_event.go index 682c86a7ffb3..896d57dd0f58 100644 --- a/pkg/sql/catalog/schematelemetry/schema_telemetry_event.go +++ b/pkg/sql/catalog/schematelemetry/schema_telemetry_event.go @@ -14,13 +14,13 @@ import ( "context" "math/rand" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/nstree" "github.com/cockroachdb/cockroach/pkg/sql/catalog/redact" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" @@ -48,12 +48,12 @@ func CollectClusterSchemaForTelemetry( ) ([]logpb.EventPayload, error) { // Scrape the raw catalog. 
var raw nstree.Catalog - if err := sql.DescsTxn(ctx, cfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - err := txn.SetFixedTimestamp(ctx, asOf) + if err := sql.DescsTxn(ctx, cfg, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + err := txn.KV().SetFixedTimestamp(ctx, asOf) if err != nil { return err } - raw, err = col.GetAllFromStorageUnvalidated(ctx, txn) + raw, err = col.GetAllFromStorageUnvalidated(ctx, txn.KV()) return err }); err != nil { return nil, err diff --git a/pkg/sql/catalog/schematelemetry/schematelemetrycontroller/BUILD.bazel b/pkg/sql/catalog/schematelemetry/schematelemetrycontroller/BUILD.bazel index 54f3221278f4..1bf71e55fc41 100644 --- a/pkg/sql/catalog/schematelemetry/schematelemetrycontroller/BUILD.bazel +++ b/pkg/sql/catalog/schematelemetry/schematelemetrycontroller/BUILD.bazel @@ -28,14 +28,13 @@ go_library( "//pkg/clusterversion", "//pkg/jobs", "//pkg/jobs/jobspb", - "//pkg/kv", "//pkg/scheduledjobs", "//pkg/security/username", "//pkg/settings", "//pkg/settings/cluster", + "//pkg/sql/isql", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/util/log", "//pkg/util/mon", "//pkg/util/retry", diff --git a/pkg/sql/catalog/schematelemetry/schematelemetrycontroller/controller.go b/pkg/sql/catalog/schematelemetry/schematelemetrycontroller/controller.go index d4debef8151c..afe278b5627a 100644 --- a/pkg/sql/catalog/schematelemetry/schematelemetrycontroller/controller.go +++ b/pkg/sql/catalog/schematelemetry/schematelemetrycontroller/controller.go @@ -20,14 +20,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + 
"github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/mon" "github.com/cockroachdb/cockroach/pkg/util/retry" @@ -75,8 +74,7 @@ var ErrVersionGate = errors.New("SQL schema telemetry jobs or schedules not supp // of the database (e.g. status server, builtins) to control the behavior of the // SQL schema telemetry subsystem. type Controller struct { - db *kv.DB - ie sqlutil.InternalExecutor + db isql.DB mon *mon.BytesMonitor st *cluster.Settings jr *jobs.Registry @@ -89,8 +87,7 @@ type Controller struct { // sql.Server. This is the reason why it and the definition of the Controller // object live in their own package separate from schematelemetry. func NewController( - db *kv.DB, - ie sqlutil.InternalExecutor, + db isql.DB, mon *mon.BytesMonitor, st *cluster.Settings, jr *jobs.Registry, @@ -98,7 +95,6 @@ func NewController( ) *Controller { return &Controller{ db: db, - ie: ie, mon: mon, st: st, jr: jr, @@ -123,7 +119,7 @@ func (c *Controller) Start(ctx context.Context, stopper *stop.Stopper) { case <-stopper.ShouldQuiesce(): return case <-ch: - updateSchedule(stopCtx, c.db, c.ie, c.st, c.clusterID()) + updateSchedule(stopCtx, c.db, c.st, c.clusterID()) } } }) @@ -148,13 +144,7 @@ func (c *Controller) Start(ctx context.Context, stopper *stop.Stopper) { }) } -func updateSchedule( - ctx context.Context, - db *kv.DB, - ie sqlutil.InternalExecutor, - st *cluster.Settings, - clusterID uuid.UUID, -) { +func updateSchedule(ctx context.Context, db isql.DB, st *cluster.Settings, clusterID uuid.UUID) { if !st.Version.IsActive(ctx, clusterversion.V22_2SQLSchemaTelemetryScheduledJobs) { log.Infof(ctx, "failed to update SQL schema telemetry schedule: %s", ErrVersionGate) } @@ -163,18 +153,18 @@ func updateSchedule( MaxBackoff: 10 * 
time.Minute, } for r := retry.StartWithCtx(ctx, retryOptions); r.Next(); { - if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { // Ensure schedule exists. var sj *jobs.ScheduledJob { - id, err := GetSchemaTelemetryScheduleID(ctx, ie, txn) + id, err := GetSchemaTelemetryScheduleID(ctx, txn) if err != nil { return err } if id == 0 { - sj, err = CreateSchemaTelemetrySchedule(ctx, ie, txn, st) + sj, err = CreateSchemaTelemetrySchedule(ctx, txn, st) } else { - sj, err = jobs.LoadScheduledJob(ctx, scheduledjobs.ProdJobSchedulerEnv, id, ie, txn) + sj, err = jobs.ScheduledJobTxn(txn).Load(ctx, scheduledjobs.ProdJobSchedulerEnv, id) } if err != nil { return err @@ -191,7 +181,7 @@ func updateSchedule( return err } sj.SetScheduleStatus(string(jobs.StatusPending)) - return sj.Update(ctx, ie, txn) + return jobs.ScheduledJobTxn(txn).Update(ctx, sj) }); err != nil && ctx.Err() == nil { log.Warningf(ctx, "failed to update SQL schema telemetry schedule: %s", err) } else { @@ -237,7 +227,7 @@ func (c *Controller) CreateSchemaTelemetryJob( return 0, ErrVersionGate } var j *jobs.Job - if err := c.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + if err := c.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { r := CreateSchemaTelemetryJobRecord(createdByName, createdByID) j, err = c.jr.CreateJobWithTxn(ctx, r, c.jr.MakeJobID(), txn) return err @@ -265,9 +255,9 @@ func CreateSchemaTelemetryJobRecord(createdByName string, createdByID int64) job // the scheduled job subsystem so that the schema telemetry job can be run // periodically. This is done during the cluster startup upgrade. 
func CreateSchemaTelemetrySchedule( - ctx context.Context, ie sqlutil.InternalExecutor, txn *kv.Txn, st *cluster.Settings, + ctx context.Context, txn isql.Txn, st *cluster.Settings, ) (*jobs.ScheduledJob, error) { - id, err := GetSchemaTelemetryScheduleID(ctx, ie, txn) + id, err := GetSchemaTelemetryScheduleID(ctx, txn) if err != nil { return nil, err } @@ -300,7 +290,7 @@ func CreateSchemaTelemetrySchedule( ) scheduledJob.SetScheduleStatus(string(jobs.StatusPending)) - if err = scheduledJob.Create(ctx, ie, txn); err != nil { + if err = jobs.ScheduledJobTxn(txn).Create(ctx, scheduledJob); err != nil { return nil, err } @@ -309,13 +299,11 @@ func CreateSchemaTelemetrySchedule( // GetSchemaTelemetryScheduleID returns the ID of the schema telemetry schedule // if it exists, 0 if it does not exist yet. -func GetSchemaTelemetryScheduleID( - ctx context.Context, ie sqlutil.InternalExecutor, txn *kv.Txn, -) (id int64, _ error) { - row, err := ie.QueryRowEx( +func GetSchemaTelemetryScheduleID(ctx context.Context, txn isql.Txn) (id int64, _ error) { + row, err := txn.QueryRowEx( ctx, "check-existing-schema-telemetry-schedule", - txn, + txn.KV(), sessiondata.NodeUserSessionDataOverride, `SELECT schedule_id FROM system.scheduled_jobs WHERE schedule_name = $1 ORDER BY schedule_id ASC LIMIT 1`, SchemaTelemetryScheduleName, diff --git a/pkg/sql/check.go b/pkg/sql/check.go index fcddb6e10565..09c922b44965 100644 --- a/pkg/sql/check.go +++ b/pkg/sql/check.go @@ -18,7 +18,6 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/jobs" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" @@ -26,12 +25,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/flowinfra" + 
"github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/semenumpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/retry" @@ -49,11 +48,10 @@ import ( func validateCheckExpr( ctx context.Context, semaCtx *tree.SemaContext, - txn *kv.Txn, + txn isql.Txn, sessionData *sessiondata.SessionData, exprStr string, tableDesc *tabledesc.Mutable, - ie sqlutil.InternalExecutor, indexIDForValidation descpb.IndexID, ) error { expr, err := schemaexpr.FormatExprForDisplay(ctx, tableDesc, exprStr, semaCtx, sessionData, tree.FmtParsable) @@ -67,10 +65,10 @@ func validateCheckExpr( queryStr = fmt.Sprintf(`SELECT %s FROM [%d AS t]@[%d] WHERE NOT (%s) LIMIT 1`, columns, tableDesc.GetID(), indexIDForValidation, exprStr) } log.Infof(ctx, "validating check constraint %q with query %q", expr, queryStr) - rows, err := ie.QueryRowEx( + rows, err := txn.QueryRowEx( ctx, "validate check constraint", - txn, + txn.KV(), sessiondata.RootUserSessionDataOverride, queryStr) if err != nil { @@ -282,12 +280,11 @@ func nonMatchingRowQuery( // reuse an existing kv.Txn safely. 
func validateForeignKey( ctx context.Context, + txn isql.Txn, srcTable *tabledesc.Mutable, targetTable catalog.TableDescriptor, fk *descpb.ForeignKeyConstraint, indexIDForValidation descpb.IndexID, - txn *kv.Txn, - ie sqlutil.InternalExecutor, ) error { nCols := len(fk.OriginColumnIDs) @@ -314,8 +311,8 @@ func validateForeignKey( query, ) - values, err := ie.QueryRowEx(ctx, "validate foreign key constraint", - txn, + values, err := txn.QueryRowEx(ctx, "validate foreign key constraint", + txn.KV(), sessiondata.NodeUserSessionDataOverride, query) if err != nil { return err @@ -338,7 +335,7 @@ func validateForeignKey( query, ) - values, err := ie.QueryRowEx(ctx, "validate fk constraint", txn, + values, err := txn.QueryRowEx(ctx, "validate fk constraint", txn.KV(), sessiondata.NodeUserSessionDataOverride, query) if err != nil { return err @@ -446,7 +443,7 @@ func (p *planner) RevalidateUniqueConstraintsInCurrentDB(ctx context.Context) er return err } return RevalidateUniqueConstraintsInTable( - ctx, p.Txn(), p.User(), p.ExecCfg().InternalExecutor, tableDesc, + ctx, p.InternalSQLTxn(), p.User(), tableDesc, ) }) } @@ -461,7 +458,7 @@ func (p *planner) RevalidateUniqueConstraintsInTable(ctx context.Context, tableI return err } return RevalidateUniqueConstraintsInTable( - ctx, p.Txn(), p.User(), p.ExecCfg().InternalExecutor, tableDesc, + ctx, p.InternalSQLTxn(), p.User(), tableDesc, ) } @@ -492,8 +489,7 @@ func (p *planner) RevalidateUniqueConstraint( index.IndexDesc().KeyColumnIDs[index.ImplicitPartitioningColumnCount():], index.GetPredicate(), 0, /* indexIDForValidation */ - p.ExecCfg().InternalExecutor, - p.Txn(), + p.InternalSQLTxn(), p.User(), true, /* preExisting */ ) @@ -513,8 +509,7 @@ func (p *planner) RevalidateUniqueConstraint( uc.CollectKeyColumnIDs().Ordered(), uc.GetPredicate(), 0, /* indexIDForValidation */ - p.ExecCfg().InternalExecutor, - p.Txn(), + p.InternalSQLTxn(), p.User(), true, /* preExisting */ ) @@ -562,11 +557,7 @@ func 
HasVirtualUniqueConstraints(tableDesc catalog.TableDescriptor) bool { // enforced by an index. This includes implicitly partitioned UNIQUE indexes // and UNIQUE WITHOUT INDEX constraints. func RevalidateUniqueConstraintsInTable( - ctx context.Context, - txn *kv.Txn, - user username.SQLUsername, - ie sqlutil.InternalExecutor, - tableDesc catalog.TableDescriptor, + ctx context.Context, txn isql.Txn, user username.SQLUsername, tableDesc catalog.TableDescriptor, ) error { // Check implicitly partitioned UNIQUE indexes. for _, index := range tableDesc.ActiveIndexes() { @@ -578,7 +569,6 @@ func RevalidateUniqueConstraintsInTable( index.IndexDesc().KeyColumnIDs[index.ImplicitPartitioningColumnCount():], index.GetPredicate(), 0, /* indexIDForValidation */ - ie, txn, user, true, /* preExisting */ @@ -599,7 +589,6 @@ func RevalidateUniqueConstraintsInTable( uc.CollectKeyColumnIDs().Ordered(), uc.GetPredicate(), 0, /* indexIDForValidation */ - ie, txn, user, true, /* preExisting */ @@ -634,8 +623,7 @@ func validateUniqueConstraint( columnIDs []descpb.ColumnID, pred string, indexIDForValidation descpb.IndexID, - ie sqlutil.InternalExecutor, - txn *kv.Txn, + txn isql.Txn, user username.SQLUsername, preExisting bool, ) error { @@ -672,7 +660,7 @@ func validateUniqueConstraint( MaxRetries: 5, } for r := retry.StartWithCtx(ctx, retryOptions); r.Next(); { - values, err = ie.QueryRowEx(ctx, "validate unique constraint", txn, sessionDataOverride, query) + values, err = txn.QueryRowEx(ctx, "validate unique constraint", txn.KV(), sessionDataOverride, query) if err == nil { break } @@ -744,7 +732,7 @@ func (p *planner) validateTTLScheduledJobInTable( ttl := tableDesc.GetRowLevelTTL() execCfg := p.ExecCfg() - env := JobSchedulerEnv(execCfg) + env := JobSchedulerEnv(execCfg.JobsKnobs()) wrapError := func(origErr error) error { return errors.WithHintf( @@ -754,13 +742,7 @@ func (p *planner) validateTTLScheduledJobInTable( ) } - sj, err := jobs.LoadScheduledJob( - ctx, - env, - 
ttl.ScheduleID, - execCfg.InternalExecutor, - p.txn, - ) + sj, err := jobs.ScheduledJobTxn(p.InternalSQLTxn()).Load(ctx, env, ttl.ScheduleID) if err != nil { if jobs.HasScheduledJobNotFoundError(err) { return wrapError( @@ -818,8 +800,8 @@ func (p *planner) RepairTTLScheduledJobForTable(ctx context.Context, tableID int } sj, err := CreateRowLevelTTLScheduledJob( ctx, - p.ExecCfg(), - p.txn, + p.ExecCfg().JobsKnobs(), + jobs.ScheduledJobTxn(p.InternalSQLTxn()), p.User(), tableDesc.GetID(), tableDesc.GetRowLevelTTL(), diff --git a/pkg/sql/check_test.go b/pkg/sql/check_test.go index 42d601330640..29fc70d0e03d 100644 --- a/pkg/sql/check_test.go +++ b/pkg/sql/check_test.go @@ -28,8 +28,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -53,16 +53,16 @@ func TestValidateTTLScheduledJobs(t *testing.T) { { desc: "not pointing at a valid scheduled job", setup: func(t *testing.T, sqlDB *gosql.DB, kvDB *kv.DB, s serverutils.TestServerInterface, tableDesc *tabledesc.Mutable, scheduleID int64) { - require.NoError(t, sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { + require.NoError(t, sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) (err error) { // We need the collection to read the descriptor from storage for // the subsequent write to succeed. 
- tableDesc, err = col.MutableByID(txn).Table(ctx, tableDesc.GetID()) + tableDesc, err = col.MutableByID(txn.KV()).Table(ctx, tableDesc.GetID()) tableDesc.RowLevelTTL.ScheduleID = 0 tableDesc.Version++ if err != nil { return err } - return col.WriteDesc(ctx, false /* kvBatch */, tableDesc, txn) + return col.WriteDesc(ctx, false /* kvBatch */, tableDesc, txn.KV()) })) }, expectedErrRe: func(tableID descpb.ID, scheduleID int64) string { @@ -72,9 +72,10 @@ func TestValidateTTLScheduledJobs(t *testing.T) { { desc: "scheduled job points at an different table", setup: func(t *testing.T, sqlDB *gosql.DB, kvDB *kv.DB, s serverutils.TestServerInterface, tableDesc *tabledesc.Mutable, scheduleID int64) { - ie := s.InternalExecutor().(sqlutil.InternalExecutor) - require.NoError(t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - sj, err := jobs.LoadScheduledJob( + db := s.InternalDB().(isql.DB) + require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + schedules := jobs.ScheduledJobTxn(txn) + sj, err := schedules.Load( ctx, jobstest.NewJobSchedulerTestEnv( jobstest.UseSystemTables, @@ -82,8 +83,6 @@ func TestValidateTTLScheduledJobs(t *testing.T) { tree.ScheduledBackupExecutor, ), scheduleID, - ie, - txn, ) if err != nil { return err @@ -98,7 +97,7 @@ func TestValidateTTLScheduledJobs(t *testing.T) { return err } sj.SetExecutionDetails(sj.ExecutorType(), jobspb.ExecutionArguments{Args: any}) - return sj.Update(ctx, ie, txn) + return schedules.Update(ctx, sj) })) }, expectedErrRe: func(tableID descpb.ID, scheduleID int64) string { diff --git a/pkg/sql/compact_sql_stats.go b/pkg/sql/compact_sql_stats.go index 88bba49e6bbb..037cde8431b8 100644 --- a/pkg/sql/compact_sql_stats.go +++ b/pkg/sql/compact_sql_stats.go @@ -16,15 +16,14 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" 
"github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/persistedsqlstats" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/errors" @@ -42,27 +41,27 @@ var _ jobs.Resumer = &sqlStatsCompactionResumer{} func (r *sqlStatsCompactionResumer) Resume(ctx context.Context, execCtx interface{}) error { log.Infof(ctx, "starting sql stats compaction job") p := execCtx.(JobExecContext) - ie := p.ExecCfg().InternalExecutor - db := p.ExecCfg().DB var ( scheduledJobID int64 err error ) - if err = db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - scheduledJobID, err = r.getScheduleID(ctx, ie, txn, scheduledjobs.ProdJobSchedulerEnv) + if err = p.ExecCfg().InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + scheduledJobID, err = r.getScheduleID(ctx, txn, scheduledjobs.ProdJobSchedulerEnv) if err != nil { return err } if scheduledJobID != jobs.InvalidScheduleID { - r.sj, err = jobs.LoadScheduledJob(ctx, scheduledjobs.ProdJobSchedulerEnv, scheduledJobID, ie, txn) + schedules := jobs.ScheduledJobTxn(txn) + r.sj, err = schedules.Load(ctx, scheduledjobs.ProdJobSchedulerEnv, scheduledJobID) if err != nil { return err } r.sj.SetScheduleStatus(string(jobs.StatusRunning)) - return r.sj.Update(ctx, ie, txn) + + return schedules.Update(ctx, r.sj) } return nil }); err != nil { @@ -71,9 +70,8 @@ func (r *sqlStatsCompactionResumer) Resume(ctx context.Context, execCtx interfac statsCompactor := persistedsqlstats.NewStatsCompactor( r.st, - ie, - db, - ie.s.ServerMetrics.StatsMetrics.SQLStatsRemovedRows, + p.ExecCfg().InternalDB, 
+ p.ExecCfg().InternalDB.server.ServerMetrics.StatsMetrics.SQLStatsRemovedRows, p.ExecCfg().SQLStatsTestingKnobs) if err = statsCompactor.DeleteOldestEntries(ctx); err != nil { return err @@ -81,8 +79,8 @@ func (r *sqlStatsCompactionResumer) Resume(ctx context.Context, execCtx interfac return r.maybeNotifyJobTerminated( ctx, - ie, - p.ExecCfg(), + p.ExecCfg().InternalDB, + p.ExecCfg().JobsKnobs(), jobs.StatusSucceeded) } @@ -92,38 +90,33 @@ func (r *sqlStatsCompactionResumer) OnFailOrCancel( ) error { p := execCtx.(JobExecContext) execCfg := p.ExecCfg() - ie := execCfg.InternalExecutor - return r.maybeNotifyJobTerminated(ctx, ie, execCfg, jobs.StatusFailed) + return r.maybeNotifyJobTerminated(ctx, execCfg.InternalDB, execCfg.JobsKnobs(), jobs.StatusFailed) } // maybeNotifyJobTerminated will notify the job termination // (with termination status). func (r *sqlStatsCompactionResumer) maybeNotifyJobTerminated( - ctx context.Context, ie sqlutil.InternalExecutor, exec *ExecutorConfig, status jobs.Status, + ctx context.Context, db isql.DB, jobKnobs *jobs.TestingKnobs, status jobs.Status, ) error { log.Infof(ctx, "sql stats compaction job terminated with status = %s", status) - if r.sj != nil { - env := scheduledjobs.ProdJobSchedulerEnv - if knobs, ok := exec.DistSQLSrv.TestingKnobs.JobsTestingKnobs.(*jobs.TestingKnobs); ok { - if knobs.JobSchedulerEnv != nil { - env = knobs.JobSchedulerEnv - } - } - if err := jobs.NotifyJobTermination( - ctx, env, r.job.ID(), status, r.job.Details(), r.sj.ScheduleID(), - ie, nil /* txn */); err != nil { - return err - } - + if r.sj == nil { return nil } - return nil + return db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + env := scheduledjobs.ProdJobSchedulerEnv + if jobKnobs != nil && jobKnobs.JobSchedulerEnv != nil { + env = jobKnobs.JobSchedulerEnv + } + return jobs.NotifyJobTermination( + ctx, txn, env, r.job.ID(), status, r.job.Details(), r.sj.ScheduleID(), + ) + }) } func (r *sqlStatsCompactionResumer) getScheduleID( 
- ctx context.Context, ie sqlutil.InternalExecutor, txn *kv.Txn, env scheduledjobs.JobSchedulerEnv, + ctx context.Context, txn isql.Txn, env scheduledjobs.JobSchedulerEnv, ) (scheduleID int64, _ error) { - row, err := ie.QueryRowEx(ctx, "lookup-sql-stats-schedule", txn, + row, err := txn.QueryRowEx(ctx, "lookup-sql-stats-schedule", txn.KV(), sessiondata.NodeUserSessionDataOverride, fmt.Sprintf("SELECT created_by_id FROM %s WHERE id=$1 AND created_by_type=$2", env.SystemJobsTableName()), r.job.ID(), jobs.CreatedByScheduledJobs, @@ -165,7 +158,7 @@ func (e *scheduledSQLStatsCompactionExecutor) OnDrop( scheduleControllerEnv scheduledjobs.ScheduleControllerEnv, env scheduledjobs.JobSchedulerEnv, schedule *jobs.ScheduledJob, - txn *kv.Txn, + txn isql.Txn, descsCol *descs.Collection, ) (int, error) { return 0, persistedsqlstats.ErrScheduleUndroppable @@ -174,10 +167,10 @@ func (e *scheduledSQLStatsCompactionExecutor) OnDrop( // ExecuteJob implements the jobs.ScheduledJobExecutor interface. func (e *scheduledSQLStatsCompactionExecutor) ExecuteJob( ctx context.Context, + txn isql.Txn, cfg *scheduledjobs.JobExecutionConfig, env scheduledjobs.JobSchedulerEnv, sj *jobs.ScheduledJob, - txn *kv.Txn, ) error { if err := e.createSQLStatsCompactionJob(ctx, cfg, sj, txn); err != nil { e.metrics.NumFailed.Inc(1) @@ -188,9 +181,9 @@ func (e *scheduledSQLStatsCompactionExecutor) ExecuteJob( } func (e *scheduledSQLStatsCompactionExecutor) createSQLStatsCompactionJob( - ctx context.Context, cfg *scheduledjobs.JobExecutionConfig, sj *jobs.ScheduledJob, txn *kv.Txn, + ctx context.Context, cfg *scheduledjobs.JobExecutionConfig, sj *jobs.ScheduledJob, txn isql.Txn, ) error { - p, cleanup := cfg.PlanHookMaker("invoke-sql-stats-compact", txn, username.NodeUserName()) + p, cleanup := cfg.PlanHookMaker("invoke-sql-stats-compact", txn.KV(), username.NodeUserName()) defer cleanup() _, err := @@ -209,13 +202,12 @@ func (e *scheduledSQLStatsCompactionExecutor) createSQLStatsCompactionJob( // 
NotifyJobTermination implements the jobs.ScheduledJobExecutor interface. func (e *scheduledSQLStatsCompactionExecutor) NotifyJobTermination( ctx context.Context, + txn isql.Txn, jobID jobspb.JobID, jobStatus jobs.Status, details jobspb.Details, env scheduledjobs.JobSchedulerEnv, sj *jobs.ScheduledJob, - ex sqlutil.InternalExecutor, - txn *kv.Txn, ) error { if jobStatus == jobs.StatusFailed { jobs.DefaultHandleFailedRun(sj, "sql stats compaction %d failed", jobID) @@ -239,12 +231,7 @@ func (e *scheduledSQLStatsCompactionExecutor) Metrics() metric.Struct { // GetCreateScheduleStatement implements the jobs.ScheduledJobExecutor interface. func (e *scheduledSQLStatsCompactionExecutor) GetCreateScheduleStatement( - ctx context.Context, - env scheduledjobs.JobSchedulerEnv, - txn *kv.Txn, - descsCol *descs.Collection, - sj *jobs.ScheduledJob, - ex sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, env scheduledjobs.JobSchedulerEnv, sj *jobs.ScheduledJob, ) (string, error) { return "SELECT crdb_internal.schedule_sql_stats_compact()", nil } diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go index de31b039d505..b0d4d4dea05c 100644 --- a/pkg/sql/conn_executor.go +++ b/pkg/sql/conn_executor.go @@ -375,8 +375,7 @@ func NewServer(cfg *ExecutorConfig, pool *mon.BytesMonitor) *Server { nil, /* reportedProvider */ cfg.SQLStatsTestingKnobs, ) - reportedSQLStatsController := - reportedSQLStats.GetController(cfg.SQLStatusServer, cfg.DB, cfg.InternalExecutor) + reportedSQLStatsController := reportedSQLStats.GetController(cfg.SQLStatusServer) memSQLStats := sslocal.New( cfg.Settings, sqlstats.MaxMemSQLStatsStmtFingerprints, @@ -416,27 +415,30 @@ func NewServer(cfg *ExecutorConfig, pool *mon.BytesMonitor) *Server { sqlStatsInternalExecutorMonitor := MakeInternalExecutorMemMonitor(MemoryMetrics{}, s.GetExecutorConfig().Settings) sqlStatsInternalExecutorMonitor.StartNoReserved(context.Background(), s.GetBytesMonitor()) - sqlStatsInternalExecutor := 
MakeInternalExecutor(s, MemoryMetrics{}, sqlStatsInternalExecutorMonitor) persistedSQLStats := persistedsqlstats.New(&persistedsqlstats.Config{ Settings: s.cfg.Settings, - InternalExecutor: &sqlStatsInternalExecutor, InternalExecutorMonitor: sqlStatsInternalExecutorMonitor, - KvDB: cfg.DB, - SQLIDContainer: cfg.NodeInfo.NodeID, - JobRegistry: s.cfg.JobRegistry, - Knobs: cfg.SQLStatsTestingKnobs, - FlushCounter: serverMetrics.StatsMetrics.SQLStatsFlushStarted, - FailureCounter: serverMetrics.StatsMetrics.SQLStatsFlushFailure, - FlushDuration: serverMetrics.StatsMetrics.SQLStatsFlushDuration, + DB: NewInternalDB( + s, MemoryMetrics{}, sqlStatsInternalExecutorMonitor, + ), + SQLIDContainer: cfg.NodeInfo.NodeID, + JobRegistry: s.cfg.JobRegistry, + Knobs: cfg.SQLStatsTestingKnobs, + FlushCounter: serverMetrics.StatsMetrics.SQLStatsFlushStarted, + FailureCounter: serverMetrics.StatsMetrics.SQLStatsFlushFailure, + FlushDuration: serverMetrics.StatsMetrics.SQLStatsFlushDuration, }, memSQLStats) s.sqlStats = persistedSQLStats s.sqlStatsController = persistedSQLStats.GetController(cfg.SQLStatusServer) schemaTelemetryIEMonitor := MakeInternalExecutorMemMonitor(MemoryMetrics{}, s.GetExecutorConfig().Settings) schemaTelemetryIEMonitor.StartNoReserved(context.Background(), s.GetBytesMonitor()) - schemaTelemetryIE := MakeInternalExecutor(s, MemoryMetrics{}, schemaTelemetryIEMonitor) s.schemaTelemetryController = schematelemetrycontroller.NewController( - s.cfg.DB, &schemaTelemetryIE, schemaTelemetryIEMonitor, s.cfg.Settings, s.cfg.JobRegistry, + NewInternalDB( + s, MemoryMetrics{}, schemaTelemetryIEMonitor, + ), + schemaTelemetryIEMonitor, + s.cfg.Settings, s.cfg.JobRegistry, s.cfg.NodeInfo.LogicalClusterID, ) s.indexUsageStatsController = idxusage.NewController(cfg.SQLStatusServer) @@ -718,7 +720,7 @@ func (s *Server) SetupConn( memMetrics MemoryMetrics, onDefaultIntSizeChange func(newSize int32), ) (ConnectionHandler, error) { - sd := s.newSessionData(args) + sd := 
newSessionData(args) sds := sessiondata.NewStack(sd) // Set the SessionData from args.SessionDefaults. This also validates the // respective values. @@ -840,7 +842,7 @@ func (s *Server) GetLocalIndexStatistics() *idxusage.LocalIndexUsageStats { } // newSessionData a SessionData that can be passed to newConnExecutor. -func (s *Server) newSessionData(args SessionArgs) *sessiondata.SessionData { +func newSessionData(args SessionArgs) *sessiondata.SessionData { sd := &sessiondata.SessionData{ SessionData: sessiondatapb.SessionData{ UserProto: args.User.EncodeProto(), @@ -860,7 +862,7 @@ func (s *Server) newSessionData(args SessionArgs) *sessiondata.SessionData { sd.CustomOptions[k] = v } } - s.populateMinimalSessionData(sd) + populateMinimalSessionData(sd) return sd } @@ -879,7 +881,7 @@ func (s *Server) makeSessionDataMutatorIterator( // populateMinimalSessionData populates sd with some minimal values needed for // not crashing. Fields of sd that are already set are not overwritten. -func (s *Server) populateMinimalSessionData(sd *sessiondata.SessionData) { +func populateMinimalSessionData(sd *sessiondata.SessionData) { if sd.SequenceState == nil { sd.SequenceState = sessiondata.NewSequenceState() } @@ -1128,9 +1130,7 @@ func (ex *connExecutor) close(ctx context.Context, closeType closeType) { if ex.hasCreatedTemporarySchema && !ex.server.cfg.TestingKnobs.DisableTempObjectsCleanupOnSessionExit { err := cleanupSessionTempObjects( ctx, - ex.server.cfg.Settings, - ex.server.cfg.InternalExecutorFactory, - ex.server.cfg.DB, + ex.server.cfg.InternalDB, ex.server.cfg.Codec, ex.sessionID, ) @@ -1762,7 +1762,7 @@ func (ex *connExecutor) Ctx() context.Context { if _, ok := ex.machine.CurState().(stateNoTxn); ok { ctx = ex.ctxHolder.ctx() } - // stateInternalError is used by the InternalExecutor. + // stateInternalError is used by the Executor. 
if _, ok := ex.machine.CurState().(stateInternalError); ok { ctx = ex.ctxHolder.ctx() } @@ -3078,9 +3078,7 @@ func (ex *connExecutor) txnStateTransitionsApplyWrapper( ex.statsCollector.PhaseTimes().SetSessionPhaseTime(sessionphase.SessionStartPostCommitJob, timeutil.Now()) if err := ex.server.cfg.JobRegistry.Run( - ex.ctxHolder.connCtx, - ex.server.cfg.InternalExecutor, - *ex.extraTxnState.jobs, + ex.ctxHolder.connCtx, *ex.extraTxnState.jobs, ); err != nil { handleErr(err) } @@ -3379,7 +3377,7 @@ func (ex *connExecutor) runPreCommitStages(ctx context.Context) error { ex.planner.SessionData(), ex.planner.User(), ex.server.cfg, - ex.planner.txn, + ex.planner.InternalSQLTxn(), ex.extraTxnState.descCollection, ex.planner.EvalContext(), ex.planner.ExtendedEvalContext().Tracing.KVTracingEnabled(), diff --git a/pkg/sql/conn_executor_exec.go b/pkg/sql/conn_executor_exec.go index 430d16abf957..a410f3866f3b 100644 --- a/pkg/sql/conn_executor_exec.go +++ b/pkg/sql/conn_executor_exec.go @@ -20,7 +20,6 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/jobs" - "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/multitenant" "github.com/cockroachdb/cockroach/pkg/multitenant/multitenantcpu" @@ -32,6 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/clusterunique" "github.com/cockroachdb/cockroach/pkg/sql/contentionpb" "github.com/cockroachdb/cockroach/pkg/sql/execstats" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec/explain" "github.com/cockroachdb/cockroach/pkg/sql/paramparse" "github.com/cockroachdb/cockroach/pkg/sql/parser" @@ -46,7 +46,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" 
"github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/buildutil" @@ -843,7 +842,7 @@ func (ex *connExecutor) handleAOST(ctx context.Context, stmt tree.Statement) err } // If we're in an explicit txn, we allow AOST but only if it matches with // the transaction's timestamp. This is useful for running AOST statements - // using the InternalExecutor inside an external transaction; one might want + // using the Executor inside an external transaction; one might want // to do that to force p.avoidLeasedDescriptors to be set below. if asOf.BoundedStaleness { return pgerror.Newf( @@ -901,7 +900,7 @@ func (ex *connExecutor) checkDescriptorTwoVersionInvariant(ctx context.Context) return descs.CheckTwoVersionInvariant( ctx, ex.server.cfg.Clock, - ex.server.cfg.InternalExecutor, + ex.server.cfg.InternalDB.Executor(), ex.extraTxnState.descCollection, ex.state.mu.txn, inRetryBackoff, @@ -1022,22 +1021,24 @@ func (ex *connExecutor) commitSQLTransactionInternal(ctx context.Context) error } } - if err := ex.extraTxnState.descCollection.ValidateUncommittedDescriptors(ctx, ex.state.mu.txn); err != nil { - return err - } + if ex.extraTxnState.descCollection.HasUncommittedDescriptors() { + if err := ex.extraTxnState.descCollection.ValidateUncommittedDescriptors(ctx, ex.state.mu.txn); err != nil { + return err + } - if err := descs.CheckSpanCountLimit( - ctx, - ex.extraTxnState.descCollection, - ex.server.cfg.SpanConfigSplitter, - ex.server.cfg.SpanConfigLimiter, - ex.state.mu.txn, - ); err != nil { - return err - } + if err := descs.CheckSpanCountLimit( + ctx, + ex.extraTxnState.descCollection, + ex.server.cfg.SpanConfigSplitter, + ex.server.cfg.SpanConfigLimiter, + ex.state.mu.txn, + ); err != nil { + return err + } - if err := ex.checkDescriptorTwoVersionInvariant(ctx); err != nil { - return err + if err := ex.checkDescriptorTwoVersionInvariant(ctx); err != nil { + return err + } } if err := ex.state.mu.txn.Commit(ctx); err != nil { @@ -1065,12 
+1066,10 @@ func (ex *connExecutor) createJobs(ctx context.Context) error { for _, record := range ex.extraTxnState.schemaChangeJobRecords { records = append(records, record) } - var jobIDs []jobspb.JobID - var err error - if err := ex.planner.WithInternalExecutor(ctx, func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) error { - jobIDs, err = ex.server.cfg.JobRegistry.CreateJobsWithTxn(ctx, ex.planner.Txn(), ie, records) - return err - }); err != nil { + jobIDs, err := ex.server.cfg.JobRegistry.CreateJobsWithTxn( + ctx, ex.planner.InternalSQLTxn(), records, + ) + if err != nil { return err } ex.planner.extendedEvalCtx.Jobs.add(jobIDs...) @@ -2030,7 +2029,6 @@ func (ex *connExecutor) runShowCompletions( ) error { res.SetColumns(ctx, colinfo.ShowCompletionsColumns) log.Warningf(ctx, "COMPLETION GENERATOR FOR: %+v", *n) - ie := ex.server.cfg.InternalExecutor sd := ex.planner.SessionData() override := sessiondata.InternalExecutorOverride{ SearchPath: &sd.SearchPath, @@ -2042,12 +2040,12 @@ func (ex *connExecutor) runShowCompletions( // // TODO(janexing): better bind the internal executor with the txn. 
var txn *kv.Txn + var ie isql.Executor if _, ok := ex.machine.CurState().(stateOpen); ok { - txn = func() *kv.Txn { - ex.state.mu.RLock() - defer ex.state.mu.RUnlock() - return ex.state.mu.txn - }() + ie = ex.planner.InternalSQLTxn() + txn = ex.planner.Txn() + } else { + ie = ex.server.cfg.InternalDB.Executor() } queryIterFn := func(ctx context.Context, opName string, stmt string, args ...interface{}) (eval.InternalRows, error) { return ie.QueryIteratorEx(ctx, opName, txn, diff --git a/pkg/sql/conn_executor_internal_test.go b/pkg/sql/conn_executor_internal_test.go index 635ebd3f79bb..784a3c0a8104 100644 --- a/pkg/sql/conn_executor_internal_test.go +++ b/pkg/sql/conn_executor_internal_test.go @@ -330,7 +330,7 @@ func startConnExecutor( ), QueryCache: querycache.New(0), TestingKnobs: ExecutorTestingKnobs{}, - StmtDiagnosticsRecorder: stmtdiagnostics.NewRegistry(nil, nil, st), + StmtDiagnosticsRecorder: stmtdiagnostics.NewRegistry(nil, st), HistogramWindowInterval: base.DefaultHistogramWindowInterval(), CollectionFactory: descs.NewBareBonesCollectionFactory(st, keys.SystemSQLCodec), } diff --git a/pkg/sql/conn_executor_prepare.go b/pkg/sql/conn_executor_prepare.go index 2cbdc92d6ef5..e2dd0119355e 100644 --- a/pkg/sql/conn_executor_prepare.go +++ b/pkg/sql/conn_executor_prepare.go @@ -619,7 +619,7 @@ func (ex *connExecutor) execDescribe( } // Sending a nil formatCodes is equivalent to sending all text format // codes. 
- res.SetPortalOutput(ctx, cursor.InternalRows.Types(), nil /* formatCodes */) + res.SetPortalOutput(ctx, cursor.Rows.Types(), nil /* formatCodes */) return nil, nil } diff --git a/pkg/sql/control_jobs.go b/pkg/sql/control_jobs.go index 6ed0bb708a9a..9523e52c004f 100644 --- a/pkg/sql/control_jobs.go +++ b/pkg/sql/control_jobs.go @@ -66,7 +66,7 @@ func (n *controlJobsNode) startExec(params runParams) error { return errors.AssertionFailedf("%q: expected *DInt, found %T", jobIDDatum, jobIDDatum) } - job, err := reg.LoadJobWithTxn(params.ctx, jobspb.JobID(jobID), params.p.Txn()) + job, err := reg.LoadJobWithTxn(params.ctx, jobspb.JobID(jobID), params.p.InternalSQLTxn()) if err != nil { return err } @@ -76,14 +76,14 @@ func (n *controlJobsNode) startExec(params runParams) error { job.ID(), &payload, jobsauth.ControlAccess); err != nil { return err } - + ctrl := job.WithTxn(params.p.InternalSQLTxn()) switch n.desiredStatus { case jobs.StatusPaused: - err = reg.PauseRequested(params.ctx, params.p.txn, jobspb.JobID(jobID), n.reason) + err = ctrl.PauseRequested(params.ctx, n.reason) case jobs.StatusRunning: - err = reg.Unpause(params.ctx, params.p.txn, jobspb.JobID(jobID)) + err = ctrl.Unpaused(params.ctx) case jobs.StatusCanceled: - err = reg.CancelRequested(params.ctx, params.p.txn, jobspb.JobID(jobID)) + err = ctrl.CancelRequested(params.ctx) default: err = errors.AssertionFailedf("unhandled status %v", n.desiredStatus) } diff --git a/pkg/sql/control_schedules.go b/pkg/sql/control_schedules.go index c0ea588f8a2e..24240f5aff56 100644 --- a/pkg/sql/control_schedules.go +++ b/pkg/sql/control_schedules.go @@ -15,9 +15,9 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/jobs" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/server/telemetry" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" 
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -49,18 +49,16 @@ func (n *controlSchedulesNode) FastPathResults() (int, bool) { } // JobSchedulerEnv returns JobSchedulerEnv. -func JobSchedulerEnv(execCfg *ExecutorConfig) scheduledjobs.JobSchedulerEnv { - if knobs, ok := execCfg.DistSQLSrv.TestingKnobs.JobsTestingKnobs.(*jobs.TestingKnobs); ok { - if knobs.JobSchedulerEnv != nil { - return knobs.JobSchedulerEnv - } +func JobSchedulerEnv(knobs *jobs.TestingKnobs) scheduledjobs.JobSchedulerEnv { + if knobs != nil && knobs.JobSchedulerEnv != nil { + return knobs.JobSchedulerEnv } return scheduledjobs.ProdJobSchedulerEnv } // loadSchedule loads schedule information as the node user. func loadSchedule(params runParams, scheduleID tree.Datum) (*jobs.ScheduledJob, error) { - env := JobSchedulerEnv(params.ExecCfg()) + env := JobSchedulerEnv(params.ExecCfg().JobsKnobs()) schedule := jobs.NewScheduledJob(env) // Load schedule expression. This is needed for resume command, but we @@ -68,7 +66,7 @@ func loadSchedule(params runParams, scheduleID tree.Datum) (*jobs.ScheduledJob, // // Run the query as the node user since we perform our own privilege checks // before using the returned schedule. - datums, cols, err := params.ExecCfg().InternalExecutor.QueryRowExWithCols( + datums, cols, err := params.p.InternalSQLTxn().QueryRowExWithCols( params.ctx, "load-schedule", params.p.Txn(), sessiondata.NodeUserSessionDataOverride, @@ -92,24 +90,15 @@ func loadSchedule(params runParams, scheduleID tree.Datum) (*jobs.ScheduledJob, return schedule, nil } -// updateSchedule executes update for the schedule. -func updateSchedule(params runParams, schedule *jobs.ScheduledJob) error { - return schedule.Update( - params.ctx, - params.ExecCfg().InternalExecutor, - params.p.Txn(), - ) -} - // DeleteSchedule deletes specified schedule. 
func DeleteSchedule( - ctx context.Context, execCfg *ExecutorConfig, txn *kv.Txn, scheduleID int64, + ctx context.Context, execCfg *ExecutorConfig, txn isql.Txn, scheduleID int64, ) error { - env := JobSchedulerEnv(execCfg) - _, err := execCfg.InternalExecutor.ExecEx( + env := JobSchedulerEnv(execCfg.JobsKnobs()) + _, err := txn.ExecEx( ctx, "delete-schedule", - txn, + txn.KV(), sessiondata.RootUserSessionDataOverride, fmt.Sprintf( "DELETE FROM %s WHERE schedule_id = $1", @@ -153,14 +142,15 @@ func (n *controlSchedulesNode) startExec(params runParams) error { switch n.command { case tree.PauseSchedule: schedule.Pause() - err = updateSchedule(params, schedule) + err = jobs.ScheduledJobTxn(params.p.InternalSQLTxn()). + Update(params.ctx, schedule) case tree.ResumeSchedule: // Only schedule the next run time on PAUSED schedules, since ACTIVE schedules may // have a custom next run time set by first_run. if schedule.IsPaused() { - err = schedule.ScheduleNextRun() - if err == nil { - err = updateSchedule(params, schedule) + if err = schedule.ScheduleNextRun(); err == nil { + err = jobs.ScheduledJobTxn(params.p.InternalSQLTxn()). 
+ Update(params.ctx, schedule) } } case tree.DropSchedule: @@ -171,13 +161,14 @@ func (n *controlSchedulesNode) startExec(params runParams) error { } if controller, ok := ex.(jobs.ScheduledJobController); ok { scheduleControllerEnv := scheduledjobs.MakeProdScheduleControllerEnv( - params.ExecCfg().ProtectedTimestampProvider, params.ExecCfg().InternalExecutor) + params.ExecCfg().ProtectedTimestampProvider.WithTxn(params.p.InternalSQLTxn()), + ) additionalDroppedSchedules, err := controller.OnDrop( params.ctx, scheduleControllerEnv, scheduledjobs.ProdJobSchedulerEnv, schedule, - params.p.Txn(), + params.p.InternalSQLTxn(), params.p.Descriptors(), ) if err != nil { @@ -185,7 +176,10 @@ func (n *controlSchedulesNode) startExec(params runParams) error { } n.numRows += additionalDroppedSchedules } - err = DeleteSchedule(params.ctx, params.ExecCfg(), params.p.txn, schedule.ScheduleID()) + err = DeleteSchedule( + params.ctx, params.ExecCfg(), params.p.InternalSQLTxn(), + schedule.ScheduleID(), + ) default: err = errors.AssertionFailedf("unhandled command %s", n.command) } diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go index f101fb42face..a616efcb404a 100644 --- a/pkg/sql/crdb_internal.go +++ b/pkg/sql/crdb_internal.go @@ -58,6 +58,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" "github.com/cockroachdb/cockroach/pkg/sql/clusterunique" "github.com/cockroachdb/cockroach/pkg/sql/idxusage" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -699,7 +700,7 @@ CREATE TABLE crdb_internal.table_row_statistics ( ) AS l ON l."tableID" = s."tableID" AND l.last_dt = s."createdAt" AS OF SYSTEM TIME '%s' GROUP BY s."tableID"`, statsAsOfTimeClusterMode.String(&p.ExecCfg().Settings.SV)) - statRows, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryBufferedEx( + statRows, 
err := p.ExtendedEvalContext().ExecCfg.InternalDB.Executor().QueryBufferedEx( ctx, "crdb-internal-statistics-table", nil, sessiondata.RootUserSessionDataOverride, query) @@ -947,7 +948,7 @@ func populateSystemJobsTableRows( matched := false // Note: we query system.jobs as root, so we must be careful about which rows we return. - it, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryIteratorEx(ctx, + it, err := p.InternalSQLTxn().QueryIteratorEx(ctx, "system-jobs-scan", p.Txn(), sessiondata.InternalExecutorOverride{User: username.RootUserName()}, @@ -1069,7 +1070,7 @@ func makeJobsTableRows( // instead of using InternalExecutor.QueryIterator because // the latter is being deprecated for sometimes executing // the query as the root user. - it, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryIteratorEx( + it, err := p.InternalSQLTxn().QueryIteratorEx( ctx, "crdb-internal-jobs-table", p.txn, sessiondata.InternalExecutorOverride{User: p.User()}, query, params...) @@ -3896,24 +3897,21 @@ CREATE TABLE crdb_internal.ranges_no_leases ( // getAllNames returns a map from ID to namespaceKey for every entry in // system.namespace. func (p *planner) getAllNames(ctx context.Context) (map[descpb.ID]catalog.NameKey, error) { - return getAllNames(ctx, p.txn, p.ExtendedEvalContext().ExecCfg.InternalExecutor) + return getAllNames(ctx, p.InternalSQLTxn()) } // TestingGetAllNames is a wrapper for getAllNames. -func TestingGetAllNames( - ctx context.Context, txn *kv.Txn, executor *InternalExecutor, -) (map[descpb.ID]catalog.NameKey, error) { - return getAllNames(ctx, txn, executor) +func TestingGetAllNames(ctx context.Context, txn isql.Txn) (map[descpb.ID]catalog.NameKey, error) { + return getAllNames(ctx, txn) } // getAllNames is the testable implementation of getAllNames. // It is public so that it can be tested outside the sql package. 
-func getAllNames( - ctx context.Context, txn *kv.Txn, executor *InternalExecutor, -) (map[descpb.ID]catalog.NameKey, error) { +func getAllNames(ctx context.Context, txn isql.Txn) (map[descpb.ID]catalog.NameKey, error) { namespace := map[descpb.ID]catalog.NameKey{} - it, err := executor.QueryIterator( - ctx, "get-all-names", txn, + it, err := txn.QueryIteratorEx( + ctx, "get-all-names", txn.KV(), + sessiondata.NodeUserSessionDataOverride, `SELECT id, "parentID", "parentSchemaID", name FROM system.namespace`, ) if err != nil { @@ -3980,8 +3978,10 @@ CREATE TABLE crdb_internal.zones ( // For some reason, if we use the iterator API here, "concurrent txn use // detected" error might occur, so we buffer up all zones first. - rows, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryBuffered( - ctx, "crdb-internal-zones-table", p.txn, `SELECT id, config FROM system.zones`) + rows, err := p.InternalSQLTxn().QueryBufferedEx( + ctx, "crdb-internal-zones-table", p.txn, sessiondata.NodeUserSessionDataOverride, + `SELECT id, config FROM system.zones`, + ) if err != nil { return err } @@ -5211,7 +5211,7 @@ func collectMarshaledJobMetadataMap( // Build job map with referenced job IDs. 
m := make(marshaledJobMetadataMap) query := `SELECT id, status, payload, progress FROM system.jobs` - it, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryIteratorEx( + it, err := p.InternalSQLTxn().QueryIteratorEx( ctx, "crdb-internal-jobs-table", p.Txn(), sessiondata.RootUserSessionDataOverride, query) diff --git a/pkg/sql/crdb_internal_test.go b/pkg/sql/crdb_internal_test.go index 135dfc23bc84..c8d8edcc5033 100644 --- a/pkg/sql/crdb_internal_test.go +++ b/pkg/sql/crdb_internal_test.go @@ -36,12 +36,14 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/distsql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -83,7 +85,13 @@ func TestGetAllNamesInternal(t *testing.T) { }) require.NoError(t, err) - names, err := sql.TestingGetAllNames(ctx, nil, s.InternalExecutor().(*sql.InternalExecutor)) + var names map[descpb.ID]catalog.NameKey + require.NoError(t, s.InternalDB().(isql.DB).Txn(ctx, func( + ctx context.Context, txn isql.Txn, + ) (err error) { + names, err = sql.TestingGetAllNames(ctx, txn) + return err + })) require.NoError(t, err) assert.Equal(t, descpb.NameInfo{ParentID: 999, ParentSchemaID: 444, Name: "bob"}, names[9999]) diff --git a/pkg/sql/create_external_connection.go b/pkg/sql/create_external_connection.go index fc691b8803d9..5e45f5bc9965 100644 --- a/pkg/sql/create_external_connection.go 
+++ b/pkg/sql/create_external_connection.go @@ -17,13 +17,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/cloud" "github.com/cockroachdb/cockroach/pkg/cloud/externalconn" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/syntheticprivilege" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" @@ -115,24 +113,23 @@ func (p *planner) createExternalConnection( ex.SetConnectionType(exConn.ConnectionType()) ex.SetOwner(p.User()) - return p.WithInternalExecutor(params.ctx, func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) error { - // Create the External Connection and persist it in the - // `system.external_connections` table. - if err := ex.Create(params.ctx, ie, p.User(), txn); err != nil { - return errors.Wrap(err, "failed to create external connection") - } - - // Grant user `ALL` on the newly created External Connection. - grantStatement := fmt.Sprintf(`GRANT ALL ON EXTERNAL CONNECTION "%s" TO %s`, - ec.name, p.User().SQLIdentifier()) - _, err = ie.ExecEx(params.ctx, - "grant-on-create-external-connection", txn, - sessiondata.NodeUserSessionDataOverride, grantStatement) - if err != nil { - return errors.Wrap(err, "failed to grant on newly created External Connection") - } - return nil - }) + txn := p.InternalSQLTxn() + // Create the External Connection and persist it in the + // `system.external_connections` table. 
+ if err := ex.Create(params.ctx, txn, p.User()); err != nil { + return errors.Wrap(err, "failed to create external connection") + } + + // Grant user `ALL` on the newly created External Connection. + grantStatement := fmt.Sprintf(`GRANT ALL ON EXTERNAL CONNECTION "%s" TO %s`, + ec.name, p.User().SQLIdentifier()) + _, err = txn.ExecEx(params.ctx, + "grant-on-create-external-connection", txn.KV(), + sessiondata.NodeUserSessionDataOverride, grantStatement) + if err != nil { + return errors.Wrap(err, "failed to grant on newly created External Connection") + } + return nil } func logAndSanitizeExternalConnectionURI(ctx context.Context, externalConnectionURI string) error { diff --git a/pkg/sql/create_function_test.go b/pkg/sql/create_function_test.go index ecd428a95c67..88f45e196c88 100644 --- a/pkg/sql/create_function_test.go +++ b/pkg/sql/create_function_test.go @@ -22,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/tests" @@ -64,8 +65,8 @@ CREATE SCHEMA test_sc; `, ) - err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - funcDesc, err := col.ByIDWithLeased(txn).WithoutNonPublic().Get().Function(ctx, 110) + err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + funcDesc, err := col.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Function(ctx, 110) require.NoError(t, err) require.Equal(t, funcDesc.GetName(), "f") @@ -94,7 +95,7 @@ SELECT nextval(105:::REGCLASS);`, // Make sure columns and indexes has correct back references. 
tn := tree.MakeTableNameWithSchema("defaultdb", "public", "t") - _, tbl, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &tn) + _, tbl, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &tn) require.NoError(t, err) require.Equal(t, "t", tbl.GetName()) require.Equal(t, @@ -108,7 +109,7 @@ SELECT nextval(105:::REGCLASS);`, // Make sure sequence has correct back references. sqn := tree.MakeTableNameWithSchema("defaultdb", "public", "sq1") - _, seq, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &sqn) + _, seq, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &sqn) require.NoError(t, err) require.Equal(t, "sq1", seq.GetName()) require.Equal(t, @@ -120,7 +121,7 @@ SELECT nextval(105:::REGCLASS);`, // Make sure view has correct back references. vn := tree.MakeTableNameWithSchema("defaultdb", "public", "v") - _, view, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &vn) + _, view, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &vn) require.NoError(t, err) require.Equal(t, "v", view.GetName()) require.Equal(t, @@ -132,7 +133,7 @@ SELECT nextval(105:::REGCLASS);`, // Make sure type has correct back references. 
typn := tree.MakeQualifiedTypeName("defaultdb", "public", "notmyworkday") - _, typ, err := descs.PrefixAndType(ctx, col.ByNameWithLeased(txn).Get(), &typn) + _, typ, err := descs.PrefixAndType(ctx, col.ByNameWithLeased(txn.KV()).Get(), &typn) require.NoError(t, err) require.Equal(t, "notmyworkday", typ.GetName()) require.Equal(t, @@ -249,8 +250,8 @@ $$; `, ) - err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - funcDesc, err := col.ByIDWithLeased(txn).WithoutNonPublic().Get().Function(ctx, 112) + err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + funcDesc, err := col.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Function(ctx, 112) require.NoError(t, err) require.Equal(t, funcDesc.GetName(), "f") @@ -271,13 +272,13 @@ SELECT nextval(106:::REGCLASS);`, // Make sure type has correct back references. typn := tree.MakeQualifiedTypeName("defaultdb", "public", "notmyworkday") - _, typ, err := descs.PrefixAndType(ctx, col.ByNameWithLeased(txn).Get(), &typn) + _, typ, err := descs.PrefixAndType(ctx, col.ByNameWithLeased(txn.KV()).Get(), &typn) require.NoError(t, err) require.Equal(t, []descpb.ID{112}, typ.GetReferencingDescriptorIDs()) // All objects with "1" suffix should have back references to the function, // "2" should have empty references since it's not used yet. 
- validateReferences(ctx, txn, col, "1", "2") + validateReferences(ctx, txn.KV(), col, "1", "2") return nil }) require.NoError(t, err) @@ -292,8 +293,8 @@ CREATE OR REPLACE FUNCTION f(a notmyworkday) RETURNS INT IMMUTABLE LANGUAGE SQL $$; `) - err = sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - funcDesc, err := col.ByID(txn).WithoutNonPublic().Get().Function(ctx, 112) + err = sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + funcDesc, err := col.ByID(txn.KV()).WithoutNonPublic().Get().Function(ctx, 112) require.NoError(t, err) require.Equal(t, funcDesc.GetName(), "f") @@ -314,13 +315,13 @@ SELECT nextval(107:::REGCLASS);`, // Make sure type has correct back references. typn := tree.MakeQualifiedTypeName("defaultdb", "public", "notmyworkday") - _, typ, err := descs.PrefixAndType(ctx, col.ByNameWithLeased(txn).Get(), &typn) + _, typ, err := descs.PrefixAndType(ctx, col.ByNameWithLeased(txn.KV()).Get(), &typn) require.NoError(t, err) require.Equal(t, []descpb.ID{112}, typ.GetReferencingDescriptorIDs()) // Now all objects with "2" suffix in name should have back references "1" // had before, and "1" should have empty references. 
- validateReferences(ctx, txn, col, "2", "1") + validateReferences(ctx, txn.KV(), col, "2", "1") return nil }) require.NoError(t, err) diff --git a/pkg/sql/create_index.go b/pkg/sql/create_index.go index 6a96d1a1728f..0889257dae5c 100644 --- a/pkg/sql/create_index.go +++ b/pkg/sql/create_index.go @@ -903,7 +903,7 @@ func (p *planner) configureZoneConfigForNewIndexPartitioning( if err := ApplyZoneConfigForMultiRegionTable( ctx, - p.txn, + p.Txn(), p.ExecCfg(), p.extendedEvalCtx.Tracing.KVTracingEnabled(), p.Descriptors(), diff --git a/pkg/sql/create_role.go b/pkg/sql/create_role.go index 465083bf8ad5..c0ad36b91d9e 100644 --- a/pkg/sql/create_role.go +++ b/pkg/sql/create_role.go @@ -137,7 +137,7 @@ func (n *CreateRoleNode) startExec(params runParams) error { } // Check if the user/role exists. - row, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.QueryRowEx( + row, err := params.p.InternalSQLTxn().QueryRowEx( params.ctx, opName, params.p.txn, @@ -162,9 +162,10 @@ func (n *CreateRoleNode) startExec(params runParams) error { if err != nil { return err } - rowsAffected, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.Exec( - params.ctx, opName, params.p.txn, stmt, - n.roleName, hashedPassword, n.isRole, roleID, + rowsAffected, err := params.p.InternalSQLTxn().ExecEx( + params.ctx, opName, params.p.txn, + sessiondata.InternalExecutorOverride{User: username.NodeUserName()}, + stmt, n.roleName, hashedPassword, n.isRole, roleID, ) if err != nil { return err @@ -222,7 +223,7 @@ func updateRoleOptions( if isNull { // If the value of the role option is NULL, ensure that nil is passed // into the statement placeholder, since val is string type "NULL" - // will not be interpreted as NULL by the InternalExecutor. + // will not be interpreted as NULL by the Executor. 
qargs = append(qargs, nil) } else { qargs = append(qargs, val) @@ -230,7 +231,7 @@ func updateRoleOptions( } if withID { - idRow, err := params.p.ExecCfg().InternalExecutor.QueryRowEx( + idRow, err := params.p.InternalSQLTxn().QueryRowEx( params.ctx, `get-user-id`, params.p.Txn(), sessiondata.NodeUserSessionDataOverride, `SELECT user_id FROM system.users WHERE username = $1`, roleName.Normalized(), ) @@ -240,7 +241,7 @@ func updateRoleOptions( qargs = append(qargs, tree.MustBeDOid(idRow[0])) } - affected, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( + affected, err := params.p.InternalSQLTxn().ExecEx( params.ctx, opName, params.p.txn, diff --git a/pkg/sql/create_schema.go b/pkg/sql/create_schema.go index 8ab6383830ce..176714de9704 100644 --- a/pkg/sql/create_schema.go +++ b/pkg/sql/create_schema.go @@ -15,7 +15,6 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" @@ -32,7 +31,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" ) @@ -49,9 +47,7 @@ func CreateUserDefinedSchemaDescriptor( ctx context.Context, sessionData *sessiondata.SessionData, n *tree.CreateSchema, - txn *kv.Txn, - descriptors *descs.Collection, - ie sqlutil.InternalExecutor, + txn descs.Txn, descIDGenerator eval.DescIDGenerator, db catalog.DatabaseDescriptor, allocateID bool, @@ -71,7 +67,7 @@ func CreateUserDefinedSchemaDescriptor( } // Ensure there aren't any name collisions. 
- exists, schemaID, err := schemaExists(ctx, txn, descriptors, db.GetID(), schemaName) + exists, schemaID, err := schemaExists(ctx, txn.KV(), txn.Descriptors(), db.GetID(), schemaName) if err != nil { return nil, nil, err } @@ -82,7 +78,7 @@ func CreateUserDefinedSchemaDescriptor( // and can't be in a dropping state. if schemaID != descpb.InvalidID { // Check if the object already exists in a dropped state - sc, err := descriptors.ByID(txn).Get().Schema(ctx, schemaID) + sc, err := txn.Descriptors().ByID(txn.KV()).Get().Schema(ctx, schemaID) if err != nil || sc.SchemaKind() != catalog.SchemaUserDefined { return nil, nil, err } @@ -104,7 +100,7 @@ func CreateUserDefinedSchemaDescriptor( owner := user if !n.AuthRole.Undefined() { - exists, err := RoleExists(ctx, ie, txn, authRole) + exists, err := RoleExists(ctx, txn, authRole) if err != nil { return nil, nil, err } @@ -202,9 +198,8 @@ func (p *planner) createUserDefinedSchema(params runParams, n *tree.CreateSchema } desc, privs, err := CreateUserDefinedSchemaDescriptor( - params.ctx, params.SessionData(), n, p.Txn(), p.Descriptors(), - p.ExecCfg().InternalExecutor, p.extendedEvalCtx.DescIDGenerator, - db, true, /* allocateID */ + params.ctx, params.SessionData(), n, p.InternalSQLTxn(), + p.extendedEvalCtx.DescIDGenerator, db, true, /* allocateID */ ) if err != nil { return err diff --git a/pkg/sql/create_stats.go b/pkg/sql/create_stats.go index f5e1c3af181a..3b9f554d440f 100644 --- a/pkg/sql/create_stats.go +++ b/pkg/sql/create_stats.go @@ -18,7 +18,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -27,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -160,7 +160,7 @@ func (n *createStatsNode) startJob(ctx context.Context, resultsCh chan<- tree.Da var job *jobs.StartableJob jobID := n.p.ExecCfg().JobRegistry.MakeJobID() - if err := n.p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + if err := n.p.ExecCfg().InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { return n.p.ExecCfg().JobRegistry.CreateStartableJobWithTxn(ctx, &job, jobID, txn, *record) }); err != nil { if job != nil { @@ -178,7 +178,7 @@ func (n *createStatsNode) startJob(ctx context.Context, resultsCh chan<- tree.Da if errors.Is(err, stats.ConcurrentCreateStatsError) { // Delete the job so users don't see it and get confused by the error. const stmt = `DELETE FROM system.jobs WHERE id = $1` - if _ /* cols */, delErr := n.p.ExecCfg().InternalExecutor.Exec( + if _ /* cols */, delErr := n.p.ExecCfg().InternalDB.Executor().Exec( ctx, "delete-job", nil /* txn */, stmt, jobID, ); delErr != nil { log.Warningf(ctx, "failed to delete job: %v", delErr) @@ -602,26 +602,26 @@ func (r *createStatsResumer) Resume(ctx context.Context, execCtx interface{}) er evalCtx := p.ExtendedEvalContext() dsp := p.DistSQLPlanner() - if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := p.ExecCfg().InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { // Set the transaction on the EvalContext to this txn. This allows for // use of the txn during processor setup during the execution of the flow. 
- evalCtx.Txn = txn + evalCtx.Txn = txn.KV() if details.AsOf != nil { p.ExtendedEvalContext().AsOfSystemTime = &eval.AsOfSystemTime{Timestamp: *details.AsOf} p.ExtendedEvalContext().SetTxnTimestamp(details.AsOf.GoTime()) - if err := txn.SetFixedTimestamp(ctx, *details.AsOf); err != nil { + if err := txn.KV().SetFixedTimestamp(ctx, *details.AsOf); err != nil { return err } } - planCtx := dsp.NewPlanningCtx(ctx, evalCtx, nil /* planner */, txn, + planCtx := dsp.NewPlanningCtx(ctx, evalCtx, nil /* planner */, txn.KV(), DistributionTypeSystemTenantOnly) // CREATE STATS flow doesn't produce any rows and only emits the // metadata, so we can use a nil rowContainerHelper. resultWriter := NewRowResultWriter(nil /* rowContainer */) if err := dsp.planAndRunCreateStats( - ctx, evalCtx, planCtx, txn, r.job, resultWriter, + ctx, evalCtx, planCtx, txn.KV(), r.job, resultWriter, ); err != nil { // Check if this was a context canceled error and restart if it was. if grpcutil.IsContextCanceled(err) { @@ -635,12 +635,12 @@ func (r *createStatsResumer) Resume(ctx context.Context, execCtx interface{}) er // job progress to coerce out the correct error type. If the update succeeds // then return the original error, otherwise return this error instead so // it can be cleaned up at a higher level. - if jobErr := r.job.FractionProgressed( - ctx, nil, /* txn */ - func(ctx context.Context, _ jobspb.ProgressDetails) float32 { - // The job failed so the progress value here doesn't really matter. - return 0 - }, + if jobErr := r.job.NoTxn().FractionProgressed(ctx, func( + ctx context.Context, _ jobspb.ProgressDetails, + ) float32 { + // The job failed so the progress value here doesn't really matter. + return 0 + }, ); jobErr != nil { return jobErr } @@ -666,7 +666,7 @@ func (r *createStatsResumer) Resume(ctx context.Context, execCtx interface{}) er // TODO(knz): figure out why this is not triggered for a regular // CREATE STATISTICS statement. 
// See: https://github.com/cockroachdb/cockroach/issues/57739 - return evalCtx.ExecCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return evalCtx.ExecCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { return logEventInternalForSQLStatements(ctx, evalCtx.ExecCfg, txn, 0, /* depth: use event_log=2 for vmodule filtering */ @@ -695,11 +695,13 @@ func checkRunningJobs(ctx context.Context, job *jobs.Job, p JobExecContext) erro if job != nil { jobID = job.ID() } - exists, err := jobs.RunningJobExists(ctx, jobID, p.ExecCfg().InternalExecutor, nil /* txn */, func(payload *jobspb.Payload) bool { - return payload.Type() == jobspb.TypeCreateStats || payload.Type() == jobspb.TypeAutoCreateStats - }) - - if err != nil { + var exists bool + if err := p.ExecCfg().InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { + exists, err = jobs.RunningJobExists(ctx, jobID, txn, func(payload *jobspb.Payload) bool { + return payload.Type() == jobspb.TypeCreateStats || payload.Type() == jobspb.TypeAutoCreateStats + }) + return err + }); err != nil { return err } diff --git a/pkg/sql/create_table.go b/pkg/sql/create_table.go index 0398fcb2fa85..60b9028f80bb 100644 --- a/pkg/sql/create_table.go +++ b/pkg/sql/create_table.go @@ -455,7 +455,7 @@ func (n *createTableNode) startExec(params runParams) error { if err := ApplyZoneConfigForMultiRegionTable( params.ctx, - params.p.txn, + params.p.Txn(), params.p.ExecCfg(), params.p.extendedEvalCtx.Tracing.KVTracingEnabled(), params.p.Descriptors(), @@ -2339,10 +2339,11 @@ func newTableDesc( return nil, err } + params.p.Txn() j, err := CreateRowLevelTTLScheduledJob( params.ctx, - params.ExecCfg(), - params.p.txn, + params.ExecCfg().JobsKnobs(), + jobs.ScheduledJobTxn(params.p.InternalSQLTxn()), params.p.User(), ret.GetID(), ttl, @@ -2392,19 +2393,19 @@ func newRowLevelTTLScheduledJob( // CreateRowLevelTTLScheduledJob creates a new row-level TTL schedule. 
func CreateRowLevelTTLScheduledJob( ctx context.Context, - execCfg *ExecutorConfig, - txn *kv.Txn, + knobs *jobs.TestingKnobs, + s jobs.ScheduledJobStorage, owner username.SQLUsername, tblID descpb.ID, ttl *catpb.RowLevelTTL, ) (*jobs.ScheduledJob, error) { telemetry.Inc(sqltelemetry.RowLevelTTLCreated) - env := JobSchedulerEnv(execCfg) + env := JobSchedulerEnv(knobs) j, err := newRowLevelTTLScheduledJob(env, owner, tblID, ttl) if err != nil { return nil, err } - if err := j.Create(ctx, execCfg.InternalExecutor, txn); err != nil { + if err := s.Create(ctx, j); err != nil { return nil, err } return j, nil diff --git a/pkg/sql/create_view.go b/pkg/sql/create_view.go index 0030fe29f60e..6e8f66348702 100644 --- a/pkg/sql/create_view.go +++ b/pkg/sql/create_view.go @@ -341,7 +341,7 @@ func (n *createViewNode) startExec(params runParams) error { } if err := ApplyZoneConfigForMultiRegionTable( params.ctx, - params.p.txn, + params.p.Txn(), params.p.ExecCfg(), params.p.extendedEvalCtx.Tracing.KVTracingEnabled(), params.p.Descriptors(), diff --git a/pkg/sql/database_region_change_finalizer.go b/pkg/sql/database_region_change_finalizer.go index 2b86156afd3c..e9f28f86e693 100644 --- a/pkg/sql/database_region_change_finalizer.go +++ b/pkg/sql/database_region_change_finalizer.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/multiregion" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" - "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/types" ) @@ -41,27 +41,22 @@ type databaseRegionChangeFinalizer struct { // newDatabaseRegionChangeFinalizer returns a databaseRegionChangeFinalizer. // It pre-fetches all REGIONAL BY ROW tables from the database. 
func newDatabaseRegionChangeFinalizer( - ctx context.Context, - txn *kv.Txn, - execCfg *ExecutorConfig, - descsCol *descs.Collection, - dbID descpb.ID, - typeID descpb.ID, + ctx context.Context, txn descs.Txn, execCfg *ExecutorConfig, dbID descpb.ID, typeID descpb.ID, ) (*databaseRegionChangeFinalizer, error) { p, cleanup := NewInternalPlanner( "repartition-regional-by-row-tables", - txn, + txn.KV(), username.RootUserName(), &MemoryMetrics{}, execCfg, - sessiondatapb.SessionData{}, - WithDescCollection(descsCol), + txn.SessionData().SessionData, + WithDescCollection(txn.Descriptors()), ) localPlanner := p.(*planner) var regionalByRowTables []*tabledesc.Mutable if err := func() error { - dbDesc, err := descsCol.ByID(txn).WithoutNonPublic().Get().Database(ctx, dbID) + dbDesc, err := txn.Descriptors().ByID(txn.KV()).WithoutNonPublic().Get().Database(ctx, dbID) if err != nil { return err } @@ -104,8 +99,8 @@ func (r *databaseRegionChangeFinalizer) cleanup() { // finalize updates the zone configurations of the database and all enclosed // REGIONAL BY ROW tables once the region promotion/demotion is complete. -func (r *databaseRegionChangeFinalizer) finalize(ctx context.Context, txn *kv.Txn) error { - if err := r.updateDatabaseZoneConfig(ctx, txn); err != nil { +func (r *databaseRegionChangeFinalizer) finalize(ctx context.Context, txn descs.Txn) error { + if err := r.updateDatabaseZoneConfig(ctx, txn.KV()); err != nil { return err } if err := r.preDrop(ctx, txn); err != nil { @@ -119,19 +114,19 @@ func (r *databaseRegionChangeFinalizer) finalize(ctx context.Context, txn *kv.Tx // advance of the type descriptor change, to ensure that the table and type // descriptors never become incorrect (from a query perspective). For more info, // see the callers. 
-func (r *databaseRegionChangeFinalizer) preDrop(ctx context.Context, txn *kv.Txn) error { - repartitioned, zoneConfigUpdates, err := r.repartitionRegionalByRowTables(ctx, txn) +func (r *databaseRegionChangeFinalizer) preDrop(ctx context.Context, txn isql.Txn) error { + repartitioned, zoneConfigUpdates, err := r.repartitionRegionalByRowTables(ctx, txn.KV()) if err != nil { return err } for _, update := range zoneConfigUpdates { if _, err := writeZoneConfigUpdate( - ctx, txn, r.localPlanner.ExtendedEvalContext().Tracing.KVTracingEnabled(), r.localPlanner.Descriptors(), update, + ctx, txn.KV(), r.localPlanner.ExtendedEvalContext().Tracing.KVTracingEnabled(), r.localPlanner.Descriptors(), update, ); err != nil { return err } } - b := txn.NewBatch() + b := txn.KV().NewBatch() for _, t := range repartitioned { const kvTrace = false if err := r.localPlanner.Descriptors().WriteDescToBatch( @@ -140,7 +135,7 @@ func (r *databaseRegionChangeFinalizer) preDrop(ctx context.Context, txn *kv.Txn return err } } - return txn.Run(ctx, b) + return txn.KV().Run(ctx, b) } // updateGlobalTablesZoneConfig refreshes all global tables' zone configs so @@ -150,9 +145,9 @@ func (r *databaseRegionChangeFinalizer) preDrop(ctx context.Context, txn *kv.Txn // will inherit the database's constraints. In the RESTRICTED case, however, // constraints must be explicitly refreshed when new regions are added/removed. 
func (r *databaseRegionChangeFinalizer) updateGlobalTablesZoneConfig( - ctx context.Context, txn *kv.Txn, + ctx context.Context, txn isql.Txn, ) error { - regionConfig, err := SynthesizeRegionConfig(ctx, txn, r.dbID, r.localPlanner.Descriptors()) + regionConfig, err := SynthesizeRegionConfig(ctx, txn.KV(), r.dbID, r.localPlanner.Descriptors()) if err != nil { return err } @@ -165,7 +160,7 @@ func (r *databaseRegionChangeFinalizer) updateGlobalTablesZoneConfig( descsCol := r.localPlanner.Descriptors() - dbDesc, err := descsCol.ByID(txn).WithoutNonPublic().Get().Database(ctx, r.dbID) + dbDesc, err := descsCol.ByID(txn.KV()).WithoutNonPublic().Get().Database(ctx, r.dbID) if err != nil { return err } diff --git a/pkg/sql/database_test.go b/pkg/sql/database_test.go index 40b7e013b2f8..7b6807ac42fb 100644 --- a/pkg/sql/database_test.go +++ b/pkg/sql/database_test.go @@ -16,10 +16,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -32,8 +32,8 @@ func TestDatabaseAccessors(t *testing.T) { s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(context.Background()) - if err := TestingDescsTxn(context.Background(), s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - _, err := col.ByID(txn).Get().Database(ctx, keys.SystemDatabaseID) + if err := TestingDescsTxn(context.Background(), s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + _, err := col.ByID(txn.KV()).Get().Database(ctx, keys.SystemDatabaseID) return err }); 
err != nil { t.Fatal(err) diff --git a/pkg/sql/delete_preserving_index_test.go b/pkg/sql/delete_preserving_index_test.go index 8fbb66a7d4ce..3419b93b8183 100644 --- a/pkg/sql/delete_preserving_index_test.go +++ b/pkg/sql/delete_preserving_index_test.go @@ -36,6 +36,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/rowenc/rowencpb" @@ -618,7 +619,7 @@ func TestMergeProcessor(t *testing.T) { mm := mon.NewUnlimitedMonitor(ctx, "MemoryMonitor", mon.MemoryResource, nil, nil, math.MaxInt64, settings) flowCtx := execinfra.FlowCtx{ Cfg: &execinfra.ServerConfig{ - DB: kvDB, + DB: execCfg.InternalDB, Settings: settings, Codec: codec, BackfillerMonitor: mm, @@ -676,14 +677,14 @@ func TestMergeProcessor(t *testing.T) { } require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection) error { - mut, err := descriptors.MutableByID(txn).Table(ctx, tableDesc.GetID()) + ctx context.Context, txn isql.Txn, descriptors *descs.Collection) error { + mut, err := descriptors.MutableByID(txn.KV()).Table(ctx, tableDesc.GetID()) if err != nil { return err } require.Equal(t, test.dstContentsBeforeMerge, - datumSliceToStrMatrix(fetchIndex(ctx, t, txn, mut, test.dstIndex))) + datumSliceToStrMatrix(fetchIndex(ctx, t, txn.KV(), mut, test.dstIndex))) return nil })) @@ -709,14 +710,14 @@ func TestMergeProcessor(t *testing.T) { } require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection) error { - mut, err := descriptors.MutableByID(txn).Table(ctx, tableDesc.GetID()) + ctx context.Context, txn isql.Txn, descriptors *descs.Collection) error { + mut, err := 
descriptors.MutableByID(txn.KV()).Table(ctx, tableDesc.GetID()) if err != nil { return err } require.Equal(t, test.dstContentsAfterMerge, - datumSliceToStrMatrix(fetchIndex(ctx, t, txn, mut, test.dstIndex))) + datumSliceToStrMatrix(fetchIndex(ctx, t, txn.KV(), mut, test.dstIndex))) return nil })) } diff --git a/pkg/sql/descmetadata/BUILD.bazel b/pkg/sql/descmetadata/BUILD.bazel index 4a4e9184d775..a4bf90e5ff74 100644 --- a/pkg/sql/descmetadata/BUILD.bazel +++ b/pkg/sql/descmetadata/BUILD.bazel @@ -8,14 +8,13 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/keys", - "//pkg/kv", "//pkg/settings", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/descs", + "//pkg/sql/isql", "//pkg/sql/schemachanger/scexec", "//pkg/sql/sessiondata", "//pkg/sql/sessioninit", - "//pkg/sql/sqlutil", ], ) diff --git a/pkg/sql/descmetadata/metadata_updater.go b/pkg/sql/descmetadata/metadata_updater.go index c733e3c1b127..f9767e8a25b4 100644 --- a/pkg/sql/descmetadata/metadata_updater.go +++ b/pkg/sql/descmetadata/metadata_updater.go @@ -15,22 +15,20 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sessioninit" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" ) // metadataUpdater which implements scexec.MetaDataUpdater that is used to update // comments on different schema objects. 
type metadataUpdater struct { ctx context.Context - txn *kv.Txn - ieFactory sqlutil.InternalExecutorFactory + txn isql.Txn sessionData *sessiondata.SessionData descriptors *descs.Collection cacheEnabled bool @@ -41,16 +39,14 @@ type metadataUpdater struct { // schema objects. func NewMetadataUpdater( ctx context.Context, - ieFactory sqlutil.InternalExecutorFactory, + txn isql.Txn, descriptors *descs.Collection, settings *settings.Values, - txn *kv.Txn, sessionData *sessiondata.SessionData, ) scexec.DescriptorMetadataUpdater { return metadataUpdater{ ctx: ctx, txn: txn, - ieFactory: ieFactory, sessionData: sessionData, descriptors: descriptors, cacheEnabled: sessioninit.CacheEnabled.Get(settings), @@ -59,10 +55,9 @@ func NewMetadataUpdater( // DeleteDatabaseRoleSettings implement scexec.DescriptorMetaDataUpdater. func (mu metadataUpdater) DeleteDatabaseRoleSettings(ctx context.Context, dbID descpb.ID) error { - ie := mu.ieFactory.NewInternalExecutor(mu.sessionData) - rowsDeleted, err := ie.ExecEx(ctx, + rowsDeleted, err := mu.txn.ExecEx(ctx, "delete-db-role-setting", - mu.txn, + mu.txn.KV(), sessiondata.RootUserSessionDataOverride, fmt.Sprintf( `DELETE FROM %s WHERE database_id = $1`, @@ -79,21 +74,20 @@ func (mu metadataUpdater) DeleteDatabaseRoleSettings(ctx context.Context, dbID d return nil } // Bump the table version for the role settings table when we modify it. - desc, err := mu.descriptors.MutableByID(mu.txn).Table(ctx, keys.DatabaseRoleSettingsTableID) + desc, err := mu.descriptors.MutableByID(mu.txn.KV()).Table(ctx, keys.DatabaseRoleSettingsTableID) if err != nil { return err } desc.MaybeIncrementVersion() - return mu.descriptors.WriteDesc(ctx, false /*kvTrace*/, desc, mu.txn) + return mu.descriptors.WriteDesc(ctx, false /*kvTrace*/, desc, mu.txn.KV()) } // DeleteSchedule implement scexec.DescriptorMetadataUpdater. 
func (mu metadataUpdater) DeleteSchedule(ctx context.Context, scheduleID int64) error { - ie := mu.ieFactory.NewInternalExecutor(mu.sessionData) - _, err := ie.ExecEx( + _, err := mu.txn.ExecEx( ctx, "delete-schedule", - mu.txn, + mu.txn.KV(), sessiondata.RootUserSessionDataOverride, "DELETE FROM system.scheduled_jobs WHERE schedule_id = $1", scheduleID, diff --git a/pkg/sql/discard.go b/pkg/sql/discard.go index a51f0edd4746..ec6fa86016e6 100644 --- a/pkg/sql/discard.go +++ b/pkg/sql/discard.go @@ -13,12 +13,10 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/errors" ) @@ -86,27 +84,27 @@ func (n *discardNode) startExec(params runParams) error { } func deleteTempTables(ctx context.Context, p *planner) error { - return p.WithInternalExecutor(ctx, func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) error { - codec := p.execCfg.Codec - descCol := p.Descriptors() - allDbDescs, err := descCol.GetAllDatabaseDescriptors(ctx, p.Txn()) + codec := p.execCfg.Codec + descCol := p.Descriptors() + allDbDescs, err := descCol.GetAllDatabaseDescriptors(ctx, p.Txn()) + if err != nil { + return err + } + g := p.byNameGetterBuilder().MaybeGet() + for _, db := range allDbDescs { + sc, err := g.Schema(ctx, db, p.TemporarySchemaName()) if err != nil { return err } - g := p.byNameGetterBuilder().MaybeGet() - for _, db := range allDbDescs { - sc, err := g.Schema(ctx, db, p.TemporarySchemaName()) - if err != nil { - return err - } - if sc == nil { - continue - } - err = cleanupTempSchemaObjects(ctx, p.Txn(), descCol, codec, ie, db, sc) - if err != nil { - return err - } + if sc == nil { + continue + } + err = cleanupTempSchemaObjects( + ctx, 
p.InternalSQLTxn(), descCol, codec, db, sc, + ) + if err != nil { + return err } - return nil - }) + } + return nil } diff --git a/pkg/sql/distsql/server.go b/pkg/sql/distsql/server.go index 02b7534d8abd..ac7d23891318 100644 --- a/pkg/sql/distsql/server.go +++ b/pkg/sql/distsql/server.go @@ -291,7 +291,7 @@ func (ds *ServerImpl) setupFlow( } // The flow will run in a LeafTxn because we do not want each distributed // Txn to heartbeat the transaction. - return kv.NewLeafTxn(ctx, ds.DB, roachpb.NodeID(req.Flow.Gateway), tis), nil + return kv.NewLeafTxn(ctx, ds.DB.KV(), roachpb.NodeID(req.Flow.Gateway), tis), nil } var evalCtx *eval.Context diff --git a/pkg/sql/drop_database.go b/pkg/sql/drop_database.go index 252eff0c9b08..d7c837b0a9b3 100644 --- a/pkg/sql/drop_database.go +++ b/pkg/sql/drop_database.go @@ -200,10 +200,9 @@ func (n *dropDatabaseNode) startExec(params runParams) error { metadataUpdater := descmetadata.NewMetadataUpdater( ctx, - p.ExecCfg().InternalExecutorFactory, + p.InternalSQLTxn(), p.Descriptors(), &p.ExecCfg().Settings.SV, - p.txn, p.SessionData(), ) diff --git a/pkg/sql/drop_external_connection.go b/pkg/sql/drop_external_connection.go index ad2b24005ad7..25274035ca5a 100644 --- a/pkg/sql/drop_external_connection.go +++ b/pkg/sql/drop_external_connection.go @@ -68,7 +68,7 @@ func (p *planner) dropExternalConnection(params runParams, n *tree.DropExternalC // DROP EXTERNAL CONNECTION is only allowed for users with the `DROP` // privilege on this object. We run the query as `node` since the user might // not have `SELECT` on the system table. - if _ /* rows */, err = params.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( + if _ /* rows */, err = params.p.InternalSQLTxn().ExecEx( params.ctx, dropExternalConnectionOp, params.p.Txn(), @@ -80,7 +80,7 @@ func (p *planner) dropExternalConnection(params runParams, n *tree.DropExternalC // We must also DELETE all rows from system.privileges that refer to // external connection. 
- if _, err = params.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( + if _, err = params.p.InternalSQLTxn().ExecEx( params.ctx, dropExternalConnectionOp, params.p.Txn(), diff --git a/pkg/sql/drop_function_test.go b/pkg/sql/drop_function_test.go index 2972795fc0b6..dfd740348b99 100644 --- a/pkg/sql/drop_function_test.go +++ b/pkg/sql/drop_function_test.go @@ -17,10 +17,10 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -61,8 +61,8 @@ CREATE SCHEMA test_sc; `, ) - err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - funcDesc, err := col.ByIDWithLeased(txn).WithoutNonPublic().Get().Function(ctx, 109) + err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + funcDesc, err := col.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Function(ctx, 109) require.NoError(t, err) require.Equal(t, funcDesc.GetName(), "f") @@ -91,7 +91,7 @@ SELECT nextval(105:::REGCLASS);`, // Make sure columns and indexes has correct back references. tn := tree.MakeTableNameWithSchema("defaultdb", "public", "t") - _, tbl, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &tn) + _, tbl, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &tn) require.NoError(t, err) require.Equal(t, "t", tbl.GetName()) require.Equal(t, @@ -106,7 +106,7 @@ SELECT nextval(105:::REGCLASS);`, // Make sure sequence has correct back references. 
sqn := tree.MakeTableNameWithSchema("defaultdb", "public", "sq1") - _, seq, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &sqn) + _, seq, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &sqn) require.NoError(t, err) require.Equal(t, "sq1", seq.GetName()) require.Equal(t, @@ -118,7 +118,7 @@ SELECT nextval(105:::REGCLASS);`, // Make sure view has correct back references. vn := tree.MakeTableNameWithSchema("defaultdb", "public", "v") - _, view, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &vn) + _, view, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &vn) require.NoError(t, err) require.Equal(t, "v", view.GetName()) require.Equal(t, @@ -130,7 +130,7 @@ SELECT nextval(105:::REGCLASS);`, // Make sure type has correct back references. typn := tree.MakeQualifiedTypeName("defaultdb", "public", "notmyworkday") - _, typ, err := descs.PrefixAndType(ctx, col.ByNameWithLeased(txn).Get(), &typn) + _, typ, err := descs.PrefixAndType(ctx, col.ByNameWithLeased(txn.KV()).Get(), &typn) require.NoError(t, err) require.Equal(t, "notmyworkday", typ.GetName()) require.Equal(t, @@ -144,14 +144,14 @@ SELECT nextval(105:::REGCLASS);`, // DROP the function and make sure dependencies are cleared. tDB.Exec(t, "DROP FUNCTION f") - err = sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - _, err := col.ByIDWithLeased(txn).WithoutNonPublic().Get().Function(ctx, 109) + err = sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + _, err := col.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Function(ctx, 109) require.Error(t, err) require.Regexp(t, "descriptor is being dropped", err.Error()) // Make sure columns and indexes has correct back references. 
tn := tree.MakeTableNameWithSchema("defaultdb", "public", "t") - _, tbl, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &tn) + _, tbl, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &tn) require.NoError(t, err) require.Equal(t, []descpb.TableDescriptor_Reference{ @@ -162,19 +162,19 @@ SELECT nextval(105:::REGCLASS);`, // Make sure sequence has correct back references. sqn := tree.MakeTableNameWithSchema("defaultdb", "public", "sq1") - _, seq, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &sqn) + _, seq, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &sqn) require.NoError(t, err) require.Nil(t, seq.GetDependedOnBy()) // Make sure view has correct back references. vn := tree.MakeTableNameWithSchema("defaultdb", "public", "v") - _, view, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), &vn) + _, view, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), &vn) require.NoError(t, err) require.Nil(t, view.GetDependedOnBy()) // Make sure type has correct back references. typn := tree.MakeQualifiedTypeName("defaultdb", "public", "notmyworkday") - _, typ, err := descs.PrefixAndType(ctx, col.ByNameWithLeased(txn).Get(), &typn) + _, typ, err := descs.PrefixAndType(ctx, col.ByNameWithLeased(txn.KV()).Get(), &typn) require.NoError(t, err) require.Nil(t, typ.GetReferencingDescriptorIDs()) @@ -373,8 +373,8 @@ $$; // Test drop/rename behavior in legacy schema changer. 
tDB.Exec(t, "SET use_declarative_schema_changer = off;") - err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - fnDesc, err := col.ByIDWithLeased(txn).WithoutNonPublic().Get().Function(ctx, 113) + err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + fnDesc, err := col.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Function(ctx, 113) require.NoError(t, err) require.Equal(t, "f", fnDesc.GetName()) require.True(t, fnDesc.Public()) @@ -384,8 +384,8 @@ $$; tDB.Exec(t, tc.stmt) - err = sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - _, err := col.ByIDWithLeased(txn).WithoutNonPublic().Get().Function(ctx, 113) + err = sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + _, err := col.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Function(ctx, 113) require.Error(t, err) require.Regexp(t, "descriptor is being dropped", err.Error()) return nil @@ -406,8 +406,8 @@ $$; // Test drop/rename behavior in legacy schema changer. 
tDB.Exec(t, "SET use_declarative_schema_changer = on;") - err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - fnDesc, err := col.ByIDWithLeased(txn).WithoutNonPublic().Get().Function(ctx, 113) + err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + fnDesc, err := col.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Function(ctx, 113) require.NoError(t, err) require.Equal(t, "f", fnDesc.GetName()) require.True(t, fnDesc.Public()) @@ -417,8 +417,8 @@ $$; tDB.Exec(t, tc.stmt) - err = sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - _, err := col.ByIDWithLeased(txn).WithoutNonPublic().Get().Function(ctx, 113) + err = sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + _, err := col.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Function(ctx, 113) require.Error(t, err) require.Regexp(t, "descriptor is being dropped", err.Error()) return nil @@ -426,4 +426,5 @@ $$; require.NoError(t, err) }) } + } diff --git a/pkg/sql/drop_role.go b/pkg/sql/drop_role.go index fe10496906f6..04ca2050ed68 100644 --- a/pkg/sql/drop_role.go +++ b/pkg/sql/drop_role.go @@ -353,10 +353,11 @@ func (n *DropRoleNode) startExec(params runParams) error { } // Check if user owns any scheduled jobs. 
- numSchedulesRow, err := params.ExecCfg().InternalExecutor.QueryRow( + numSchedulesRow, err := params.p.InternalSQLTxn().QueryRowEx( params.ctx, "check-user-schedules", params.p.txn, + sessiondata.NodeUserSessionDataOverride, "SELECT count(*) FROM system.scheduled_jobs WHERE owner=$1", normalizedUsername, ) @@ -373,10 +374,11 @@ func (n *DropRoleNode) startExec(params runParams) error { normalizedUsername, numSchedules) } - numUsersDeleted, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.Exec( + numUsersDeleted, err := params.p.InternalSQLTxn().ExecEx( params.ctx, opName, params.p.txn, + sessiondata.NodeUserSessionDataOverride, `DELETE FROM system.users WHERE username=$1`, normalizedUsername, ) @@ -389,10 +391,11 @@ func (n *DropRoleNode) startExec(params runParams) error { } // Drop all role memberships involving the user/role. - rowsDeleted, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.Exec( + rowsDeleted, err := params.p.InternalSQLTxn().ExecEx( params.ctx, "drop-role-membership", params.p.txn, + sessiondata.NodeUserSessionDataOverride, `DELETE FROM system.role_members WHERE "role" = $1 OR "member" = $1`, normalizedUsername, ) @@ -401,10 +404,11 @@ func (n *DropRoleNode) startExec(params runParams) error { } numRoleMembershipsDeleted += rowsDeleted - _, err = params.extendedEvalCtx.ExecCfg.InternalExecutor.Exec( + _, err = params.p.InternalSQLTxn().ExecEx( params.ctx, opName, params.p.txn, + sessiondata.NodeUserSessionDataOverride, fmt.Sprintf( `DELETE FROM %s WHERE username=$1`, sessioninit.RoleOptionsTableName, @@ -415,10 +419,11 @@ func (n *DropRoleNode) startExec(params runParams) error { return err } - rowsDeleted, err = params.extendedEvalCtx.ExecCfg.InternalExecutor.Exec( + rowsDeleted, err = params.p.InternalSQLTxn().ExecEx( params.ctx, opName, params.p.txn, + sessiondata.NodeUserSessionDataOverride, fmt.Sprintf( `DELETE FROM %s WHERE role_name = $1`, sessioninit.DatabaseRoleSettingsTableName, diff --git a/pkg/sql/drop_schema.go 
b/pkg/sql/drop_schema.go index 24e5fd0077fd..c2a44c495ac6 100644 --- a/pkg/sql/drop_schema.go +++ b/pkg/sql/drop_schema.go @@ -241,7 +241,7 @@ func (p *planner) createDropSchemaJob( typeIDs = append(typeIDs, t.ID) } - _, err := p.extendedEvalCtx.QueueJob(ctx, p.Txn(), jobs.Record{ + _, err := p.extendedEvalCtx.QueueJob(ctx, p.InternalSQLTxn(), jobs.Record{ Description: jobDesc, Username: p.User(), DescriptorIDs: schemas, diff --git a/pkg/sql/drop_table.go b/pkg/sql/drop_table.go index 99cf7a3860c9..10c80902fa90 100644 --- a/pkg/sql/drop_table.go +++ b/pkg/sql/drop_table.go @@ -15,12 +15,12 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/jobs" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/funcdesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scerrors" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -419,7 +419,7 @@ func (p *planner) markTableMutationJobsSuccessful( delete(p.ExtendedEvalContext().SchemaChangeJobRecords, tableDesc.ID) continue } - mutationJob, err := p.execCfg.JobRegistry.LoadJobWithTxn(ctx, jobID, p.txn) + mutationJob, err := p.execCfg.JobRegistry.LoadJobWithTxn(ctx, jobID, p.InternalSQLTxn()) if err != nil { if jobs.HasJobNotFoundError(err) { log.Warningf(ctx, "mutation job %d not found", jobID) @@ -427,24 +427,25 @@ func (p *planner) markTableMutationJobsSuccessful( } return err } - if err := mutationJob.Update( - ctx, p.txn, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { - status := md.Status - switch status { - case jobs.StatusSucceeded, jobs.StatusCanceled, jobs.StatusFailed, jobs.StatusRevertFailed: - log.Warningf(ctx, 
"mutation job %d in unexpected state %s", jobID, status) - return nil - case jobs.StatusRunning, jobs.StatusPending: - status = jobs.StatusSucceeded - default: - // We shouldn't mark jobs as succeeded if they're not in a state where - // they're eligible to ever succeed, so mark them as failed. - status = jobs.StatusFailed - } - log.Infof(ctx, "marking mutation job %d for dropped table as %s", jobID, status) - ju.UpdateStatus(status) + if err := mutationJob.WithTxn(p.InternalSQLTxn()).Update(ctx, func( + txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, + ) error { + status := md.Status + switch status { + case jobs.StatusSucceeded, jobs.StatusCanceled, jobs.StatusFailed, jobs.StatusRevertFailed: + log.Warningf(ctx, "mutation job %d in unexpected state %s", jobID, status) return nil - }); err != nil { + case jobs.StatusRunning, jobs.StatusPending: + status = jobs.StatusSucceeded + default: + // We shouldn't mark jobs as succeeded if they're not in a state where + // they're eligible to ever succeed, so mark them as failed. 
+ status = jobs.StatusFailed + } + log.Infof(ctx, "marking mutation job %d for dropped table as %s", jobID, status) + ju.UpdateStatus(status) + return nil + }); err != nil { return errors.Wrap(err, "updating mutation job for dropped table") } } diff --git a/pkg/sql/drop_test.go b/pkg/sql/drop_test.go index f43cd4ea435d..b5dbbd548ee2 100644 --- a/pkg/sql/drop_test.go +++ b/pkg/sql/drop_test.go @@ -37,6 +37,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/gcjob" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqltestutils" @@ -132,8 +133,8 @@ INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd'); tbDesc := desctestutils.TestingGetPublicTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") var dbDesc catalog.DatabaseDescriptor - require.NoError(t, sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { - dbDesc, err = col.ByID(txn).Get().Database(ctx, tbDesc.GetParentID()) + require.NoError(t, sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) (err error) { + dbDesc, err = col.ByID(txn.KV()).Get().Database(ctx, tbDesc.GetParentID()) return err })) @@ -298,8 +299,8 @@ INSERT INTO t.kv2 VALUES ('c', 'd'), ('a', 'b'), ('e', 'a'); tbDesc := desctestutils.TestingGetPublicTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") tb2Desc := desctestutils.TestingGetPublicTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv2") var dbDesc catalog.DatabaseDescriptor - require.NoError(t, sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { - dbDesc, err = col.ByID(txn).Get().Database(ctx, tbDesc.GetParentID()) + require.NoError(t, sql.TestingDescsTxn(ctx, s, func(ctx 
context.Context, txn isql.Txn, col *descs.Collection) (err error) { + dbDesc, err = col.ByID(txn.KV()).Get().Database(ctx, tbDesc.GetParentID()) return err })) @@ -867,8 +868,8 @@ func TestDropTableWhileUpgradingFormat(t *testing.T) { // Simulate a migration upgrading the table descriptor's format version after // the table has been dropped but before the truncation has occurred. - if err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { - tbl, err := col.ByID(txn).Get().Table(ctx, tableDesc.ID) + if err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) (err error) { + tbl, err := col.ByID(txn.KV()).Get().Table(ctx, tableDesc.ID) if err != nil { return err } @@ -1211,8 +1212,8 @@ func TestDropIndexOnHashShardedIndexWithStoredShardColumn(t *testing.T) { query = `SELECT id FROM system.namespace WHERE name = 'tbl'` tdb.QueryRow(t, query).Scan(&tableID) require.NoError(t, sql.TestingDescsTxn(ctx, s, - func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { - tableDesc, err = col.ByID(txn).Get().Table(ctx, tableID) + func(ctx context.Context, txn isql.Txn, col *descs.Collection) (err error) { + tableDesc, err = col.ByID(txn.KV()).Get().Table(ctx, tableID) return err })) shardIdx, err := tableDesc.FindIndexWithName("idx") @@ -1229,8 +1230,8 @@ func TestDropIndexOnHashShardedIndexWithStoredShardColumn(t *testing.T) { // Assert that the index is dropped but the shard column remains after dropping the index. 
require.NoError(t, sql.TestingDescsTxn(ctx, s, - func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { - tableDesc, err = col.ByID(txn).Get().Table(ctx, tableID) + func(ctx context.Context, txn isql.Txn, col *descs.Collection) (err error) { + tableDesc, err = col.ByID(txn.KV()).Get().Table(ctx, tableID) return err })) _, err = tableDesc.FindIndexWithName("idx") diff --git a/pkg/sql/event_log.go b/pkg/sql/event_log.go index 1b3427534f63..403f3a0540c4 100644 --- a/pkg/sql/event_log.go +++ b/pkg/sql/event_log.go @@ -20,15 +20,16 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/obsservice/obspb" v1 "github.com/cockroachdb/cockroach/pkg/obsservice/obspb/opentelemetry-proto/common/v1" otel_logs_pb "github.com/cockroachdb/cockroach/pkg/obsservice/obspb/opentelemetry-proto/logs/v1" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/util/contextutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" @@ -215,7 +216,7 @@ func (p *planner) logEventsWithOptions( ctx context.Context, depth int, opts eventLogOptions, entries ...logpb.EventPayload, ) error { return logEventInternalForSQLStatements(ctx, - p.extendedEvalCtx.ExecCfg, p.txn, + p.extendedEvalCtx.ExecCfg, p.InternalSQLTxn(), 1+depth, opts, p.getCommonSQLEventDetails(opts.rOpts), @@ -227,13 +228,13 @@ func (p *planner) logEventsWithOptions( func logEventInternalForSchemaChanges( ctx context.Context, 
execCfg *ExecutorConfig, - txn *kv.Txn, + txn isql.Txn, sqlInstanceID base.SQLInstanceID, descID descpb.ID, mutationID descpb.MutationID, event logpb.EventPayload, ) error { - event.CommonDetails().Timestamp = txn.ReadTimestamp().WallTime + event.CommonDetails().Timestamp = txn.KV().ReadTimestamp().WallTime scCommon, ok := event.(eventpb.EventWithCommonSchemaChangePayload) if !ok { return errors.AssertionFailedf("unknown event type: %T", event) @@ -271,7 +272,7 @@ func logEventInternalForSchemaChanges( func logEventInternalForSQLStatements( ctx context.Context, execCfg *ExecutorConfig, - txn *kv.Txn, + txn isql.Txn, depth int, opts eventLogOptions, commonSQLEventDetails eventpb.CommonSQLEventDetails, @@ -283,7 +284,7 @@ func logEventInternalForSQLStatements( // No txn is set for COPY, so use now instead. event.CommonDetails().Timestamp = timeutil.Now().UnixNano() } else { - event.CommonDetails().Timestamp = txn.ReadTimestamp().WallTime + event.CommonDetails().Timestamp = txn.KV().ReadTimestamp().WallTime } sqlCommon, ok := event.(eventpb.EventWithCommonSQLPayload) if !ok { @@ -335,7 +336,7 @@ func logEventInternalForSQLStatements( } type schemaChangerEventLogger struct { - txn *kv.Txn + txn isql.Txn execCfg *ExecutorConfig depth int } @@ -344,7 +345,7 @@ var _ scexec.EventLogger = (*schemaChangerEventLogger)(nil) // NewSchemaChangerEventLogger returns a scexec.EventLogger implementation. 
func NewSchemaChangerEventLogger( - txn *kv.Txn, execCfg *ExecutorConfig, depth int, + txn isql.Txn, execCfg *ExecutorConfig, depth int, ) scexec.EventLogger { return &schemaChangerEventLogger{ txn: txn, @@ -369,7 +370,7 @@ func (l schemaChangerEventLogger) LogEvent( func (l schemaChangerEventLogger) LogEventForSchemaChange( ctx context.Context, event logpb.EventPayload, ) error { - event.CommonDetails().Timestamp = l.txn.ReadTimestamp().WallTime + event.CommonDetails().Timestamp = l.txn.KV().ReadTimestamp().WallTime scCommon, ok := event.(eventpb.EventWithCommonSchemaChangePayload) if !ok { return errors.AssertionFailedf("unknown event type: %T", event) @@ -388,14 +389,14 @@ func (l schemaChangerEventLogger) LogEventForSchemaChange( func LogEventForJobs( ctx context.Context, execCfg *ExecutorConfig, - txn *kv.Txn, + txn isql.Txn, event logpb.EventPayload, jobID int64, payload jobspb.Payload, user username.SQLUsername, status jobs.Status, ) error { - event.CommonDetails().Timestamp = txn.ReadTimestamp().WallTime + event.CommonDetails().Timestamp = txn.KV().ReadTimestamp().WallTime jobCommon, ok := event.(eventpb.EventWithCommonJobPayload) if !ok { return errors.AssertionFailedf("unknown event type: %T", event) @@ -508,7 +509,7 @@ func InsertEventRecords( func insertEventRecords( ctx context.Context, execCfg *ExecutorConfig, - txn *kv.Txn, + txn isql.Txn, depth int, opts eventLogOptions, entries ...logpb.EventPayload, @@ -561,7 +562,7 @@ func insertEventRecords( // ensure that the external logging only sees the event when the // transaction commits. if txn != nil && opts.dst.hasFlag(LogExternally) { - txn.AddCommitTrigger(func(ctx context.Context) { + txn.KV().AddCommitTrigger(func(ctx context.Context) { for i := range entries { log.StructuredEvent(ctx, entries[i]) } @@ -573,8 +574,8 @@ func insertEventRecords( if txn != nil && syncWrites { // Yes, do it now. 
query, args, otelEvents := prepareEventWrite(ctx, execCfg, entries) - txn.AddCommitTrigger(func(ctx context.Context) { sendEventsToObsService(ctx, execCfg, otelEvents) }) - return writeToSystemEventsTable(ctx, execCfg.InternalExecutor, txn, len(entries), query, args) + txn.KV().AddCommitTrigger(func(ctx context.Context) { sendEventsToObsService(ctx, execCfg, otelEvents) }) + return writeToSystemEventsTable(ctx, txn, len(entries), query, args) } // No: do them async. // With txn: trigger async write at end of txn (no event logged if txn aborts). @@ -582,7 +583,7 @@ func insertEventRecords( if txn == nil { asyncWriteToOtelAndSystemEventsTable(ctx, execCfg, entries) } else { - txn.AddCommitTrigger(func(ctx context.Context) { + txn.KV().AddCommitTrigger(func(ctx context.Context) { asyncWriteToOtelAndSystemEventsTable(ctx, execCfg, entries) }) } @@ -634,8 +635,8 @@ func asyncWriteToOtelAndSystemEventsTable( for r := retry.Start(retryOpts); r.Next(); { // Don't try too long to write if the system table is unavailable. if err := contextutil.RunWithTimeout(ctx, "record-events", perAttemptTimeout, func(ctx context.Context) error { - return execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return writeToSystemEventsTable(ctx, execCfg.InternalExecutor, txn, len(entries), query, args) + return execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return writeToSystemEventsTable(ctx, txn, len(entries), query, args) }) }); err != nil { log.Ops.Warningf(ctx, "unable to save %d entries to system.eventlog: %v", len(entries), err) @@ -734,14 +735,13 @@ VALUES($1, $2, $3, $4, 0)` } func writeToSystemEventsTable( - ctx context.Context, - ie *InternalExecutor, - txn *kv.Txn, - numEntries int, - query string, - args []interface{}, + ctx context.Context, txn isql.Txn, numEntries int, query string, args []interface{}, ) error { - rows, err := ie.Exec(ctx, "log-event", txn, query, args...) 
+ rows, err := txn.ExecEx( + ctx, "log-event", txn.KV(), + sessiondata.NodeUserSessionDataOverride, + query, args..., + ) if err != nil { return err } diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go index 3c1a9900c881..5cb1ff1a45eb 100644 --- a/pkg/sql/exec_util.go +++ b/pkg/sql/exec_util.go @@ -67,6 +67,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/gcjob/gcjobnotifier" "github.com/cockroachdb/cockroach/pkg/sql/idxusage" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/lex" "github.com/cockroachdb/cockroach/pkg/sql/opt" "github.com/cockroachdb/cockroach/pkg/sql/optionalnodeliveness" @@ -1215,7 +1216,6 @@ type ExecutorConfig struct { DistSQLPlanner *DistSQLPlanner TableStatsCache *stats.TableStatisticsCache StatsRefresher *stats.Refresher - InternalExecutor *InternalExecutor QueryCache *querycache.C SchemaChangerMetrics *SchemaChangerMetrics @@ -1347,9 +1347,9 @@ type ExecutorConfig struct { // records. SpanConfigKVAccessor spanconfig.KVAccessor - // InternalExecutorFactory is used to create an InternalExecutor bound with - // SessionData and other ExtraTxnState. - InternalExecutorFactory descs.TxnManager + // InternalDB is used to create an isql.Executor bound with SessionData and + // other ExtraTxnState. + InternalDB *InternalDB // ConsistencyChecker is to generate the results in calls to // crdb_internal.check_consistency. @@ -1410,6 +1410,11 @@ func (cfg *ExecutorConfig) SV() *settings.Values { return &cfg.Settings.SV } +func (cfg *ExecutorConfig) JobsKnobs() *jobs.TestingKnobs { + knobs, _ := cfg.DistSQLSrv.TestingKnobs.JobsTestingKnobs.(*jobs.TestingKnobs) + return knobs +} + var _ base.ModuleTestingKnobs = &ExecutorTestingKnobs{} // ModuleTestingKnobs is part of the base.ModuleTestingKnobs interface. 
@@ -3514,12 +3519,16 @@ func formatStatementSummary(ast tree.Statement) string { // DescsTxn is a convenient method for running a transaction on descriptors // when you have an ExecutorConfig. +// +// TODO(ajwerner): Remove this now that it is such a thin shim. func DescsTxn( ctx context.Context, execCfg *ExecutorConfig, - f func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error, + f func(ctx context.Context, txn isql.Txn, col *descs.Collection) error, ) error { - return execCfg.InternalExecutorFactory.DescsTxn(ctx, execCfg.DB, f) + return execCfg.InternalDB.DescsTxn(ctx, func(ctx context.Context, txn descs.Txn) error { + return f(ctx, txn, txn.Descriptors()) + }) } // TestingDescsTxn is a convenience function for running a transaction on @@ -3527,7 +3536,7 @@ func DescsTxn( func TestingDescsTxn( ctx context.Context, s serverutils.TestServerInterface, - f func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error, + f func(ctx context.Context, txn isql.Txn, col *descs.Collection) error, ) error { execCfg := s.ExecutorConfig().(ExecutorConfig) return DescsTxn(ctx, &execCfg, f) diff --git a/pkg/sql/execinfra/BUILD.bazel b/pkg/sql/execinfra/BUILD.bazel index 3094eeb55bfa..c2200d88b730 100644 --- a/pkg/sql/execinfra/BUILD.bazel +++ b/pkg/sql/execinfra/BUILD.bazel @@ -56,7 +56,6 @@ go_library( "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", "//pkg/sql/sqlliveness", - "//pkg/sql/sqlutil", "//pkg/sql/types", "//pkg/storage/fs", "//pkg/util/admission", diff --git a/pkg/sql/execinfra/server_config.go b/pkg/sql/execinfra/server_config.go index 3a770b5d84e9..82267d185c6d 100644 --- a/pkg/sql/execinfra/server_config.go +++ b/pkg/sql/execinfra/server_config.go @@ -21,7 +21,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" 
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangecache" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/diskmap" @@ -37,7 +36,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/storage/fs" "github.com/cockroachdb/cockroach/pkg/util/admission" "github.com/cockroachdb/cockroach/pkg/util/limit" @@ -75,11 +73,7 @@ type ServerConfig struct { Codec keys.SQLCodec // DB is a handle to the cluster. - DB *kv.DB - // Executor can be used to run "internal queries". Note that Flows also have - // access to an executor in the EvalContext. That one is "session bound" - // whereas this one isn't. - Executor sqlutil.InternalExecutor + DB descs.DB RPCContext *rpc.Context Stopper *stop.Stopper @@ -148,11 +142,6 @@ type ServerConfig struct { // Dialer for communication between SQL nodes/pods. PodNodeDialer *nodedialer.Dialer - // InternalExecutorFactory is used to construct session-bound - // executors. The idea is that a higher-layer binds some of the arguments - // required, so that users of ServerConfig don't have to care about them. 
- InternalExecutorFactory descs.TxnManager - ExternalStorage cloud.ExternalStorageFactory ExternalStorageFromURI cloud.ExternalStorageFromURIFactory diff --git a/pkg/sql/execstats/traceanalyzer_test.go b/pkg/sql/execstats/traceanalyzer_test.go index 3d7375165449..bdaa23371eda 100644 --- a/pkg/sql/execstats/traceanalyzer_test.go +++ b/pkg/sql/execstats/traceanalyzer_test.go @@ -108,7 +108,7 @@ func TestTraceAnalyzer(t *testing.T) { for _, vectorizeMode := range []sessiondatapb.VectorizeExecMode{sessiondatapb.VectorizeOff, sessiondatapb.VectorizeOn} { execCtx, finishAndCollect := tracing.ContextWithRecordingSpan(ctx, execCfg.AmbientCtx.Tracer, t.Name()) defer finishAndCollect() - ie := execCfg.InternalExecutorFactory.NewInternalExecutor(&sessiondata.SessionData{ + ie := execCfg.InternalDB.NewInternalExecutor(&sessiondata.SessionData{ SessionData: sessiondatapb.SessionData{ VectorizeMode: vectorizeMode, }, diff --git a/pkg/sql/function_resolver_test.go b/pkg/sql/function_resolver_test.go index 241d537bbe0c..fa3c04a123d4 100644 --- a/pkg/sql/function_resolver_test.go +++ b/pkg/sql/function_resolver_test.go @@ -16,11 +16,11 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/funcdesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -74,10 +74,10 @@ CREATE FUNCTION f(INT) RETURNS INT IMMUTABLE LANGUAGE SQL AS $$ SELECT a FROM t require.NoError(t, protoutil.Unmarshal(sessionSerialized, &sessionData)) } - err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { + err := sql.TestingDescsTxn(ctx, s, func(ctx 
context.Context, txn isql.Txn, col *descs.Collection) error { execCfg := s.ExecutorConfig().(sql.ExecutorConfig) planner, cleanup := sql.NewInternalPlanner( - "resolve-index", txn, username.RootUserName(), &sql.MemoryMetrics{}, &execCfg, sessionData, + "resolve-index", txn.KV(), username.RootUserName(), &sql.MemoryMetrics{}, &execCfg, sessionData, ) defer cleanup() ec := planner.(interface{ EvalContext() *eval.Context }).EvalContext() @@ -250,10 +250,10 @@ CREATE FUNCTION sc1.lower() RETURNS INT IMMUTABLE LANGUAGE SQL AS $$ SELECT 3 $$ require.NoError(t, protoutil.Unmarshal(sessionSerialized, &sessionData)) } - err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { + err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { execCfg := s.ExecutorConfig().(sql.ExecutorConfig) planner, cleanup := sql.NewInternalPlanner( - "resolve-index", txn, username.RootUserName(), &sql.MemoryMetrics{}, &execCfg, sessionData, + "resolve-index", txn.KV(), username.RootUserName(), &sql.MemoryMetrics{}, &execCfg, sessionData, ) defer cleanup() ec := planner.(interface{ EvalContext() *eval.Context }).EvalContext() @@ -387,10 +387,10 @@ CREATE FUNCTION sc1.lower(a STRING) RETURNS STRING IMMUTABLE LANGUAGE SQL AS $$ require.NoError(t, protoutil.Unmarshal(sessionSerialized, &sessionData)) } - err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { + err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { execCfg := s.ExecutorConfig().(sql.ExecutorConfig) planner, cleanup := sql.NewInternalPlanner( - "resolve-index", txn, username.RootUserName(), &sql.MemoryMetrics{}, &execCfg, sessionData, + "resolve-index", txn.KV(), username.RootUserName(), &sql.MemoryMetrics{}, &execCfg, sessionData, ) defer cleanup() ec := planner.(interface{ EvalContext() *eval.Context }).EvalContext() diff --git 
a/pkg/sql/gcjob/BUILD.bazel b/pkg/sql/gcjob/BUILD.bazel index 273f587effe6..dc4a7ff96e84 100644 --- a/pkg/sql/gcjob/BUILD.bazel +++ b/pkg/sql/gcjob/BUILD.bazel @@ -35,6 +35,7 @@ go_library( "//pkg/sql/catalog", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/descs", + "//pkg/sql/isql", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/sqlerrors", diff --git a/pkg/sql/gcjob/gc_job.go b/pkg/sql/gcjob/gc_job.go index e085dfd12504..e1de8442585c 100644 --- a/pkg/sql/gcjob/gc_job.go +++ b/pkg/sql/gcjob/gc_job.go @@ -27,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/timeutil" @@ -132,8 +133,8 @@ func deleteTableData( } for _, droppedTable := range progress.Tables { var table catalog.TableDescriptor - if err := sql.DescsTxn(ctx, cfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { - table, err = col.ByID(txn).Get().Table(ctx, droppedTable.ID) + if err := sql.DescsTxn(ctx, cfg, func(ctx context.Context, txn isql.Txn, col *descs.Collection) (err error) { + table, err = col.ByID(txn.KV()).Get().Table(ctx, droppedTable.ID) return err }); err != nil { if errors.Is(err, catalog.ErrDescriptorNotFound) { diff --git a/pkg/sql/gcjob/gc_job_utils.go b/pkg/sql/gcjob/gc_job_utils.go index 5cfdcb5471f7..fdc8e35f53d6 100644 --- a/pkg/sql/gcjob/gc_job_utils.go +++ b/pkg/sql/gcjob/gc_job_utils.go @@ -17,9 +17,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" 
"github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) @@ -68,7 +68,7 @@ func initDetailsAndProgress( var details jobspb.SchemaChangeGCDetails var progress *jobspb.SchemaChangeGCProgress var job *jobs.Job - if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { var err error job, err = execCfg.JobRegistry.LoadJobWithTxn(ctx, jobID, txn) if err != nil { @@ -122,12 +122,12 @@ func initializeProgress( } if update { - if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { job, err := execCfg.JobRegistry.LoadJobWithTxn(ctx, jobID, txn) if err != nil { return err } - return job.SetProgress(ctx, txn, *progress) + return job.WithTxn(txn).SetProgress(ctx, *progress) }); err != nil { return err } @@ -278,16 +278,16 @@ func persistProgress( progress *jobspb.SchemaChangeGCProgress, runningStatus jobs.RunningStatus, ) { - if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { job, err := execCfg.JobRegistry.LoadJobWithTxn(ctx, jobID, txn) if err != nil { return err } - if err := job.SetProgress(ctx, txn, *progress); err != nil { + if err := job.WithTxn(txn).SetProgress(ctx, *progress); err != nil { return err } log.Infof(ctx, "updated progress payload: %+v", progress) - err = job.RunningStatus(ctx, txn, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) { + err = job.WithTxn(txn).RunningStatus(ctx, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) { return runningStatus, nil }) if err != nil { diff --git a/pkg/sql/gcjob/index_garbage_collection.go b/pkg/sql/gcjob/index_garbage_collection.go index 29311183bc97..cb64c80b5aa8 100644 --- a/pkg/sql/gcjob/index_garbage_collection.go +++ 
b/pkg/sql/gcjob/index_garbage_collection.go @@ -22,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" @@ -119,9 +120,9 @@ func gcIndexes( // All the data chunks have been removed. Now also removed the // zone configs for the dropped indexes, if any. removeIndexZoneConfigs := func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { - freshParentTableDesc, err := descriptors.MutableByID(txn).Table(ctx, parentID) + freshParentTableDesc, err := descriptors.MutableByID(txn.KV()).Table(ctx, parentID) if err != nil { return err } @@ -200,9 +201,9 @@ func deleteIndexZoneConfigsAfterGC( // All the data chunks have been removed. Now also removed the // zone configs for the dropped indexes, if any. 
removeIndexZoneConfigs := func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { - freshParentTableDesc, err := descriptors.MutableByID(txn).Table(ctx, parentID) + freshParentTableDesc, err := descriptors.MutableByID(txn.KV()).Table(ctx, parentID) if err != nil { return err } diff --git a/pkg/sql/gcjob/refresh_statuses.go b/pkg/sql/gcjob/refresh_statuses.go index 6886d9b48c3e..2b9549a54481 100644 --- a/pkg/sql/gcjob/refresh_statuses.go +++ b/pkg/sql/gcjob/refresh_statuses.go @@ -19,7 +19,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -28,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/timeutil" @@ -103,8 +103,8 @@ func updateStatusForGCElements( protectedtsCache := execCfg.ProtectedTimestampProvider earliestDeadline := timeutil.Unix(0, int64(math.MaxInt64)) - if err := sql.DescsTxn(ctx, execCfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - table, err := col.ByID(txn).Get().Table(ctx, tableID) + if err := sql.DescsTxn(ctx, execCfg, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + table, err := col.ByID(txn.KV()).Get().Table(ctx, tableID) if err != nil { return err } @@ -410,8 +410,8 @@ func isTenantProtected( isProtected := false ptsProvider := 
execCfg.ProtectedTimestampProvider - if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - ptsState, err := ptsProvider.GetState(ctx, txn) + if err := execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + ptsState, err := ptsProvider.WithTxn(txn).GetState(ctx) if err != nil { return errors.Wrap(err, "failed to get protectedts State") } diff --git a/pkg/sql/gcjob/table_garbage_collection.go b/pkg/sql/gcjob/table_garbage_collection.go index f567781036c5..849dd67d6d9c 100644 --- a/pkg/sql/gcjob/table_garbage_collection.go +++ b/pkg/sql/gcjob/table_garbage_collection.go @@ -25,6 +25,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/timeutil" @@ -48,8 +49,8 @@ func gcTables( } var table catalog.TableDescriptor - if err := sql.DescsTxn(ctx, execCfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { - table, err = col.ByID(txn).Get().Table(ctx, droppedTable.ID) + if err := sql.DescsTxn(ctx, execCfg, func(ctx context.Context, txn isql.Txn, col *descs.Collection) (err error) { + table, err = col.ByID(txn.KV()).Get().Table(ctx, droppedTable.ID) return err }); err != nil { if errors.Is(err, catalog.ErrDescriptorNotFound) { @@ -287,8 +288,8 @@ func deleteTableDescriptorsAfterGC( } var table catalog.TableDescriptor - if err := sql.DescsTxn(ctx, execCfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { - table, err = col.ByID(txn).Get().Table(ctx, droppedTable.ID) + if err := sql.DescsTxn(ctx, execCfg, func(ctx context.Context, txn isql.Txn, col *descs.Collection) (err error) { + table, err = col.ByID(txn.KV()).Get().Table(ctx, droppedTable.ID) 
return err }); err != nil { if errors.Is(err, catalog.ErrDescriptorNotFound) { diff --git a/pkg/sql/gcjob/tenant_garbage_collection.go b/pkg/sql/gcjob/tenant_garbage_collection.go index 97621702b27c..5defda20cb08 100644 --- a/pkg/sql/gcjob/tenant_garbage_collection.go +++ b/pkg/sql/gcjob/tenant_garbage_collection.go @@ -16,6 +16,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -42,8 +44,11 @@ func gcTenant( ) } - info, err := sql.GetTenantRecordByID(ctx, execCfg, nil /* txn */, roachpb.MustMakeTenantID(tenID)) - if err != nil { + var info *descpb.TenantInfo + if err := execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { + info, err = sql.GetTenantRecordByID(ctx, txn, roachpb.MustMakeTenantID(tenID)) + return err + }); err != nil { if pgerror.GetPGCode(err) == pgcode.UndefinedObject { // The tenant row is deleted only after its data is cleared so there is // nothing to do in this case but mark the job as done.
diff --git a/pkg/sql/gcjob_test/BUILD.bazel b/pkg/sql/gcjob_test/BUILD.bazel index dcc74794157d..1974cb1d5fdd 100644 --- a/pkg/sql/gcjob_test/BUILD.bazel +++ b/pkg/sql/gcjob_test/BUILD.bazel @@ -33,8 +33,8 @@ go_test( "//pkg/sql/catalog/tabledesc", "//pkg/sql/gcjob", "//pkg/sql/gcjob/gcjobnotifier", + "//pkg/sql/isql", "//pkg/sql/sem/catid", - "//pkg/sql/sqlutil", "//pkg/storage", "//pkg/testutils", "//pkg/testutils/jobutils", diff --git a/pkg/sql/gcjob_test/gc_job_test.go b/pkg/sql/gcjob_test/gc_job_test.go index 91c1f121f49e..3d50388fa13d 100644 --- a/pkg/sql/gcjob_test/gc_job_test.go +++ b/pkg/sql/gcjob_test/gc_job_test.go @@ -39,8 +39,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/gcjob" "github.com/cockroachdb/cockroach/pkg/sql/gcjob/gcjobnotifier" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/jobutils" @@ -108,13 +108,13 @@ func TestSchemaChangeGCJob(t *testing.T) { var myTableDesc *tabledesc.Mutable var myOtherTableDesc *tabledesc.Mutable - if err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - myImm, err := col.ByID(txn).Get().Table(ctx, myTableID) + if err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + myImm, err := col.ByID(txn.KV()).Get().Table(ctx, myTableID) if err != nil { return err } myTableDesc = tabledesc.NewBuilder(myImm.TableDesc()).BuildExistingMutableTable() - myOtherImm, err := col.ByID(txn).Get().Table(ctx, myOtherTableID) + myOtherImm, err := col.ByID(txn.KV()).Get().Table(ctx, myOtherTableID) if err != nil { return err } @@ -208,7 +208,7 @@ func TestSchemaChangeGCJob(t *testing.T) { Details: details, } - job, 
err := jobs.TestingCreateAndStartJob(ctx, jobRegistry, kvDB, jobRecord) + job, err := jobs.TestingCreateAndStartJob(ctx, jobRegistry, s.InternalDB().(isql.DB), jobRecord) if err != nil { t.Fatal(err) } @@ -237,8 +237,8 @@ func TestSchemaChangeGCJob(t *testing.T) { } } - if err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - myImm, err := col.ByID(txn).Get().Table(ctx, myTableID) + if err := sql.TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + myImm, err := col.ByID(txn.KV()).Get().Table(ctx, myTableID) if err != nil { return err } @@ -248,7 +248,7 @@ func TestSchemaChangeGCJob(t *testing.T) { return nil } myTableDesc = tabledesc.NewBuilder(myImm.TableDesc()).BuildExistingMutableTable() - myOtherImm, err := col.ByID(txn).Get().Table(ctx, myOtherTableID) + myOtherImm, err := col.ByID(txn.KV()).Get().Table(ctx, myOtherTableID) if err != nil { return err } @@ -324,7 +324,7 @@ func TestGCResumer(t *testing.T) { ctx := context.Background() args := base.TestServerArgs{Knobs: base.TestingKnobs{JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals()}} - srv, sqlDB, kvDB := serverutils.StartServer(t, args) + srv, sqlDB, _ := serverutils.StartServer(t, args) execCfg := srv.ExecutorConfig().(sql.ExecutorConfig) jobRegistry := execCfg.JobRegistry defer srv.Stopper().Stop(ctx) @@ -342,13 +342,17 @@ func TestGCResumer(t *testing.T) { Username: username.TestUserName(), } - sj, err := jobs.TestingCreateAndStartJob(ctx, jobRegistry, kvDB, record) + sj, err := jobs.TestingCreateAndStartJob(ctx, jobRegistry, execCfg.InternalDB, record) require.NoError(t, err) require.NoError(t, sj.AwaitCompletion(ctx)) job, err := jobRegistry.LoadJob(ctx, sj.ID()) require.NoError(t, err) require.Equal(t, jobs.StatusSucceeded, job.Status()) - _, err = sql.GetTenantRecordByID(ctx, &execCfg, nil /* txn */, roachpb.MustMakeTenantID(tenID)) + err = execCfg.InternalDB.Txn(ctx, func(ctx context.Context, 
txn isql.Txn) error { + _, err := sql.GetTenantRecordByID(ctx, txn, roachpb.MustMakeTenantID(tenID)) + return err + }) + require.EqualError(t, err, `tenant "10" does not exist`) progress := job.Progress() require.Equal(t, jobspb.SchemaChangeGCProgress_CLEARED, progress.GetSchemaChangeGC().Tenant.Status) @@ -367,7 +371,7 @@ func TestGCResumer(t *testing.T) { Username: username.TestUserName(), } - sj, err := jobs.TestingCreateAndStartJob(ctx, jobRegistry, kvDB, record) + sj, err := jobs.TestingCreateAndStartJob(ctx, jobRegistry, execCfg.InternalDB, record) require.NoError(t, err) _, err = sqlDB.Exec("ALTER RANGE tenants CONFIGURE ZONE USING gc.ttlseconds = 1;") @@ -377,7 +381,10 @@ func TestGCResumer(t *testing.T) { job, err := jobRegistry.LoadJob(ctx, sj.ID()) require.NoError(t, err) require.Equal(t, jobs.StatusSucceeded, job.Status()) - _, err = sql.GetTenantRecordByID(ctx, &execCfg, nil /* txn */, roachpb.MustMakeTenantID(tenID)) + err = execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err := sql.GetTenantRecordByID(ctx, txn, roachpb.MustMakeTenantID(tenID)) + return err + }) require.EqualError(t, err, `tenant "10" does not exist`) progress := job.Progress() require.Equal(t, jobspb.SchemaChangeGCProgress_CLEARED, progress.GetSchemaChangeGC().Tenant.Status) @@ -400,7 +407,7 @@ func TestGCResumer(t *testing.T) { Username: username.TestUserName(), } - sj, err := jobs.TestingCreateAndStartJob(ctx, jobRegistry, kvDB, record) + sj, err := jobs.TestingCreateAndStartJob(ctx, jobRegistry, execCfg.InternalDB, record) require.NoError(t, err) require.Error(t, sj.AwaitCompletion(ctx)) }) @@ -427,14 +434,31 @@ func TestGCTenant(t *testing.T) { dropTenID = 11 nonexistentTenID = 12 ) - _, err := sql.CreateTenantRecord(ctx, &execCfg, nil, &descpb.TenantInfoWithUsage{ - TenantInfo: descpb.TenantInfo{ID: activeTenID}, - }, execCfg.DefaultZoneConfig) - require.NoError(t, err) - _, err = sql.CreateTenantRecord(ctx, &execCfg, nil, 
&descpb.TenantInfoWithUsage{ - TenantInfo: descpb.TenantInfo{ID: dropTenID, State: descpb.TenantInfo_DROP}, - }, execCfg.DefaultZoneConfig) - require.NoError(t, err) + require.NoError(t, execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err := sql.CreateTenantRecord( + ctx, execCfg.Codec, execCfg.Settings, + txn, + execCfg.SpanConfigKVAccessor.WithTxn(ctx, txn.KV()), + &descpb.TenantInfoWithUsage{ + TenantInfo: descpb.TenantInfo{ID: activeTenID}, + }, + execCfg.DefaultZoneConfig, + ) + return err + })) + + require.NoError(t, execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err := sql.CreateTenantRecord( + ctx, execCfg.Codec, execCfg.Settings, + txn, + execCfg.SpanConfigKVAccessor.WithTxn(ctx, txn.KV()), + &descpb.TenantInfoWithUsage{ + TenantInfo: descpb.TenantInfo{ID: dropTenID, State: descpb.TenantInfo_DROP}, + }, + execCfg.DefaultZoneConfig, + ) + return err + })) t.Run("unexpected progress state", func(t *testing.T) { progress := &jobspb.SchemaChangeGCProgress{ @@ -506,7 +530,10 @@ func TestGCTenant(t *testing.T) { require.NoError(t, gcClosure(dropTenID, progress)) require.Equal(t, jobspb.SchemaChangeGCProgress_CLEARED, progress.Tenant.Status) - _, err = sql.GetTenantRecordByID(ctx, &execCfg, nil /* txn */, roachpb.MustMakeTenantID(dropTenID)) + err = execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err := sql.GetTenantRecordByID(ctx, txn, roachpb.MustMakeTenantID(dropTenID)) + return err + }) require.EqualError(t, err, `tenant "11" does not exist`) require.NoError(t, gcClosure(dropTenID, progress)) @@ -626,9 +653,8 @@ SELECT descriptor_id, index_id close(ch) } // Ensure that the job completes successfully in either case. 
- require.NoError(t, s.JobRegistry().(*jobs.Registry).WaitForJobs( - ctx, s.InternalExecutor().(sqlutil.InternalExecutor), []jobspb.JobID{jobID}, - )) + jr := s.JobRegistry().(*jobs.Registry) + require.NoError(t, jr.WaitForJobs(ctx, []jobspb.JobID{jobID})) }) } diff --git a/pkg/sql/grant_revoke_system.go b/pkg/sql/grant_revoke_system.go index 2b8e01a28f21..38976eeac2dc 100644 --- a/pkg/sql/grant_revoke_system.go +++ b/pkg/sql/grant_revoke_system.go @@ -78,7 +78,7 @@ func (n *changeNonDescriptorBackedPrivilegesNode) startExec(params runParams) er // public means public has SELECT which // is the default case. if user == username.PublicRoleName() && userPrivs.Privileges == privilege.SELECT.Mask() { - _, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( + _, err := params.p.InternalSQLTxn().ExecEx( params.ctx, `delete-system-privilege`, params.p.txn, @@ -94,7 +94,7 @@ func (n *changeNonDescriptorBackedPrivilegesNode) startExec(params runParams) er } insertStmt := fmt.Sprintf(`UPSERT INTO system.%s VALUES ($1, $2, $3, $4)`, catconstants.SystemPrivilegeTableName) - _, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( + _, err := params.p.InternalSQLTxn().ExecEx( params.ctx, `insert-system-privilege`, params.p.txn, @@ -119,7 +119,7 @@ func (n *changeNonDescriptorBackedPrivilegesNode) startExec(params runParams) er // For Public role and virtual tables, leave an empty // row to indicate that SELECT has been revoked. if !found && (n.grantOn == privilege.VirtualTable && user == username.PublicRoleName()) { - _, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( + _, err := params.p.InternalSQLTxn().ExecEx( params.ctx, `insert-system-privilege`, params.p.txn, @@ -139,7 +139,7 @@ func (n *changeNonDescriptorBackedPrivilegesNode) startExec(params runParams) er // If there are no entries remaining on the PrivilegeDescriptor for the user // we can remove the entire row for the user. 
if !found { - _, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( + _, err := params.p.InternalSQLTxn().ExecEx( params.ctx, `delete-system-privilege`, params.p.txn, @@ -154,7 +154,7 @@ func (n *changeNonDescriptorBackedPrivilegesNode) startExec(params runParams) er continue } - _, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( + _, err := params.p.InternalSQLTxn().ExecEx( params.ctx, `insert-system-privilege`, params.p.txn, @@ -222,8 +222,9 @@ func (n *changeNonDescriptorBackedPrivilegesNode) makeSystemPrivilegeObject( var ret []syntheticprivilege.Object for _, externalConnectionName := range n.targets.ExternalConnections { // Ensure that an External Connection of this name actually exists. - if _, err := externalconn.LoadExternalConnection(ctx, string(externalConnectionName), - p.ExecCfg().InternalExecutor, p.Txn()); err != nil { + if _, err := externalconn.LoadExternalConnection( + ctx, string(externalConnectionName), p.InternalSQLTxn(), + ); err != nil { return nil, errors.Wrap(err, "failed to resolve External Connection") } @@ -264,7 +265,7 @@ func (p *planner) getPrivilegeDescriptor( TableName: d.GetName(), } return p.ExecCfg().SyntheticPrivilegeCache.Get( - ctx, p.Txn(), p.Descriptors(), vDesc, + ctx, p.InternalSQLTxn(), p.Descriptors(), vDesc, ) } return d.GetPrivileges(), nil @@ -272,7 +273,7 @@ func (p *planner) getPrivilegeDescriptor( return d.GetPrivileges(), nil case syntheticprivilege.Object: return p.ExecCfg().SyntheticPrivilegeCache.Get( - ctx, p.Txn(), p.Descriptors(), d, + ctx, p.InternalSQLTxn(), p.Descriptors(), d, ) } return nil, errors.AssertionFailedf("unknown privilege.Object type %T", po) diff --git a/pkg/sql/grant_role.go b/pkg/sql/grant_role.go index 1b1f273ea9e2..35aa765a69e2 100644 --- a/pkg/sql/grant_role.go +++ b/pkg/sql/grant_role.go @@ -15,7 +15,6 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/kv" 
"github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/decodeusername" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -25,7 +24,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/log/eventpb" "github.com/cockroachdb/cockroach/pkg/util/tracing" "github.com/cockroachdb/errors" @@ -171,75 +169,70 @@ func (p *planner) GrantRoleNode(ctx context.Context, n *tree.GrantRole) (*GrantR func (n *GrantRoleNode) startExec(params runParams) error { var rowsAffected int roleMembersHasIDs := params.p.ExecCfg().Settings.Version.IsActive(params.ctx, clusterversion.V23_1RoleMembersTableHasIDColumns) - if err := params.p.WithInternalExecutor(params.ctx, func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) error { - // Add memberships. Existing memberships are allowed. - // If admin option is false, we do not remove it from existing memberships. - memberStmt := `INSERT INTO system.role_members ("role", "member", "isAdmin") VALUES ($1, $2, $3) ON CONFLICT ("role", "member")` - if roleMembersHasIDs { - memberStmt = `INSERT INTO system.role_members ("role", "member", "isAdmin", "role_id", "member_id") VALUES ($1, $2, $3, $4, $5) ON CONFLICT ("role", "member")` - } - if n.adminOption { - // admin option: true, set "isAdmin" even if the membership exists. - memberStmt += ` DO UPDATE SET "isAdmin" = true` - } else { - // admin option: false, do not clear it from existing memberships. - memberStmt += ` DO NOTHING` - } - // Get user IDs for both role and member if ID columns have been added. - var qargs []interface{} + // Add memberships. Existing memberships are allowed. + // If admin option is false, we do not remove it from existing memberships. 
+ memberStmt := `INSERT INTO system.role_members ("role", "member", "isAdmin") VALUES ($1, $2, $3) ON CONFLICT ("role", "member")` + if roleMembersHasIDs { + memberStmt = `INSERT INTO system.role_members ("role", "member", "isAdmin", "role_id", "member_id") VALUES ($1, $2, $3, $4, $5) ON CONFLICT ("role", "member")` + } + if n.adminOption { + // admin option: true, set "isAdmin" even if the membership exists. + memberStmt += ` DO UPDATE SET "isAdmin" = true` + } else { + // admin option: false, do not clear it from existing memberships. + memberStmt += ` DO NOTHING` + } + + // Get user IDs for both role and member if ID columns have been added. + var qargs []interface{} + if roleMembersHasIDs { + qargs = make([]interface{}, 5) + } else { + qargs = make([]interface{}, 3) + } + + qargs[2] = n.adminOption + for _, r := range n.roles { + qargs[0] = r.Normalized() + if roleMembersHasIDs { - qargs = make([]interface{}, 5) - } else { - qargs = make([]interface{}, 3) + idRow, err := params.p.InternalSQLTxn().QueryRowEx( + params.ctx, "get-user-id", params.p.Txn(), + sessiondata.NodeUserSessionDataOverride, + `SELECT user_id FROM system.users WHERE username = $1`, r.Normalized(), + ) + if err != nil { + return err + } + qargs[3] = tree.MustBeDOid(idRow[0]) } - qargs[2] = n.adminOption - for _, r := range n.roles { - qargs[0] = r.Normalized() + for _, m := range n.members { + qargs[1] = m.Normalized() if roleMembersHasIDs { - idRow, err := ie.QueryRowEx( - ctx, "get-user-id", txn, + idRow, err := params.p.InternalSQLTxn().QueryRowEx( + params.ctx, "get-user-id", params.p.Txn(), sessiondata.NodeUserSessionDataOverride, - `SELECT user_id FROM system.users WHERE username = $1`, r.Normalized(), + `SELECT user_id FROM system.users WHERE username = $1`, m.Normalized(), ) if err != nil { return err } - qargs[3] = tree.MustBeDOid(idRow[0]) + qargs[4] = tree.MustBeDOid(idRow[0]) } - for _, m := range n.members { - qargs[1] = m.Normalized() - - if roleMembersHasIDs { - idRow, err := 
ie.QueryRowEx( - ctx, "get-user-id", txn, - sessiondata.NodeUserSessionDataOverride, - `SELECT user_id FROM system.users WHERE username = $1`, m.Normalized(), - ) - if err != nil { - return err - } - qargs[4] = tree.MustBeDOid(idRow[0]) - } - - memberStmtRowsAffected, err := ie.ExecEx( - ctx, "grant-role", txn, - sessiondata.RootUserSessionDataOverride, - memberStmt, qargs..., - ) - if err != nil { - return err - } - rowsAffected += memberStmtRowsAffected + memberStmtRowsAffected, err := params.p.InternalSQLTxn().ExecEx( + params.ctx, "grant-role", params.p.Txn(), + sessiondata.RootUserSessionDataOverride, + memberStmt, qargs..., + ) + if err != nil { + return err } + rowsAffected += memberStmtRowsAffected } - - return nil - }); err != nil { - return err } // We need to bump the table version to trigger a refresh if anything changed. diff --git a/pkg/sql/importer/BUILD.bazel b/pkg/sql/importer/BUILD.bazel index f6e679abd96c..cb9d96e1a0de 100644 --- a/pkg/sql/importer/BUILD.bazel +++ b/pkg/sql/importer/BUILD.bazel @@ -70,6 +70,7 @@ go_library( "//pkg/sql/exprutil", "//pkg/sql/faketreeeval", "//pkg/sql/gcjob", + "//pkg/sql/isql", "//pkg/sql/lexbase", "//pkg/sql/opt/memo", "//pkg/sql/parser", @@ -196,6 +197,7 @@ go_test( "//pkg/sql/execinfra", "//pkg/sql/execinfrapb", "//pkg/sql/gcjob", + "//pkg/sql/isql", "//pkg/sql/parser", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", @@ -206,7 +208,6 @@ go_test( "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/sql/stats", "//pkg/sql/tests", "//pkg/sql/types", diff --git a/pkg/sql/importer/exportparquet_test.go b/pkg/sql/importer/exportparquet_test.go index 6840c26fc540..97cca4714eab 100644 --- a/pkg/sql/importer/exportparquet_test.go +++ b/pkg/sql/importer/exportparquet_test.go @@ -25,6 +25,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/importer" + 
"github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/randgen" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" @@ -77,7 +78,7 @@ type parquetTest struct { // validateParquetFile reads the parquet file and validates various aspects of // the parquet file. func validateParquetFile( - t *testing.T, ctx context.Context, ie *sql.InternalExecutor, test parquetTest, + t *testing.T, ctx context.Context, ie isql.Executor, test parquetTest, ) error { paths, err := filepath.Glob(filepath.Join(test.dir, test.filePrefix, parquetExportFilePattern+test.fileSuffix)) require.NoError(t, err) @@ -211,7 +212,7 @@ func TestRandomParquetExports(t *testing.T) { sqlDB.Exec(t, fmt.Sprintf("CREATE DATABASE %s", dbName)) var tableName string - ie := srv.ExecutorConfig().(sql.ExecutorConfig).InternalExecutor + idb := srv.ExecutorConfig().(sql.ExecutorConfig).InternalDB // Try at most 10 times to populate a random table with at least 10 rows. { var ( @@ -242,7 +243,7 @@ func TestRandomParquetExports(t *testing.T) { // Ensure the table only contains columns supported by EXPORT Parquet. 
If an // unsupported column cannot be dropped, try populating another table if err := func() error { - _, cols, err := ie.QueryRowExWithCols( + _, cols, err := idb.Executor().QueryRowExWithCols( ctx, "", nil, @@ -281,7 +282,7 @@ func TestRandomParquetExports(t *testing.T) { tableName, tableName), } sqlDB.Exec(t, test.stmt) - err := validateParquetFile(t, ctx, ie, test) + err := validateParquetFile(t, ctx, idb.Executor(), test) require.NoError(t, err, "failed to validate parquet file") } @@ -308,7 +309,7 @@ func TestBasicParquetTypes(t *testing.T) { sqlDB.Exec(t, fmt.Sprintf("CREATE DATABASE %s", dbName)) // instantiating an internal executor to easily get datums from the table - ie := srv.ExecutorConfig().(sql.ExecutorConfig).InternalExecutor + ie := srv.ExecutorConfig().(sql.ExecutorConfig).InternalDB.Executor() sqlDB.Exec(t, `CREATE TABLE foo (i INT PRIMARY KEY, x STRING, y INT, z FLOAT NOT NULL, a BOOL, INDEX (y))`) diff --git a/pkg/sql/importer/import_job.go b/pkg/sql/importer/import_job.go index c2f0e3612ded..e539826cb01a 100644 --- a/pkg/sql/importer/import_job.go +++ b/pkg/sql/importer/import_job.go @@ -40,6 +40,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/gcjob" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -116,7 +117,7 @@ func (r *importResumer) Resume(ctx context.Context, execCtx interface{}) error { if !details.PrepareComplete { var schemaMetadata *preparedSchemaMetadata if err := sql.DescsTxn(ctx, p.ExecCfg(), func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { var preparedDetails jobspb.ImportDetails schemaMetadata = &preparedSchemaMetadata{ @@ -134,14 +135,14 @@ func (r 
*importResumer) Resume(ctx context.Context, execCtx interface{}) error { } // The public schema is expected to always be present in the database for 22.2+. - dbDesc, err := descsCol.ByIDWithLeased(txn).WithoutNonPublic().Get().Database(ctx, details.ParentID) + dbDesc, err := descsCol.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Database(ctx, details.ParentID) if err != nil { return err } schemaMetadata.oldSchemaIDToName[dbDesc.GetSchemaID(tree.PublicSchema)] = tree.PublicSchema schemaMetadata.newSchemaIDToName[dbDesc.GetSchemaID(tree.PublicSchema)] = tree.PublicSchema - preparedDetails, err = r.prepareTablesForIngestion(ctx, p, curDetails, txn, descsCol, + preparedDetails, err = r.prepareTablesForIngestion(ctx, p, curDetails, txn.KV(), descsCol, schemaMetadata) if err != nil { return err @@ -149,7 +150,7 @@ func (r *importResumer) Resume(ctx context.Context, execCtx interface{}) error { // Telemetry for multi-region. for _, table := range preparedDetails.Tables { - dbDesc, err := descsCol.ByIDWithLeased(txn).WithoutNonPublic().Get().Database(ctx, table.Desc.GetParentID()) + dbDesc, err := descsCol.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Database(ctx, table.Desc.GetParentID()) if err != nil { return err } @@ -160,8 +161,8 @@ func (r *importResumer) Resume(ctx context.Context, execCtx interface{}) error { // Update the job details now that the schemas and table descs have // been "prepared". - return r.job.Update(ctx, txn, func( - txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, + return r.job.WithTxn(txn).Update(ctx, func( + txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, ) error { pl := md.Payload *pl.GetImport() = preparedDetails @@ -192,8 +193,7 @@ func (r *importResumer) Resume(ctx context.Context, execCtx interface{}) error { // is in keeping with the semantics we use when creating a schema during // sql execution. Namely, queue job in the txn which creates the schema // desc and run once the txn has committed. 
- if err := p.ExecCfg().JobRegistry.Run(ctx, p.ExecCfg().InternalExecutor, - schemaMetadata.queuedSchemaJobs); err != nil { + if err := p.ExecCfg().JobRegistry.Run(ctx, schemaMetadata.queuedSchemaJobs); err != nil { return err } @@ -275,7 +275,7 @@ func (r *importResumer) Resume(ctx context.Context, execCtx interface{}) error { } } - if err := r.job.SetDetails(ctx, nil /* txn */, details); err != nil { + if err := r.job.NoTxn().SetDetails(ctx, details); err != nil { return err } } @@ -336,8 +336,10 @@ func (r *importResumer) Resume(ctx context.Context, execCtx interface{}) error { // IMPORT INTO was planned on the older node. // // TODO(adityamaru): Remove in 22.1. - if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return r.releaseProtectedTimestamp(ctx, txn, p.ExecCfg().ProtectedTimestampProvider) + if err := p.ExecCfg().InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return r.releaseProtectedTimestamp( + ctx, p.ExecCfg().ProtectedTimestampProvider.WithTxn(txn), + ) }); err != nil { log.Errorf(ctx, "failed to release protected timestamp: %v", err) } @@ -612,7 +614,7 @@ func (r *importResumer) prepareSchemasForIngestion( ctx context.Context, p sql.JobExecContext, details jobspb.ImportDetails, - txn *kv.Txn, + txn isql.Txn, descsCol *descs.Collection, ) (*preparedSchemaMetadata, error) { schemaMetadata := &preparedSchemaMetadata{ @@ -624,7 +626,7 @@ func (r *importResumer) prepareSchemasForIngestion( schemaMetadata.schemaPreparedDetails.Schemas = make([]jobspb.ImportDetails_Schema, len(details.Schemas)) - desc, err := descsCol.MutableByID(txn).Desc(ctx, details.ParentID) + desc, err := descsCol.MutableByID(txn.KV()).Desc(ctx, details.ParentID) if err != nil { return nil, err } @@ -682,7 +684,7 @@ func (r *importResumer) prepareSchemasForIngestion( return nil, err } } - if err := txn.Run(ctx, b); err != nil { + if err := txn.KV().Run(ctx, b); err != nil { return nil, err } 
schemaMetadata.schemaPreparedDetails.Schemas[i] = jobspb.ImportDetails_Schema{ @@ -698,9 +700,9 @@ func bindImportStartTime( ctx context.Context, p sql.JobExecContext, id catid.DescID, startWallTime int64, ) error { if err := sql.DescsTxn(ctx, p.ExecCfg(), func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { - mutableDesc, err := descsCol.MutableByID(txn).Table(ctx, id) + mutableDesc, err := descsCol.MutableByID(txn.KV()).Table(ctx, id) if err != nil { return err } @@ -708,7 +710,7 @@ func bindImportStartTime( return err } if err := descsCol.WriteDesc( - ctx, false /* kvTrace */, mutableDesc, txn, + ctx, false /* kvTrace */, mutableDesc, txn.KV(), ); err != nil { return err } @@ -741,7 +743,7 @@ func (r *importResumer) parseBundleSchemaIfNeeded(ctx context.Context, phs inter ctx, span = tracing.ChildSpan(ctx, "import-parsing-bundle-schema") defer span.Finish() - if err := r.job.RunningStatus(ctx, nil /* txn */, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) { + if err := r.job.NoTxn().RunningStatus(ctx, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) { return runningStatusImportBundleParseSchema, nil }); err != nil { return errors.Wrapf(err, "failed to update running status of job %d", errors.Safe(r.job.ID())) @@ -750,9 +752,9 @@ func (r *importResumer) parseBundleSchemaIfNeeded(ctx context.Context, phs inter var dbDesc catalog.DatabaseDescriptor { if err := sql.DescsTxn(ctx, p.ExecCfg(), func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) (err error) { - dbDesc, err = descriptors.ByID(txn).WithoutNonPublic().Get().Database(ctx, parentID) + dbDesc, err = descriptors.ByID(txn.KV()).WithoutNonPublic().Get().Database(ctx, parentID) if err != nil { return err } @@ -802,7 +804,7 @@ func (r *importResumer) parseBundleSchemaIfNeeded(ctx 
context.Context, phs inter // Prevent job from redoing schema parsing and table desc creation // on subsequent resumptions. details.ParseBundleSchema = false - if err := r.job.SetDetails(ctx, nil /* txn */, details); err != nil { + if err := r.job.NoTxn().SetDetails(ctx, details); err != nil { return err } } @@ -813,10 +815,10 @@ func getPublicSchemaDescForDatabase( ctx context.Context, execCfg *sql.ExecutorConfig, db catalog.DatabaseDescriptor, ) (scDesc catalog.SchemaDescriptor, err error) { if err := sql.DescsTxn(ctx, execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { publicSchemaID := db.GetSchemaID(tree.PublicSchema) - scDesc, err = descriptors.ByIDWithLeased(txn).WithoutNonPublic().Get().Schema(ctx, publicSchemaID) + scDesc, err = descriptors.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Schema(ctx, publicSchemaID) return err }); err != nil { return nil, err @@ -938,11 +940,11 @@ func (r *importResumer) publishTables( log.Event(ctx, "making tables live") err := sql.DescsTxn(ctx, execCfg, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { - b := txn.NewBatch() + b := txn.KV().NewBatch() for _, tbl := range details.Tables { - newTableDesc, err := descsCol.MutableByID(txn).Table(ctx, tbl.Desc.ID) + newTableDesc, err := descsCol.MutableByID(txn.KV()).Table(ctx, tbl.Desc.ID) if err != nil { return err } @@ -981,13 +983,13 @@ func (r *importResumer) publishTables( return errors.Wrapf(err, "publishing table %d", newTableDesc.ID) } } - if err := txn.Run(ctx, b); err != nil { + if err := txn.KV().Run(ctx, b); err != nil { return errors.Wrap(err, "publishing tables") } // Update job record to mark tables published state as complete. 
details.TablesPublished = true - err := r.job.SetDetails(ctx, txn, details) + err := r.job.WithTxn(txn).SetDetails(ctx, details) if err != nil { return errors.Wrap(err, "updating job details after publishing tables") } @@ -1036,7 +1038,9 @@ func (r *importResumer) writeStubStatisticsForImportedTables( statistic.AvgSize = avgRowSize } // TODO(michae2): parallelize insertion of statistics. - err = stats.InsertNewStats(ctx, execCfg.Settings, execCfg.InternalExecutor, nil /* txn */, statistics) + err = execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return stats.InsertNewStats(ctx, execCfg.Settings, txn, statistics) + }) } if err != nil { // Failure to create statistics should not fail the entire import. @@ -1059,11 +1063,11 @@ func (r *importResumer) publishSchemas(ctx context.Context, execCfg *sql.Executo log.Event(ctx, "making schemas live") return sql.DescsTxn(ctx, execCfg, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { - b := txn.NewBatch() + b := txn.KV().NewBatch() for _, schema := range details.Schemas { - newDesc, err := descsCol.MutableByID(txn).Desc(ctx, schema.Desc.GetID()) + newDesc, err := descsCol.MutableByID(txn.KV()).Desc(ctx, schema.Desc.GetID()) if err != nil { return err } @@ -1079,13 +1083,13 @@ func (r *importResumer) publishSchemas(ctx context.Context, execCfg *sql.Executo return errors.Wrapf(err, "publishing schema %d", newSchemaDesc.ID) } } - if err := txn.Run(ctx, b); err != nil { + if err := txn.KV().Run(ctx, b); err != nil { return errors.Wrap(err, "publishing schemas") } // Update job record to mark tables published state as complete. 
details.SchemasPublished = true - err := r.job.SetDetails(ctx, txn, details) + err := r.job.WithTxn(txn).SetDetails(ctx, details) if err != nil { return errors.Wrap(err, "updating job details after publishing schemas") } @@ -1103,18 +1107,16 @@ func (r *importResumer) checkVirtualConstraints( desc.SetPublic() if sql.HasVirtualUniqueConstraints(desc) { - if err := job.RunningStatus(ctx, nil /* txn */, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) { + if err := job.NoTxn().RunningStatus(ctx, func(_ context.Context, _ jobspb.Details) (jobs.RunningStatus, error) { return jobs.RunningStatus(fmt.Sprintf("re-validating %s", desc.GetName())), nil }); err != nil { return errors.Wrapf(err, "failed to update running status of job %d", errors.Safe(job.ID())) } } - if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - ie := execCfg.InternalExecutorFactory.NewInternalExecutor(sql.NewFakeSessionData(execCfg.SV())) - return ie.WithSyntheticDescriptors([]catalog.Descriptor{desc}, func() error { - return sql.RevalidateUniqueConstraintsInTable(ctx, txn, user, ie, desc) - }) + if err := execCfg.InternalDB.DescsTxn(ctx, func(ctx context.Context, txn descs.Txn) error { + txn.Descriptors().AddSyntheticDescriptor(desc) + return sql.RevalidateUniqueConstraintsInTable(ctx, txn, user, desc) }); err != nil { return err } @@ -1202,11 +1204,11 @@ func (r *importResumer) checkForUDTModification( ) } checkTypesAreEquivalent := func( - ctx context.Context, txn *kv.Txn, col *descs.Collection, + ctx context.Context, txn isql.Txn, col *descs.Collection, ) error { for _, savedTypeDesc := range details.Types { if err := checkTypeIsEquivalent( - ctx, txn, col, savedTypeDesc.Desc, + ctx, txn.KV(), col, savedTypeDesc.Desc, ); err != nil { return err } @@ -1297,7 +1299,7 @@ func emitImportJobEvent( ctx context.Context, p sql.JobExecContext, status jobs.Status, job *jobs.Job, ) { var importEvent eventpb.Import - if err := p.ExecCfg().DB.Txn(ctx, func(ctx 
context.Context, txn *kv.Txn) error { + if err := p.ExecCfg().InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { return sql.LogEventForJobs(ctx, p.ExecCfg(), txn, &importEvent, int64(job.ID()), job.Payload(), p.User(), status) }); err != nil { @@ -1323,7 +1325,7 @@ func constructSchemaAndTableKey( func writeNonDropDatabaseChange( ctx context.Context, desc *dbdesc.Mutable, - txn *kv.Txn, + txn isql.Txn, descsCol *descs.Collection, p sql.JobExecContext, jobDesc string, @@ -1335,7 +1337,7 @@ func writeNonDropDatabaseChange( } queuedJob := []jobspb.JobID{job.ID()} - b := txn.NewBatch() + b := txn.KV().NewBatch() err = descsCol.WriteDescToBatch( ctx, p.ExtendedEvalContext().Tracing.KVTracingEnabled(), @@ -1345,7 +1347,7 @@ func writeNonDropDatabaseChange( if err != nil { return nil, err } - return queuedJob, txn.Run(ctx, b) + return queuedJob, txn.KV().Run(ctx, b) } func createNonDropDatabaseChangeJob( @@ -1354,7 +1356,7 @@ func createNonDropDatabaseChangeJob( databaseID descpb.ID, jobDesc string, p sql.JobExecContext, - txn *kv.Txn, + txn isql.Txn, ) (*jobs.Job, error) { jobRecord := jobs.Record{ Description: jobDesc, @@ -1391,7 +1393,7 @@ func (r *importResumer) OnFailOrCancel(ctx context.Context, execCtx interface{}, cfg := execCtx.(sql.JobExecContext).ExecCfg() var jobsToRunAfterTxnCommit []jobspb.JobID if err := sql.DescsTxn(ctx, cfg, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { if err := r.dropTables(ctx, txn, descsCol, cfg); err != nil { log.Errorf(ctx, "drop tables failed: %s", err.Error()) @@ -1410,7 +1412,7 @@ func (r *importResumer) OnFailOrCancel(ctx context.Context, execCtx interface{}, } // TODO(adityamaru): Remove in 22.1 since we do not write PTS records during // IMPORT INTO from 21.2+. 
- return r.releaseProtectedTimestamp(ctx, txn, cfg.ProtectedTimestampProvider) + return r.releaseProtectedTimestamp(ctx, cfg.ProtectedTimestampProvider.WithTxn(txn)) }); err != nil { return err } @@ -1419,8 +1421,7 @@ func (r *importResumer) OnFailOrCancel(ctx context.Context, execCtx interface{}, // This would be a job to drop all the schemas, and a job to update the parent // database descriptor. if len(jobsToRunAfterTxnCommit) != 0 { - if err := p.ExecCfg().JobRegistry.Run(ctx, p.ExecCfg().InternalExecutor, - jobsToRunAfterTxnCommit); err != nil { + if err := p.ExecCfg().JobRegistry.Run(ctx, jobsToRunAfterTxnCommit); err != nil { return errors.Wrap(err, "failed to run jobs that drop the imported schemas") } } @@ -1433,7 +1434,7 @@ func (r *importResumer) OnFailOrCancel(ctx context.Context, execCtx interface{}, // dropTables implements the OnFailOrCancel logic. func (r *importResumer) dropTables( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, execCfg *sql.ExecutorConfig, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, execCfg *sql.ExecutorConfig, ) error { details := r.job.Details().(jobspb.ImportDetails) @@ -1450,7 +1451,7 @@ func (r *importResumer) dropTables( var intoTable catalog.TableDescriptor for _, tbl := range details.Tables { if !tbl.IsNew { - desc, err := descsCol.MutableByID(txn).Table(ctx, tbl.Desc.ID) + desc, err := descsCol.MutableByID(txn.KV()).Table(ctx, tbl.Desc.ID) if err != nil { return err } @@ -1500,7 +1501,7 @@ func (r *importResumer) dropTables( // writes, so even if GC has run it would not have GC'ed any keys to which // we need to revert, so we can safely ignore the target-time GC check. 
const ignoreGC = true - if err := sql.RevertTables(ctx, txn.DB(), execCfg, []catalog.TableDescriptor{intoTable}, ts, ignoreGC, + if err := sql.RevertTables(ctx, txn.KV().DB(), execCfg, []catalog.TableDescriptor{intoTable}, ts, ignoreGC, sql.RevertTableDefaultBatchSize); err != nil { return errors.Wrap(err, "rolling back partially completed IMPORT via RevertRange") } @@ -1526,8 +1527,8 @@ func (r *importResumer) dropTables( } // Bring the IMPORT INTO table back online - b := txn.NewBatch() - intoDesc, err := descsCol.MutableByID(txn).Table(ctx, intoTable.GetID()) + b := txn.KV().NewBatch() + intoDesc, err := descsCol.MutableByID(txn.KV()).Table(ctx, intoTable.GetID()) if err != nil { return err } @@ -1537,22 +1538,22 @@ func (r *importResumer) dropTables( if err := descsCol.WriteDescToBatch(ctx, kvTrace, intoDesc, b); err != nil { return err } - return errors.Wrap(txn.Run(ctx, b), "putting IMPORT INTO table back online") + return errors.Wrap(txn.KV().Run(ctx, b), "putting IMPORT INTO table back online") } // dropNewTables drops the tables that were created as part of an IMPORT and // queues a GC job to clean up the dropped descriptors. 
func (r *importResumer) dropNewTables( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, execCfg *sql.ExecutorConfig, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, execCfg *sql.ExecutorConfig, ) error { details := r.job.Details().(jobspb.ImportDetails) dropTime := int64(1) - b := txn.NewBatch() + b := txn.KV().NewBatch() tablesToGC := make([]descpb.ID, 0, len(details.Tables)) toWrite := make([]*tabledesc.Mutable, 0, len(details.Tables)) for _, tbl := range details.Tables { - newTableDesc, err := descsCol.MutableByID(txn).Table(ctx, tbl.Desc.ID) + newTableDesc, err := descsCol.MutableByID(txn.KV()).Table(ctx, tbl.Desc.ID) if err != nil { return err } @@ -1601,12 +1602,12 @@ func (r *importResumer) dropNewTables( ctx, gcJobRecord, execCfg.JobRegistry.MakeJobID(), txn); err != nil { return err } - return errors.Wrap(txn.Run(ctx, b), "rolling back IMPORT tables") + return errors.Wrap(txn.KV().Run(ctx, b), "rolling back IMPORT tables") } func (r *importResumer) dropSchemas( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, descsCol *descs.Collection, execCfg *sql.ExecutorConfig, p sql.JobExecContext, @@ -1621,7 +1622,7 @@ func (r *importResumer) dropSchemas( } // Resolve the database descriptor. 
- desc, err := descsCol.MutableByID(txn).Desc(ctx, details.ParentID) + desc, err := descsCol.MutableByID(txn.KV()).Desc(ctx, details.ParentID) if err != nil { return nil, err } @@ -1634,7 +1635,7 @@ func (r *importResumer) dropSchemas( droppedSchemaIDs := make([]descpb.ID, 0) for _, schema := range details.Schemas { - desc, err := descsCol.MutableByID(txn).Desc(ctx, schema.Desc.ID) + desc, err := descsCol.MutableByID(txn.KV()).Desc(ctx, schema.Desc.ID) if err != nil { return nil, err } @@ -1649,7 +1650,7 @@ func (r *importResumer) dropSchemas( schemaDesc.SetDropped() droppedSchemaIDs = append(droppedSchemaIDs, schemaDesc.GetID()) - b := txn.NewBatch() + b := txn.KV().NewBatch() if dbDesc.Schemas != nil { delete(dbDesc.Schemas, schemaDesc.GetName()) } @@ -1663,7 +1664,7 @@ func (r *importResumer) dropSchemas( ); err != nil { return nil, err } - err = txn.Run(ctx, b) + err = txn.KV().Run(ctx, b) if err != nil { return nil, err } @@ -1700,7 +1701,7 @@ func (r *importResumer) dropSchemas( } func (r *importResumer) releaseProtectedTimestamp( - ctx context.Context, txn *kv.Txn, pts protectedts.Storage, + ctx context.Context, pts protectedts.Storage, ) error { details := r.job.Details().(jobspb.ImportDetails) ptsID := details.ProtectedTimestampRecord @@ -1708,7 +1709,7 @@ func (r *importResumer) releaseProtectedTimestamp( if ptsID == nil { return nil } - err := pts.Release(ctx, txn, *ptsID) + err := pts.Release(ctx, *ptsID) if errors.Is(err, protectedts.ErrNotExists) { // No reason to return an error which might cause problems if it doesn't // seem to exist. 
diff --git a/pkg/sql/importer/import_planning.go b/pkg/sql/importer/import_planning.go index b737acf4bf02..3bacf71cc405 100644 --- a/pkg/sql/importer/import_planning.go +++ b/pkg/sql/importer/import_planning.go @@ -27,7 +27,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server/telemetry" @@ -43,6 +42,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/exprutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice" @@ -295,15 +295,15 @@ func resolveUDTsUsedByImportInto( typeDescs := make([]catalog.TypeDescriptor, 0) var dbDesc catalog.DatabaseDescriptor err := sql.DescsTxn(ctx, p.ExecCfg(), func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) (err error) { - dbDesc, err = descriptors.ByID(txn).WithoutNonPublic().Get().Database(ctx, table.GetParentID()) + dbDesc, err = descriptors.ByID(txn.KV()).WithoutNonPublic().Get().Database(ctx, table.GetParentID()) if err != nil { return err } typeIDs, _, err := table.GetAllReferencedTypeIDs(dbDesc, func(id descpb.ID) (catalog.TypeDescriptor, error) { - immutDesc, err := descriptors.ByID(txn).WithoutNonPublic().Get().Type(ctx, id) + immutDesc, err := descriptors.ByID(txn.KV()).WithoutNonPublic().Get().Type(ctx, id) if err != nil { return nil, err } @@ -314,7 +314,7 @@ func resolveUDTsUsedByImportInto( } for _, typeID := range typeIDs { - immutDesc, err := 
descriptors.ByID(txn).WithoutNonPublic().Get().Type(ctx, typeID) + immutDesc, err := descriptors.ByID(txn.KV()).WithoutNonPublic().Get().Type(ctx, typeID) if err != nil { return err } @@ -889,9 +889,10 @@ func importPlanHook( // computed columns such as `gateway_region`. var databasePrimaryRegion catpb.RegionName if db.IsMultiRegion() { - if err := sql.DescsTxn(ctx, p.ExecCfg(), func(ctx context.Context, txn *kv.Txn, - descsCol *descs.Collection) error { - regionConfig, err := sql.SynthesizeRegionConfig(ctx, txn, db.GetID(), descsCol) + if err := sql.DescsTxn(ctx, p.ExecCfg(), func( + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, + ) error { + regionConfig, err := sql.SynthesizeRegionConfig(ctx, txn.KV(), db.GetID(), descsCol) if err != nil { return err } @@ -955,7 +956,7 @@ func importPlanHook( // record. We do not wait for the job to finish. jobID := p.ExecCfg().JobRegistry.MakeJobID() _, err := p.ExecCfg().JobRegistry.CreateAdoptableJobWithTxn( - ctx, jr, jobID, p.Txn()) + ctx, jr, jobID, p.InternalSQLTxn()) if err != nil { return err } @@ -967,7 +968,7 @@ func importPlanHook( // We create the job record in the planner's transaction to ensure that // the job record creation happens transactionally. - plannerTxn := p.Txn() + plannerTxn := p.InternalSQLTxn() // Construct the job and commit the transaction. Perform this work in a // closure to ensure that the job is cleaned up if an error occurs. @@ -990,11 +991,23 @@ func importPlanHook( // is safe because we're in an implicit transaction. If we were in an // explicit transaction the job would have to be run with the detached // option and would have been handled above. - return plannerTxn.Commit(ctx) + return plannerTxn.KV().Commit(ctx) }(); err != nil { return err } + // Release all descriptor leases here. We need to do this because we're + // about to kick off a job which is going to potentially write descriptors. 
+ // Note that we committed the underlying transaction in the above closure + // -- so we're not using any leases anymore, but we might be holding some + // because some sql queries might have been executed by this transaction + // (indeed some certainly were when we created the job we're going to run). + // + // This is all a bit of a hack to deal with the fact that we want to + // return results as part of this statement and the usual machinery for + // releasing leases assumes that that does not happen during statement + // execution. + p.InternalSQLTxn().Descriptors().ReleaseAll(ctx) if err := sj.Start(ctx); err != nil { return err } diff --git a/pkg/sql/importer/import_processor.go b/pkg/sql/importer/import_processor.go index c3d4d1d3175c..27cdef863ac0 100644 --- a/pkg/sql/importer/import_processor.go +++ b/pkg/sql/importer/import_processor.go @@ -389,7 +389,7 @@ func ingestKvs( // will hog memory as it tries to grow more aggressively. minBufferSize, maxBufferSize := importBufferConfigSizes(flowCtx.Cfg.Settings, true /* isPKAdder */) - pkIndexAdder, err := flowCtx.Cfg.BulkAdder(ctx, flowCtx.Cfg.DB, writeTS, kvserverbase.BulkAdderOptions{ + pkIndexAdder, err := flowCtx.Cfg.BulkAdder(ctx, flowCtx.Cfg.DB.KV(), writeTS, kvserverbase.BulkAdderOptions{ Name: pkAdderName, DisallowShadowingBelow: writeTS, SkipDuplicates: true, @@ -405,7 +405,7 @@ func ingestKvs( minBufferSize, maxBufferSize = importBufferConfigSizes(flowCtx.Cfg.Settings, false /* isPKAdder */) - indexAdder, err := flowCtx.Cfg.BulkAdder(ctx, flowCtx.Cfg.DB, writeTS, kvserverbase.BulkAdderOptions{ + indexAdder, err := flowCtx.Cfg.BulkAdder(ctx, flowCtx.Cfg.DB.KV(), writeTS, kvserverbase.BulkAdderOptions{ Name: indexAdderName, DisallowShadowingBelow: writeTS, SkipDuplicates: true, diff --git a/pkg/sql/importer/import_processor_planning.go b/pkg/sql/importer/import_processor_planning.go index d4e977b0fa6b..b887d5b43817 100644 --- a/pkg/sql/importer/import_processor_planning.go +++ 
b/pkg/sql/importer/import_processor_planning.go @@ -135,20 +135,21 @@ func distImport( importDetails := job.Progress().Details.(*jobspb.Progress_Import).Import if importDetails.ReadProgress == nil { // Initialize the progress metrics on the first attempt. - if err := job.FractionProgressed(ctx, nil, /* txn */ - func(ctx context.Context, details jobspb.ProgressDetails) float32 { - prog := details.(*jobspb.Progress_Import).Import - prog.ReadProgress = make([]float32, len(from)) - prog.ResumePos = make([]int64, len(from)) - if prog.SequenceDetails == nil { - prog.SequenceDetails = make([]*jobspb.SequenceDetails, len(from)) - for i := range prog.SequenceDetails { - prog.SequenceDetails[i] = &jobspb.SequenceDetails{} - } + if err := job.NoTxn().FractionProgressed(ctx, func( + ctx context.Context, details jobspb.ProgressDetails, + ) float32 { + prog := details.(*jobspb.Progress_Import).Import + prog.ReadProgress = make([]float32, len(from)) + prog.ResumePos = make([]int64, len(from)) + if prog.SequenceDetails == nil { + prog.SequenceDetails = make([]*jobspb.SequenceDetails, len(from)) + for i := range prog.SequenceDetails { + prog.SequenceDetails[i] = &jobspb.SequenceDetails{} } + } - return 0.0 - }, + return 0.0 + }, ); err != nil { return roachpb.BulkOpSummary{}, err } @@ -158,25 +159,26 @@ func distImport( fractionProgress := make([]uint32, len(from)) updateJobProgress := func() error { - return job.FractionProgressed(ctx, nil, /* txn */ - func(ctx context.Context, details jobspb.ProgressDetails) float32 { - var overall float32 - prog := details.(*jobspb.Progress_Import).Import - for i := range rowProgress { - prog.ResumePos[i] = atomic.LoadInt64(&rowProgress[i]) - } - for i := range fractionProgress { - fileProgress := math.Float32frombits(atomic.LoadUint32(&fractionProgress[i])) - prog.ReadProgress[i] = fileProgress - overall += fileProgress - } + return job.NoTxn().FractionProgressed(ctx, func( + ctx context.Context, details jobspb.ProgressDetails, + ) float32 { + 
var overall float32 + prog := details.(*jobspb.Progress_Import).Import + for i := range rowProgress { + prog.ResumePos[i] = atomic.LoadInt64(&rowProgress[i]) + } + for i := range fractionProgress { + fileProgress := math.Float32frombits(atomic.LoadUint32(&fractionProgress[i])) + prog.ReadProgress[i] = fileProgress + overall += fileProgress + } - accumulatedBulkSummary.Lock() - prog.Summary.Add(accumulatedBulkSummary.BulkOpSummary) - accumulatedBulkSummary.Reset() - accumulatedBulkSummary.Unlock() - return overall / float32(len(from)) - }, + accumulatedBulkSummary.Lock() + prog.Summary.Add(accumulatedBulkSummary.BulkOpSummary) + accumulatedBulkSummary.Reset() + accumulatedBulkSummary.Unlock() + return overall / float32(len(from)) + }, ) } diff --git a/pkg/sql/importer/import_processor_test.go b/pkg/sql/importer/import_processor_test.go index bfebc551c3bc..372030ec9b5d 100644 --- a/pkg/sql/importer/import_processor_test.go +++ b/pkg/sql/importer/import_processor_test.go @@ -31,10 +31,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/distsql" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/rowexec" @@ -240,6 +242,7 @@ func TestImportIgnoresProcessedFiles(t *testing.T) { Cfg: &execinfra.ServerConfig{ Settings: &cluster.Settings{}, ExternalStorage: externalStorageFactory, + DB: fakeDB{}, BulkAdder: func( _ context.Context, _ *kv.DB, _ hlc.Timestamp, _ kvserverbase.BulkAdderOptions) (kvserverbase.BulkAdder, error) { @@ -324,6 +327,28 @@ type 
observedKeys struct { keys []roachpb.Key } +// fakeDB implements descs.DB; its KV method returns a nil *kv.DB, and all +// other methods panic. +type fakeDB struct{} + +func (fakeDB) KV() *kv.DB { return nil } + +func (fakeDB) Txn( + ctx context.Context, f2 func(context.Context, isql.Txn) error, option ...isql.TxnOption, +) error { + panic("unimplemented") +} + +func (fakeDB) Executor(option ...isql.ExecutorOption) isql.Executor { + panic("unimplemented") +} + +func (fakeDB) DescsTxn( + ctx context.Context, f func(context.Context, descs.Txn) error, opts ...isql.TxnOption, +) error { + panic("unimplemented") +} + func TestImportHonorsResumePosition(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) @@ -341,6 +366,7 @@ func TestImportHonorsResumePosition(t *testing.T) { Cfg: &execinfra.ServerConfig{ Settings: &cluster.Settings{}, ExternalStorage: externalStorageFactory, + DB: fakeDB{}, BulkAdder: func( _ context.Context, _ *kv.DB, _ hlc.Timestamp, opts kvserverbase.BulkAdderOptions) (kvserverbase.BulkAdder, error) { @@ -469,6 +495,7 @@ func TestImportHandlesDuplicateKVs(t *testing.T) { Cfg: &execinfra.ServerConfig{ Settings: &cluster.Settings{}, ExternalStorage: externalStorageFactory, + DB: fakeDB{}, BulkAdder: func( _ context.Context, _ *kv.DB, _ hlc.Timestamp, opts kvserverbase.BulkAdderOptions) (kvserverbase.BulkAdder, error) { @@ -882,9 +909,7 @@ func externalStorageFactory( } return cloud.MakeExternalStorage(ctx, dest, base.ExternalIODirConfig{}, nil, blobs.TestBlobServiceClient(workdir), - nil, /* ie */ - nil, /* ief */ - nil, /* kvDB */ + nil, /* db */ nil, /* limiters */ cloud.NilMetrics, ) diff --git a/pkg/sql/importer/import_stmt_test.go b/pkg/sql/importer/import_stmt_test.go index 225695cb92bc..b655357ac700 100644 --- a/pkg/sql/importer/import_stmt_test.go +++ b/pkg/sql/importer/import_stmt_test.go @@ -39,7 +39,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/jobs/jobstest" 
"github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" @@ -52,13 +51,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/distsql" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/gcjob" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/storage" @@ -2060,8 +2059,8 @@ func TestFailedImportGC(t *testing.T) { dbID := sqlutils.QueryDatabaseID(t, sqlDB.DB, "failedimport") tableID := descpb.ID(dbID + 2) var td catalog.TableDescriptor - if err := sql.TestingDescsTxn(ctx, tc.Server(0), func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { - td, err = col.ByID(txn).Get().Table(ctx, tableID) + if err := sql.TestingDescsTxn(ctx, tc.Server(0), func(ctx context.Context, txn isql.Txn, col *descs.Collection) (err error) { + td, err = col.ByID(txn.KV()).Get().Table(ctx, tableID) return err }); err != nil { t.Fatal(err) @@ -2766,11 +2765,12 @@ func TestImportObjectLevelRBAC(t *testing.T) { writeToUserfile := func(filename, data string) { // Write to userfile storage now that testuser has CREATE privileges. 
- ie := tc.Server(0).InternalExecutor().(*sql.InternalExecutor) - ief := tc.Server(0).InternalExecutorFactory().(sqlutil.InternalExecutorFactory) - fileTableSystem1, err := cloud.ExternalStorageFromURI(ctx, dest, base.ExternalIODirConfig{}, - cluster.NoSettings, blobs.TestEmptyBlobClientFactory, username.TestUserName(), ie, ief, - tc.Server(0).DB(), nil, cloud.NilMetrics) + ief := tc.Server(0).InternalDB().(isql.DB) + fileTableSystem1, err := cloud.ExternalStorageFromURI( + ctx, dest, base.ExternalIODirConfig{}, + cluster.NoSettings, blobs.TestEmptyBlobClientFactory, + username.TestUserName(), ief, nil, cloud.NilMetrics, + ) require.NoError(t, err) require.NoError(t, cloud.WriteFile(ctx, fileTableSystem1, filename, bytes.NewReader([]byte(data)))) } @@ -5199,7 +5199,9 @@ func TestImportControlJobRBAC(t *testing.T) { }, jobs.UsesTenantCostControl) startLeasedJob := func(t *testing.T, record jobs.Record) *jobs.StartableJob { - job, err := jobs.TestingCreateAndStartJob(ctx, registry, tc.Server(0).DB(), record) + job, err := jobs.TestingCreateAndStartJob( + ctx, registry, tc.Server(0).InternalDB().(isql.DB), record, + ) require.NoError(t, err) return job } @@ -5994,9 +5996,7 @@ func TestImportPgDumpIgnoredStmts(t *testing.T) { tc.Server(0).ClusterSettings(), blobs.TestEmptyBlobClientFactory, username.RootUserName(), - tc.Server(0).InternalExecutor().(*sql.InternalExecutor), - tc.Server(0).InternalExecutorFactory().(sqlutil.InternalExecutorFactory), - tc.Server(0).DB(), + tc.Server(0).InternalDB().(isql.DB), nil, cloud.NilMetrics, ) @@ -6411,8 +6411,8 @@ func TestImportPgDumpSchemas(t *testing.T) { for _, schemaID := range schemaIDs { // Expect that the schema descriptor is deleted. 
- if err := sql.TestingDescsTxn(ctx, tc.Server(0), func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - _, err := col.ByID(txn).Get().Schema(ctx, schemaID) + if err := sql.TestingDescsTxn(ctx, tc.Server(0), func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + _, err := col.ByID(txn.KV()).Get().Schema(ctx, schemaID) if pgerror.GetPGCode(err) == pgcode.InvalidSchemaName { return nil } @@ -6427,8 +6427,8 @@ func TestImportPgDumpSchemas(t *testing.T) { for _, tableID := range tableIDs { // Expect that the table descriptor is deleted. - if err := sql.TestingDescsTxn(ctx, tc.Server(0), func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - _, err := col.ByID(txn).Get().Table(ctx, tableID) + if err := sql.TestingDescsTxn(ctx, tc.Server(0), func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + _, err := col.ByID(txn.KV()).Get().Table(ctx, tableID) if !testutils.IsError(err, "descriptor not found") { return err } diff --git a/pkg/sql/importer/read_import_avro_logical_test.go b/pkg/sql/importer/read_import_avro_logical_test.go index 4a391f20b933..b77cb56f2162 100644 --- a/pkg/sql/importer/read_import_avro_logical_test.go +++ b/pkg/sql/importer/read_import_avro_logical_test.go @@ -287,7 +287,7 @@ func TestImportAvroLogicalTypes(t *testing.T) { } require.Equal(t, true, success, "failed to generate random data after 5 attempts") - ie := srv.ExecutorConfig().(sql.ExecutorConfig).InternalExecutor + ie := srv.ExecutorConfig().(sql.ExecutorConfig).InternalDB.Executor() datums, _, err := ie.QueryBufferedExWithCols( ctx, "", diff --git a/pkg/sql/importer/read_import_base.go b/pkg/sql/importer/read_import_base.go index 41b1fb6fc97e..3d532188d594 100644 --- a/pkg/sql/importer/read_import_base.go +++ b/pkg/sql/importer/read_import_base.go @@ -72,7 +72,7 @@ func runImport( evalCtx.Regions = makeImportRegionOperator(spec.DatabasePrimaryRegion) semaCtx := tree.MakeSemaContext() semaCtx.TypeResolver = importResolver 
- conv, err := makeInputConverter(ctx, &semaCtx, spec, evalCtx, kvCh, seqChunkProvider, flowCtx.Cfg.DB) + conv, err := makeInputConverter(ctx, &semaCtx, spec, evalCtx, kvCh, seqChunkProvider, flowCtx.Cfg.DB.KV()) if err != nil { return nil, err } diff --git a/pkg/sql/importer/read_import_pgdump.go b/pkg/sql/importer/read_import_pgdump.go index a18bdb027cca..996a00d2e808 100644 --- a/pkg/sql/importer/read_import_pgdump.go +++ b/pkg/sql/importer/read_import_pgdump.go @@ -33,6 +33,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -247,11 +248,11 @@ func createPostgresSchemas( sessionData *sessiondata.SessionData, ) ([]*schemadesc.Mutable, error) { createSchema := func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn descs.Txn, dbDesc catalog.DatabaseDescriptor, schema *tree.CreateSchema, ) (*schemadesc.Mutable, error) { desc, _, err := sql.CreateUserDefinedSchemaDescriptor( - ctx, sessionData, schema, txn, descriptors, execCfg.InternalExecutor, + ctx, sessionData, schema, txn, execCfg.DescIDGenerator, dbDesc, false, /* allocateID */ ) if err != nil { @@ -275,15 +276,15 @@ func createPostgresSchemas( } var schemaDescs []*schemadesc.Mutable createSchemaDescs := func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn descs.Txn, ) error { schemaDescs = nil // reset for retries - dbDesc, err := descriptors.ByID(txn).WithoutNonPublic().Get().Database(ctx, parentID) + dbDesc, err := txn.Descriptors().ByID(txn.KV()).WithoutNonPublic().Get().Database(ctx, parentID) if err != nil { return err } for _, schema := range schemasToCreate { - 
scDesc, err := createSchema(ctx, txn, descriptors, dbDesc, schema) + scDesc, err := createSchema(ctx, txn, dbDesc, schema) if err != nil { return err } @@ -293,7 +294,7 @@ func createPostgresSchemas( } return nil } - if err := sql.DescsTxn(ctx, execCfg, createSchemaDescs); err != nil { + if err := execCfg.InternalDB.DescsTxn(ctx, createSchemaDescs); err != nil { return nil, err } return schemaDescs, nil @@ -873,15 +874,15 @@ func readPostgresStmt( // Otherwise, we silently ignore the drop statement and continue with the import. for _, name := range names { tableName := name.ToUnresolvedObjectName().String() - if err := sql.DescsTxn(ctx, p.ExecCfg(), func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - dbDesc, err := col.ByID(txn).Get().Database(ctx, parentID) + if err := sql.DescsTxn(ctx, p.ExecCfg(), func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + dbDesc, err := col.ByID(txn.KV()).Get().Database(ctx, parentID) if err != nil { return err } err = descs.CheckObjectNameCollision( ctx, col, - txn, + txn.KV(), parentID, dbDesc.GetSchemaID(tree.PublicSchema), tree.NewUnqualifiedTableName(tree.Name(tableName)), diff --git a/pkg/sql/index_backfiller.go b/pkg/sql/index_backfiller.go index b8df168f4ba2..6aa456ddad9a 100644 --- a/pkg/sql/index_backfiller.go +++ b/pkg/sql/index_backfiller.go @@ -20,6 +20,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -164,12 +165,12 @@ func (ib *IndexBackfillPlanner) plan( var planCtx *PlanningCtx td := tabledesc.NewBuilder(tableDesc.TableDesc()).BuildExistingMutableTable() if err := DescsTxn(ctx, ib.execCfg, func( - ctx context.Context, txn *kv.Txn, 
descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { sd := NewFakeSessionData(ib.execCfg.SV()) evalCtx = createSchemaChangeEvalCtx(ctx, ib.execCfg, sd, nowTimestamp, descriptors) planCtx = ib.execCfg.DistSQLPlanner.NewPlanningCtx(ctx, &evalCtx, - nil /* planner */, txn, DistributionTypeSystemTenantOnly) + nil /* planner */, txn.KV(), DistributionTypeSystemTenantOnly) // TODO(ajwerner): Adopt util.ConstantWithMetamorphicTestRange for the // batch size. Also plumb in a testing knob. chunkSize := indexBackfillBatchSize.Get(&ib.execCfg.Settings.SV) diff --git a/pkg/sql/indexbackfiller_test.go b/pkg/sql/indexbackfiller_test.go index 8933a05d8e76..82c272e19939 100644 --- a/pkg/sql/indexbackfiller_test.go +++ b/pkg/sql/indexbackfiller_test.go @@ -34,6 +34,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/fetchpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" @@ -485,9 +486,9 @@ INSERT INTO foo VALUES (1), (10), (100); var j *jobs.Job var table catalog.TableDescriptor require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) (err error) { - mut, err := descriptors.MutableByID(txn).Table(ctx, tableID) + mut, err := descriptors.MutableByID(txn.KV()).Table(ctx, tableID) if err != nil { return err } @@ -523,12 +524,12 @@ INSERT INTO foo VALUES (1), (10), (100); jobToBlock.Store(jobID) mut.MaybeIncrementVersion() table = mut.ImmutableCopy().(catalog.TableDescriptor) - return descriptors.WriteDesc(ctx, false /* kvTrace */, mut, txn) + return descriptors.WriteDesc(ctx, false /* kvTrace */, mut, txn.KV()) })) // Run 
the index backfill changer := sql.NewSchemaChangerForTesting( - tableID, 1, execCfg.NodeInfo.NodeID.SQLInstanceID(), s0.DB(), lm, jr, &execCfg, settings) + tableID, 1, execCfg.NodeInfo.NodeID.SQLInstanceID(), execCfg.InternalDB, lm, jr, &execCfg, settings) changer.SetJob(j) spans := []roachpb.Span{table.IndexSpan(keys.SystemSQLCodec, test.indexToBackfill)} require.NoError(t, changer.TestingDistIndexBackfill(ctx, table.GetVersion(), spans, @@ -537,9 +538,9 @@ INSERT INTO foo VALUES (1), (10), (100); // Make the mutation complete, then read the index and validate that it // has the expected contents. require.NoError(t, sql.DescsTxn(ctx, &execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { - table, err := descriptors.MutableByID(txn).Table(ctx, tableID) + table, err := descriptors.MutableByID(txn.KV()).Table(ctx, tableID) if err != nil { return err } @@ -549,7 +550,7 @@ INSERT INTO foo VALUES (1), (10), (100); require.NoError(t, table.MakeMutationComplete(mut)) } table.Mutations = table.Mutations[toComplete:] - datums := fetchIndex(ctx, t, txn, table, test.indexToBackfill) + datums := fetchIndex(ctx, t, txn.KV(), table, test.indexToBackfill) require.Equal(t, test.expectedContents, datumSliceToStrMatrix(datums)) return nil })) diff --git a/pkg/sql/information_schema.go b/pkg/sql/information_schema.go index a09537090150..6ee3711e7518 100644 --- a/pkg/sql/information_schema.go +++ b/pkg/sql/information_schema.go @@ -20,7 +20,6 @@ import ( "unicode/utf8" "github.com/cockroachdb/cockroach/pkg/docs" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catenumpb" @@ -29,6 +28,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/builtins" "github.com/cockroachdb/cockroach/pkg/sql/sem/builtins/builtinsregistry" @@ -36,7 +36,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/semenumpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/sql/vtable" "github.com/cockroachdb/cockroach/pkg/util/iterutil" @@ -252,25 +252,25 @@ func populateRoleHierarchy( if err != nil { return err } - return forEachRoleMembership( - ctx, p.ExecCfg().InternalExecutor, p.Txn(), - func(role, member username.SQLUsername, isAdmin bool) error { - // The ADMIN OPTION is inherited through the role hierarchy, and grantee - // is supposed to be the role that has the ADMIN OPTION. The current user - // inherits all the ADMIN OPTIONs of its ancestors. - isRole := member == p.User() - _, hasRole := allRoles[member] - if (hasRole || isRole) && (!onlyIsAdmin || isAdmin) { - if err := addRow( - tree.NewDString(member.Normalized()), // grantee - tree.NewDString(role.Normalized()), // role_name - yesOrNoDatum(isAdmin), // is_grantable - ); err != nil { - return err - } + return forEachRoleMembership(ctx, p.InternalSQLTxn(), func( + role, member username.SQLUsername, isAdmin bool, + ) error { + // The ADMIN OPTION is inherited through the role hierarchy, and grantee + // is supposed to be the role that has the ADMIN OPTION. The current user + // inherits all the ADMIN OPTIONs of its ancestors. 
+ isRole := member == p.User() + _, hasRole := allRoles[member] + if (hasRole || isRole) && (!onlyIsAdmin || isAdmin) { + if err := addRow( + tree.NewDString(member.Normalized()), // grantee + tree.NewDString(role.Normalized()), // role_name + yesOrNoDatum(isAdmin), // is_grantable + ); err != nil { + return err } - return nil - }, + } + return nil + }, ) } @@ -2721,8 +2721,10 @@ func forEachRole( // logic test fail in 3node-tenant config with 'txn already encountered an // error' (because of the context cancellation), so we buffer all roles // first. - rows, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryBuffered( - ctx, "read-roles", p.txn, query, + rows, err := p.InternalSQLTxn().QueryBufferedEx( + ctx, "read-roles", p.txn, + sessiondata.InternalExecutorOverride{User: username.NodeUserName()}, + query, ) if err != nil { return err @@ -2753,13 +2755,12 @@ func forEachRole( } func forEachRoleMembership( - ctx context.Context, - ie sqlutil.InternalExecutor, - txn *kv.Txn, - fn func(role, member username.SQLUsername, isAdmin bool) error, + ctx context.Context, txn isql.Txn, fn func(role, member username.SQLUsername, isAdmin bool) error, ) (retErr error) { const query = `SELECT "role", "member", "isAdmin" FROM system.role_members` - it, err := ie.QueryIterator(ctx, "read-members", txn, query) + it, err := txn.QueryIteratorEx(ctx, "read-members", txn.KV(), sessiondata.InternalExecutorOverride{ + User: username.NodeUserName(), + }, query) if err != nil { return err } diff --git a/pkg/sql/instrumentation.go b/pkg/sql/instrumentation.go index 480be46eeb5a..32b6a4dc6f79 100644 --- a/pkg/sql/instrumentation.go +++ b/pkg/sql/instrumentation.go @@ -27,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/execstats" "github.com/cockroachdb/cockroach/pkg/sql/idxrecommendations" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" 
"github.com/cockroachdb/cockroach/pkg/sql/opt/exec/explain" "github.com/cockroachdb/cockroach/pkg/sql/opt/indexrec" @@ -395,8 +396,8 @@ func (ih *instrumentationHelper) Finish( var bundle diagnosticsBundle var warnings []string if ih.collectBundle { - ie := p.extendedEvalCtx.ExecCfg.InternalExecutorFactory.NewInternalExecutor( - p.SessionData(), + ie := p.extendedEvalCtx.ExecCfg.InternalDB.Executor( + isql.WithSessionData(p.SessionData()), ) phaseTimes := statsCollector.PhaseTimes() execLatency := phaseTimes.GetServiceLatencyNoOverhead() diff --git a/pkg/sql/internal.go b/pkg/sql/internal.go index 2c94be4179ba..a63fe263e567 100644 --- a/pkg/sql/internal.go +++ b/pkg/sql/internal.go @@ -29,6 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgwirebase" "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" @@ -38,7 +39,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/fsm" "github.com/cockroachdb/cockroach/pkg/util/mon" @@ -49,7 +49,7 @@ import ( "github.com/cockroachdb/logtags" ) -var _ sqlutil.InternalExecutor = &InternalExecutor{} +var _ isql.Executor = &InternalExecutor{} // InternalExecutor can be used internally by code modules to execute SQL // statements without needing to open a SQL connection. @@ -115,7 +115,7 @@ func (ie *InternalExecutor) WithSyntheticDescriptors( // MakeInternalExecutor creates an InternalExecutor. 
// TODO (janexing): usage of it should be deprecated with `DescsTxnWithExecutor()` -// or `MakeInternalExecutorWithoutTxn()`. +// or `Executor()`. func MakeInternalExecutor( s *Server, memMetrics MemoryMetrics, monitor *mon.BytesMonitor, ) InternalExecutor { @@ -149,7 +149,7 @@ func MakeInternalExecutorMemMonitor( // SetSessionData cannot be called concurrently with query execution. func (ie *InternalExecutor) SetSessionData(sessionData *sessiondata.SessionData) { if sessionData != nil { - ie.s.populateMinimalSessionData(sessionData) + populateMinimalSessionData(sessionData) ie.sessionDataStack = sessiondata.NewStack(sessionData) } } @@ -359,7 +359,7 @@ func (ie *InternalExecutor) newConnExecutorWithTxn( ex.QualityOfService()) // Modify the Collection to match the parent executor's Collection. - // This allows the InternalExecutor to see schema changes made by the + // This allows the Executor to see schema changes made by the // parent executor. if shouldResetSyntheticDescriptors { ex.extraTxnState.descCollection.SetSyntheticDescriptors(ie.syntheticDescriptors) @@ -408,7 +408,7 @@ type rowsIterator struct { sp *tracing.Span } -var _ sqlutil.InternalRows = &rowsIterator{} +var _ isql.Rows = &rowsIterator{} var _ eval.InternalRows = &rowsIterator{} func (r *rowsIterator) Next(ctx context.Context) (_ bool, retErr error) { @@ -704,7 +704,7 @@ func (ie *InternalExecutor) ExecEx( // as root. Use QueryIteratorEx instead. func (ie *InternalExecutor) QueryIterator( ctx context.Context, opName string, txn *kv.Txn, stmt string, qargs ...interface{}, -) (sqlutil.InternalRows, error) { +) (isql.Rows, error) { return ie.QueryIteratorEx(ctx, opName, txn, ie.maybeRootSessionDataOverride(opName), stmt, qargs...) 
} @@ -718,7 +718,7 @@ func (ie *InternalExecutor) QueryIteratorEx( session sessiondata.InternalExecutorOverride, stmt string, qargs ...interface{}, -) (sqlutil.InternalRows, error) { +) (isql.Rows, error) { return ie.execInternal( ctx, opName, newSyncIEResultChannel(), txn, session, stmt, qargs..., ) @@ -801,7 +801,7 @@ func (ie *InternalExecutor) execInternal( // internal queries spawned from the same context should never do so. sd.LocalOnlySessionData.EnforceHomeRegion = false } else { - sd = ie.s.newSessionData(SessionArgs{}) + sd = newSessionData(SessionArgs{}) } applyOverrides(sessionDataOverride, sd) sd.Internal = true @@ -1023,7 +1023,7 @@ func (ie *InternalExecutor) commitTxn(ctx context.Context) error { if ie.sessionDataStack != nil { sd = ie.sessionDataStack.Top().Clone() } else { - sd = ie.s.newSessionData(SessionArgs{}) + sd = newSessionData(SessionArgs{}) } rw := newAsyncIEResultChannel() @@ -1037,6 +1037,13 @@ func (ie *InternalExecutor) commitTxn(ctx context.Context) error { ex.planner.txn = ie.extraTxnState.txn defer ex.close(ctx, externalTxnClose) + if ie.extraTxnState.txn.IsCommitted() { + // TODO(ajwerner): assert that none of the other extraTxnState is + // occupied with state. Namely, we want to make sure that no jobs or + // schema changes occurred. If that had, it'd violate various invariants + // we'd like to uphold. + return nil + } return ex.commitSQLTransactionInternal(ctx) } @@ -1065,7 +1072,7 @@ func (ie *InternalExecutor) checkIfTxnIsConsistent(txn *kv.Txn) error { if txn == nil && ie.extraTxnState != nil { return errors.New("the current internal executor was contructed with " + "a txn. 
To use an internal executor without a txn, call " + - "sqlutil.InternalExecutorFactory.MakeInternalExecutorWithoutTxn()") + "isql.DB.Executor()") } if txn != nil && ie.extraTxnState != nil && ie.extraTxnState.txn != txn { @@ -1227,7 +1234,7 @@ func (ncl *noopClientLock) RTrim(_ context.Context, pos CmdPos) { // will be passed to an internal executor when it's used under a txn context. // It should not be exported from the sql package. // TODO (janexing): we will deprecate this struct ASAP. It only exists as a -// stop-gap before we implement InternalExecutor.ConnExecutor to run all +// stop-gap before we implement Executor.ConnExecutor to run all // sql statements under a transaction. This struct is not ideal for an internal // executor in that it may lead to surprising bugs whereby we forget to add // fields here and keep them in sync. @@ -1239,49 +1246,101 @@ type extraTxnState struct { schemaChangerState *SchemaChangerState } -// InternalExecutorFactory stored information needed to construct a new +// InternalDB stores information needed to construct a new // internal executor. -type InternalExecutorFactory struct { +type InternalDB struct { server *Server + db *kv.DB + cf *descs.CollectionFactory + lm *lease.Manager memMetrics MemoryMetrics monitor *mon.BytesMonitor } -// NewInternalExecutorFactory returns a new internal executor factory. -func NewInternalExecutorFactory( - s *Server, memMetrics MemoryMetrics, monitor *mon.BytesMonitor, -) *InternalExecutorFactory { - return &InternalExecutorFactory{ +// NewShimInternalDB is used to bootstrap the server which needs access to +// components which will ultimately have a handle to an InternalDB. Some of +// those components may attempt to access the *kv.DB before the InternalDB +// has been fully initialized. To get around this, we initially construct +// an InternalDB with just a handle to a *kv.DB and then we'll fill in the +// object during sql server construction. 
+func NewShimInternalDB(db *kv.DB) *InternalDB { + return &InternalDB{db: db} +} + +func (ief *InternalDB) CloneWithMemoryMonitor( + metrics MemoryMetrics, monitor *mon.BytesMonitor, +) *InternalDB { + clone := *ief + clone.memMetrics = metrics + clone.monitor = monitor + return &clone +} + +func (ief *InternalDB) KV() *kv.DB { + return ief.db +} + +// NewInternalDB returns a new InternalDB. +func NewInternalDB(s *Server, memMetrics MemoryMetrics, monitor *mon.BytesMonitor) *InternalDB { + return &InternalDB{ server: s, + cf: s.cfg.CollectionFactory, + db: s.cfg.DB, + lm: s.cfg.LeaseManager, memMetrics: memMetrics, monitor: monitor, } } -var _ sqlutil.InternalExecutorFactory = &InternalExecutorFactory{} -var _ descs.TxnManager = &InternalExecutorFactory{} +var _ isql.DB = &InternalDB{} + +type internalTxn struct { + internalExecutor + txn *kv.Txn +} + +func (txn *internalTxn) Descriptors() *descs.Collection { + return txn.extraTxnState.descCollection +} + +func (txn *internalTxn) SessionData() *sessiondata.SessionData { + return txn.sessionDataStack.Top() +} + +func (txn *internalTxn) KV() *kv.Txn { return txn.txn } + +func (txn *internalTxn) init(kvTxn *kv.Txn, ie InternalExecutor) { + txn.txn = kvTxn + txn.InternalExecutor = ie +} + +type internalExecutor struct { + InternalExecutor +} // NewInternalExecutor constructs a new internal executor. // TODO (janexing): usage of it should be deprecated with `DescsTxnWithExecutor()` -// or `MakeInternalExecutorWithoutTxn()`. -func (ief *InternalExecutorFactory) NewInternalExecutor( - sd *sessiondata.SessionData, -) sqlutil.InternalExecutor { +// or `Executor()`. +func (ief *InternalDB) NewInternalExecutor(sd *sessiondata.SessionData) isql.Executor { ie := MakeInternalExecutor(ief.server, ief.memMetrics, ief.monitor) ie.SetSessionData(sd) return &ie } +// internalExecutorCommitTxnFunc is to commit the txn associated with an +// internal executor. 
+type internalExecutorCommitTxnFunc func(ctx context.Context) error + // newInternalExecutorWithTxn creates an internal executor with txn-related info, // such as descriptor collection and schema change job records, etc. // This function should only be used under -// InternalExecutorFactory.DescsTxnWithExecutor(). +// InternalDB.DescsTxnWithExecutor(). // TODO (janexing): This function will be soon refactored after we change // the internal executor infrastructure with a single conn executor for all // sql statement executions within a txn. -func (ief *InternalExecutorFactory) newInternalExecutorWithTxn( +func (ief *InternalDB) newInternalExecutorWithTxn( sd *sessiondata.SessionData, sv *settings.Values, txn *kv.Txn, descCol *descs.Collection, -) (sqlutil.InternalExecutor, descs.InternalExecutorCommitTxnFunc) { +) (InternalExecutor, internalExecutorCommitTxnFunc) { // By default, if not given session data, we initialize a sessionData that // would be the same as what would be created if root logged in. // The sessionData's user can be override when calling the query @@ -1309,7 +1368,7 @@ func (ief *InternalExecutorFactory) newInternalExecutorWithTxn( schemaChangerState: schemaChangerState, }, } - ie.s.populateMinimalSessionData(sd) + populateMinimalSessionData(sd) ie.sessionDataStack = sessiondata.NewStack(sd) commitTxnFunc := func(ctx context.Context) error { @@ -1320,62 +1379,53 @@ func (ief *InternalExecutorFactory) newInternalExecutorWithTxn( if err := ie.commitTxn(ctx); err != nil { return err } - return ie.s.cfg.JobRegistry.Run( - ctx, ie.s.cfg.InternalExecutor, *ie.extraTxnState.jobs, - ) + return ie.s.cfg.JobRegistry.Run(ctx, *ie.extraTxnState.jobs) } - return &ie, commitTxnFunc + return ie, commitTxnFunc } -// MakeInternalExecutorWithoutTxn returns an internal executor not bound with any -// txn. -func (ief *InternalExecutorFactory) MakeInternalExecutorWithoutTxn() sqlutil.InternalExecutor { +// Executor returns an Executor not bound with any txn. 
+func (ief *InternalDB) Executor(opts ...isql.ExecutorOption) isql.Executor { + var cfg isql.ExecutorConfig + cfg.Init(opts...) ie := MakeInternalExecutor(ief.server, ief.memMetrics, ief.monitor) + if sd := cfg.GetSessionData(); sd != nil { + ie.SetSessionData(sd) + } return &ie } type kvTxnFunc = func(context.Context, *kv.Txn) error -// ApplyTxnOptions is to apply the txn options and returns the txn generator -// function. -func ApplyTxnOptions( - db *kv.DB, opts ...sqlutil.TxnOption, -) func(ctx context.Context, f kvTxnFunc) error { - var config sqlutil.TxnConfig - for _, opt := range opts { - opt.Apply(&config) - } - run := db.Txn - - if config.GetSteppingEnabled() { +// DescsTxn enables callers to run transactions with explicit access to the +// *descs.Collection which is bound to the isql.Txn in the Txn method. +func (ief *InternalDB) DescsTxn( + ctx context.Context, f func(context.Context, descs.Txn) error, opts ...isql.TxnOption, +) error { + return ief.txn( + ctx, + func(ctx context.Context, txn *internalTxn) error { return f(ctx, txn) }, + opts..., + ) +} - run = func(ctx context.Context, f kvTxnFunc) error { - return db.TxnWithSteppingEnabled(ctx, sessiondatapb.Normal, f) - } - } - return run +// Txn is used to run queries with internal executor in a transactional +// manner. +func (ief *InternalDB) Txn( + ctx context.Context, f func(context.Context, isql.Txn) error, opts ...isql.TxnOption, +) error { + wrapped := func(ctx context.Context, txn *internalTxn) error { return f(ctx, txn) } + return ief.txn(ctx, wrapped, opts...) } -// DescsTxnWithExecutor enables callers to run transactions with a *Collection -// such that all retrieved immutable descriptors are properly leased and all mutable -// descriptors are handled. The function deals with verifying the two version -// invariant and retrying when it is violated. Callers need not worry that they -// write mutable descriptors multiple times. 
The call will explicitly wait for -// the leases to drain on old versions of descriptors modified or deleted in the -// transaction; callers do not need to call lease.WaitForOneVersion. -// It also enables using internal executor to run sql queries in a txn manner. -// -// The passed transaction is pre-emptively anchored to the system config key on -// the system tenant. -func (ief *InternalExecutorFactory) DescsTxnWithExecutor( - ctx context.Context, - db *kv.DB, - sd *sessiondata.SessionData, - f descs.TxnWithExecutorFunc, - opts ...sqlutil.TxnOption, +func (ief *InternalDB) txn( + ctx context.Context, f func(context.Context, *internalTxn) error, opts ...isql.TxnOption, ) error { - run := ApplyTxnOptions(db, opts...) + var cfg isql.TxnConfig + cfg.Init(opts...) + + db := ief.server.cfg.DB // Wait for descriptors that were modified or dropped. If the descriptor // was not dropped, wait for one version. Otherwise, wait for no versions. @@ -1410,16 +1460,40 @@ func (ief *InternalExecutorFactory) DescsTxnWithExecutor( return nil } + run := db.Txn + if priority, hasPriority := cfg.GetAdmissionPriority(); hasPriority { + steppingMode := kv.SteppingDisabled + if cfg.GetSteppingEnabled() { + steppingMode = kv.SteppingEnabled + } + run = func(ctx context.Context, f kvTxnFunc) error { + return db.TxnWithAdmissionControl( + ctx, roachpb.AdmissionHeader_FROM_SQL, priority, steppingMode, f, + ) + } + } else if cfg.GetSteppingEnabled() { + run = func(ctx context.Context, f kvTxnFunc) error { + return db.TxnWithSteppingEnabled(ctx, sessiondatapb.Normal, f) + } + } + cf := ief.server.cfg.CollectionFactory for { var withNewVersion []lease.IDVersion var deletedDescs catalog.DescriptorIDSet - if err := run(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + if err := run(ctx, func(ctx context.Context, kvTxn *kv.Txn) (err error) { withNewVersion, deletedDescs = nil, catalog.DescriptorIDSet{} descsCol := cf.NewCollection(ctx, descs.WithMonitor(ief.monitor)) defer 
descsCol.ReleaseAll(ctx) - ie, commitTxnFn := ief.newInternalExecutorWithTxn(sd, &cf.GetClusterSettings().SV, txn, descsCol) - if err := f(ctx, txn, descsCol, ie); err != nil { + ie, commitTxnFn := ief.newInternalExecutorWithTxn( + cfg.GetSessionData(), + &cf.GetClusterSettings().SV, + kvTxn, + descsCol, + ) + txn := internalTxn{txn: kvTxn} + txn.InternalExecutor = ie + if err := f(ctx, &txn); err != nil { return err } deletedDescs = descsCol.GetDeletedDescs() @@ -1438,42 +1512,3 @@ func (ief *InternalExecutorFactory) DescsTxnWithExecutor( } } } - -// DescsTxn is similar to DescsTxnWithExecutor, but without an internal executor -// involved. -func (ief *InternalExecutorFactory) DescsTxn( - ctx context.Context, - db *kv.DB, - f func(context.Context, *kv.Txn, *descs.Collection) error, - opts ...sqlutil.TxnOption, -) error { - return ief.DescsTxnWithExecutor( - ctx, - db, - nil, /* sessionData */ - func(ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, _ sqlutil.InternalExecutor) error { - return f(ctx, txn, descriptors) - }, - opts..., - ) -} - -// TxnWithExecutor is to run queries with internal executor in a transactional -// manner. -func (ief *InternalExecutorFactory) TxnWithExecutor( - ctx context.Context, - db *kv.DB, - sd *sessiondata.SessionData, - f func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) error, - opts ...sqlutil.TxnOption, -) error { - return ief.DescsTxnWithExecutor( - ctx, - db, - sd, - func(ctx context.Context, txn *kv.Txn, _ *descs.Collection, ie sqlutil.InternalExecutor) error { - return f(ctx, txn, ie) - }, - opts..., - ) -} diff --git a/pkg/sql/internal_test.go b/pkg/sql/internal_test.go index 46d331d53649..cd79f41c879a 100644 --- a/pkg/sql/internal_test.go +++ b/pkg/sql/internal_test.go @@ -620,8 +620,8 @@ func TestInternalExecutorWithUndefinedQoSOverridePanics(t *testing.T) { } // TODO(andrei): Test that descriptor leases are released by the -// InternalExecutor, with and without a higher-level txn. 
When there is no +// Executor, with and without a higher-level txn. When there is no // higher-level txn, the leases are released normally by the txn finishing. When // there is, they are released by the resetExtraTxnState() call in the -// InternalExecutor. Unfortunately at the moment we don't have a great way to +// Executor. Unfortunately at the moment we don't have a great way to // test lease releases. diff --git a/pkg/sql/sqlutil/BUILD.bazel b/pkg/sql/isql/BUILD.bazel similarity index 64% rename from pkg/sql/sqlutil/BUILD.bazel rename to pkg/sql/isql/BUILD.bazel index 9a4b7375281d..462ac7976518 100644 --- a/pkg/sql/sqlutil/BUILD.bazel +++ b/pkg/sql/isql/BUILD.bazel @@ -2,9 +2,13 @@ load("//build/bazelutil/unused_checker:unused.bzl", "get_x_data") load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( - name = "sqlutil", - srcs = ["internal_executor.go"], - importpath = "github.com/cockroachdb/cockroach/pkg/sql/sqlutil", + name = "isql", + srcs = [ + "doc.go", + "isql_db.go", + "options.go", + ], + importpath = "github.com/cockroachdb/cockroach/pkg/sql/isql", visibility = ["//visibility:public"], deps = [ "//pkg/kv", @@ -12,6 +16,7 @@ go_library( "//pkg/sql/catalog/colinfo", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", + "//pkg/util/admission/admissionpb", ], ) diff --git a/pkg/sql/isql/doc.go b/pkg/sql/isql/doc.go new file mode 100644 index 000000000000..7c331bbf8241 --- /dev/null +++ b/pkg/sql/isql/doc.go @@ -0,0 +1,13 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +// Package isql provides interfaces for interacting with the database +// using SQL from within the database itself. 
+package isql diff --git a/pkg/sql/sqlutil/internal_executor.go b/pkg/sql/isql/isql_db.go similarity index 76% rename from pkg/sql/sqlutil/internal_executor.go rename to pkg/sql/isql/isql_db.go index 0393361796ae..9fce96d06ad8 100644 --- a/pkg/sql/sqlutil/internal_executor.go +++ b/pkg/sql/isql/isql_db.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package sqlutil +package isql import ( "context" @@ -20,11 +20,50 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" ) -// InternalExecutor is meant to be used by layers below SQL in the system that +// DB enables clients to create and execute sql transactions from code inside +// the database. Multi-statement transactions should leverage the Txn method. +type DB interface { + + // KV returns the underlying *kv.DB. + KV() *kv.DB + + // Txn enables callers to run transactions with a *Collection such that all + // retrieved immutable descriptors are properly leased and all mutable + // descriptors are handled. The function deals with verifying the two version + // invariant and retrying when it is violated. Callers need not worry that they + // write mutable descriptors multiple times. The call will explicitly wait for + // the leases to drain on old versions of descriptors modified or deleted in the + // transaction; callers do not need to call lease.WaitForOneVersion. + // It also enables using internal executor to run sql queries in a txn manner. + Txn(context.Context, func(context.Context, Txn) error, ...TxnOption) error + + // Executor constructs an internal executor not bound to a transaction. + Executor(...ExecutorOption) Executor +} + +// Txn is an internal sql transaction. +type Txn interface { + + // KV returns the underlying kv.Txn. + KV() *kv.Txn + + // SessionData returns the transaction's SessionData. + SessionData() *sessiondata.SessionData + + // Executor allows the user to execute transactional SQL statements. 
+ Executor +} + +// Executor is meant to be used by layers below SQL in the system that // nevertheless want to execute SQL queries (presumably against system tables). -// It is extracted in this "sqlutil" package to avoid circular references and +// It is extracted in this "isql" package to avoid circular references and // is implemented by *sql.InternalExecutor. -type InternalExecutor interface { +// +// TODO(ajwerner): Remove the txn argument from all the functions. They are +// now implicit -- if you have your hands on an isql.Txn, you know it's +// transactional. If you just have an Executor, you don't know, but you +// cannot assume one way or the other. +type Executor interface { // Exec executes the supplied SQL statement and returns the number of rows // affected (not like the full results; see QueryIterator()). If no user has // been previously set through SetSessionData, the statement is executed as @@ -130,7 +169,7 @@ type InternalExecutor interface { txn *kv.Txn, stmt string, qargs ...interface{}, - ) (InternalRows, error) + ) (Rows, error) // QueryIteratorEx executes the query, returning an iterator that can be // used to get the results. If the call is successful, the returned iterator @@ -142,7 +181,7 @@ type InternalExecutor interface { session sessiondata.InternalExecutorOverride, stmt string, qargs ...interface{}, - ) (InternalRows, error) + ) (Rows, error) // QueryBufferedExWithCols is like QueryBufferedEx, additionally returning the computed // ResultColumns of the input query. @@ -171,9 +210,9 @@ type InternalExecutor interface { ) error } -// InternalRows is an iterator interface that's exposed by the internal +// Rows is an iterator interface that's exposed by the internal // executor. It provides access to the rows from a query. 
-type InternalRows interface { +type Rows interface { // Next advances the iterator by one row, returning false if there are no // more rows in this iterator or if an error is encountered (the latter is // then returned). @@ -188,7 +227,7 @@ type InternalRows interface { // invalidate it). Cur() tree.Datums - // RowsAffected() returns the count of rows affected by the statement. + // RowsAffected returns the count of rows affected by the statement. // This is only guaranteed to be accurate after Next() has returned // false (no more rows). RowsAffected() int @@ -206,63 +245,3 @@ type InternalRows interface { // Next() (including after Close() was called). Types() colinfo.ResultColumns } - -// InternalExecutorFactory is an interface that allow the creation of an -// internal executor, and run sql statement without a txn with the internal -// executor. -type InternalExecutorFactory interface { - // NewInternalExecutor constructs a new internal executor. - // TODO (janexing): this should be deprecated soon. - NewInternalExecutor(sd *sessiondata.SessionData) InternalExecutor - - // TxnWithExecutor enables callers to run transactions with a *Collection such that all - // retrieved immutable descriptors are properly leased and all mutable - // descriptors are handled. The function deals with verifying the two version - // invariant and retrying when it is violated. Callers need not worry that they - // write mutable descriptors multiple times. The call will explicitly wait for - // the leases to drain on old versions of descriptors modified or deleted in the - // transaction; callers do not need to call lease.WaitForOneVersion. - // It also enables using internal executor to run sql queries in a txn manner. - // - // The passed transaction is pre-emptively anchored to the system config key on - // the system tenant. 
- TxnWithExecutor(context.Context, *kv.DB, *sessiondata.SessionData, func(context.Context, *kv.Txn, InternalExecutor) error, ...TxnOption) error - - // MakeInternalExecutorWithoutTxn returns an internal executor not bound with any - // txn. - MakeInternalExecutorWithoutTxn() InternalExecutor -} - -// TxnOption is used to configure a Txn or TxnWithExecutor. -type TxnOption interface { - Apply(*TxnConfig) -} - -// TxnConfig is the config to be set for txn. -type TxnConfig struct { - steppingEnabled bool -} - -// GetSteppingEnabled return the steppingEnabled setting from the txn config. -func (tc *TxnConfig) GetSteppingEnabled() bool { - return tc.steppingEnabled -} - -type txnOptionFn func(options *TxnConfig) - -// Apply is to apply the txn config. -func (f txnOptionFn) Apply(options *TxnConfig) { f(options) } - -var steppingEnabled = txnOptionFn(func(o *TxnConfig) { - o.steppingEnabled = true -}) - -// SteppingEnabled creates a TxnOption to determine whether the underlying -// transaction should have stepping enabled. If stepping is enabled, the -// transaction will implicitly use lower admission priority. However, the -// user will need to remember to Step the Txn to make writes visible. The -// InternalExecutor will automatically (for better or for worse) step the -// transaction when executing each statement. -func SteppingEnabled() TxnOption { - return steppingEnabled -} diff --git a/pkg/sql/isql/options.go b/pkg/sql/isql/options.go new file mode 100644 index 000000000000..c8386a28d948 --- /dev/null +++ b/pkg/sql/isql/options.go @@ -0,0 +1,113 @@ +// Copyright 2022 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package isql + +import ( + "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" + "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" +) + +type ( + // TxnOption is used to configure a Txn. + TxnOption interface{ applyTxn(*TxnConfig) } + + // ExecutorOption configures an Executor. + ExecutorOption interface{ applyEx(*ExecutorConfig) } + + // Option can configure both an Executor and a Txn. + Option interface { + TxnOption + ExecutorOption + } +) + +// SteppingEnabled creates a TxnOption to determine whether the underlying +// transaction should have stepping enabled. If stepping is enabled, the +// transaction will implicitly use lower admission priority. However, the +// user will need to remember to Step the Txn to make writes visible. The +// Executor will automatically (for better or for worse) step the +// transaction when executing each statement. +func SteppingEnabled() TxnOption { + return steppingEnabled(true) +} + +// WithPriority allows the user to configure the priority for the transaction. +func WithPriority(p admissionpb.WorkPriority) TxnOption { + return admissionPriority(p) +} + +// WithSessionData allows the user to configure the session data for the Txn or +// Executor. +func WithSessionData(sd *sessiondata.SessionData) Option { + return (*sessionDataOption)(sd) +} + +// TxnConfig is the config to be set for txn. +type TxnConfig struct { + ExecutorConfig + steppingEnabled bool + priority *admissionpb.WorkPriority +} + +// GetSteppingEnabled return the steppingEnabled setting from the txn config. +func (tc *TxnConfig) GetSteppingEnabled() bool { + return tc.steppingEnabled +} + +// GetAdmissionPriority returns the AdmissionControl configuration if it exists. 
+func (tc *TxnConfig) GetAdmissionPriority() (admissionpb.WorkPriority, bool) { + if tc.priority != nil { + return *tc.priority, true + } + return 0, false +} + +func (tc *TxnConfig) Init(opts ...TxnOption) { + for _, opt := range opts { + opt.applyTxn(tc) + } +} + +// ExecutorConfig is the configuration used by the implementation of DB to +// set up the Executor. +type ExecutorConfig struct { + sessionData *sessiondata.SessionData +} + +func (ec *ExecutorConfig) GetSessionData() *sessiondata.SessionData { + return ec.sessionData +} + +// Init is used to initialize an ExecutorConfig. +func (ec *ExecutorConfig) Init(opts ...ExecutorOption) { + for _, o := range opts { + o.applyEx(ec) + } +} + +type sessionDataOption sessiondata.SessionData + +func (o *sessionDataOption) applyEx(cfg *ExecutorConfig) { + cfg.sessionData = (*sessiondata.SessionData)(o) +} +func (o *sessionDataOption) applyTxn(cfg *TxnConfig) { + cfg.sessionData = (*sessiondata.SessionData)(o) +} + +type steppingEnabled bool + +func (s steppingEnabled) applyTxn(o *TxnConfig) { o.steppingEnabled = bool(s) } + +type admissionPriority admissionpb.WorkPriority + +func (a admissionPriority) applyTxn(config *TxnConfig) { + config.priority = (*admissionpb.WorkPriority)(&a) +} diff --git a/pkg/sql/join_token.go b/pkg/sql/join_token.go index d0d85445ef9b..ec55a8c7d695 100644 --- a/pkg/sql/join_token.go +++ b/pkg/sql/join_token.go @@ -15,7 +15,6 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/featureflag" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -61,16 +60,12 @@ func (p *planner) CreateJoinToken(ctx context.Context) (string, error) { return "", errors.Wrap(err, "error when marshaling join token") } expiration := timeutil.Now().Add(security.JoinTokenExpiration) - err = p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - _, err = 
p.ExecCfg().InternalExecutor.Exec( - ctx, "insert-join-token", txn, - "insert into system.join_tokens(id, secret, expiration) "+ - "values($1, $2, $3)", - jt.TokenID.String(), jt.SharedSecret, expiration.Format(time.RFC3339), - ) - return err - }) - if err != nil { + if _, err := p.ExecCfg().InternalDB.Executor().Exec( + ctx, "insert-join-token", nil, /* txn */ + "insert into system.join_tokens(id, secret, expiration) "+ + "values($1, $2, $3)", + jt.TokenID.String(), jt.SharedSecret, expiration.Format(time.RFC3339), + ); err != nil { return "", errors.Wrap(err, "could not persist join token in system table") } return string(token), nil diff --git a/pkg/sql/logictest/testdata/logic_test/drop_database b/pkg/sql/logictest/testdata/logic_test/drop_database index e74b91df69a7..f8d51de96386 100644 --- a/pkg/sql/logictest/testdata/logic_test/drop_database +++ b/pkg/sql/logictest/testdata/logic_test/drop_database @@ -42,7 +42,10 @@ test root NULL NULL {} NULL skipif config local-legacy-schema-changer query TT -SELECT job_type, status FROM [SHOW JOBS] WHERE user_name = 'root' and job_type != 'MIGRATION' + SELECT job_type, status + FROM [SHOW JOBS] + WHERE user_name = 'root' AND job_type != 'MIGRATION' +ORDER BY job_type DESC ---- SCHEMA CHANGE succeeded SCHEMA CHANGE succeeded diff --git a/pkg/sql/logictest/testdata/logic_test/drop_table b/pkg/sql/logictest/testdata/logic_test/drop_table index b1500f37dbd8..49c8388f13f0 100644 --- a/pkg/sql/logictest/testdata/logic_test/drop_table +++ b/pkg/sql/logictest/testdata/logic_test/drop_table @@ -43,7 +43,7 @@ SELECT replace(job_type, 'NEW SCHEMA CHANGE', 'SCHEMA CHANGE'), status job_type = 'SCHEMA CHANGE' AND description != 'updating privileges' ) - ); + ) ORDER BY 1, 2; ---- SCHEMA CHANGE succeeded SCHEMA CHANGE succeeded diff --git a/pkg/sql/logictest/testdata/logic_test/jobs b/pkg/sql/logictest/testdata/logic_test/jobs index 64c9c89a729e..d580755f8358 100644 --- a/pkg/sql/logictest/testdata/logic_test/jobs +++ 
b/pkg/sql/logictest/testdata/logic_test/jobs @@ -24,23 +24,23 @@ CREATE INDEX ON t(x) query TTT SELECT job_type, description, user_name FROM [SHOW JOBS] WHERE user_name = 'root' -AND job_type LIKE 'SCHEMA CHANGE%' +AND job_type LIKE 'SCHEMA CHANGE%' ORDER BY 1, 2, 3 ---- -SCHEMA CHANGE updating version for users table root -SCHEMA CHANGE updating version for role options table root -SCHEMA CHANGE updating privileges for database 104 root SCHEMA CHANGE CREATE INDEX ON test.public.t (x) root +SCHEMA CHANGE updating privileges for database 104 root +SCHEMA CHANGE updating version for role options table root +SCHEMA CHANGE updating version for users table root SCHEMA CHANGE GC GC for temporary index used during index backfill root query TTT SELECT job_type, description, user_name FROM crdb_internal.jobs WHERE user_name = 'root' -AND job_type LIKE 'SCHEMA CHANGE%' +AND job_type LIKE 'SCHEMA CHANGE%' ORDER BY 1, 2, 3 ---- -SCHEMA CHANGE updating version for users table root -SCHEMA CHANGE updating version for role options table root -SCHEMA CHANGE updating privileges for database 104 root -SCHEMA CHANGE CREATE INDEX ON test.public.t (x) root -SCHEMA CHANGE GC GC for temporary index used during index backfill root +SCHEMA CHANGE CREATE INDEX ON test.public.t (x) root +SCHEMA CHANGE updating privileges for database 104 root +SCHEMA CHANGE updating version for role options table root +SCHEMA CHANGE updating version for users table root +SCHEMA CHANGE GC GC for temporary index used during index backfill root query TTT SELECT job_type, description, user_name FROM crdb_internal.jobs WHERE user_name = 'node' @@ -70,13 +70,13 @@ CREATE INDEX ON u(x); query TTT -SELECT job_type, description, user_name FROM [SHOW JOBS] +SELECT job_type, description, user_name FROM [SHOW JOBS] ORDER BY 1, 2, 3 ---- SCHEMA CHANGE CREATE INDEX ON test.public.u (x) testuser SCHEMA CHANGE GC GC for temporary index used during index backfill testuser query TTT -SELECT job_type, description, user_name 
FROM crdb_internal.jobs +SELECT job_type, description, user_name FROM crdb_internal.jobs ORDER BY 1, 2, 3 ---- SCHEMA CHANGE CREATE INDEX ON test.public.u (x) testuser SCHEMA CHANGE GC GC for temporary index used during index backfill testuser @@ -87,27 +87,27 @@ user root query TTT SELECT job_type, description, user_name FROM [SHOW JOBS] WHERE user_name IN ('root', 'testuser', 'node') -AND job_type LIKE 'SCHEMA CHANGE%' +AND job_type LIKE 'SCHEMA CHANGE%' ORDER BY 1, 2, 3 ---- -SCHEMA CHANGE updating version for users table root -SCHEMA CHANGE updating version for role options table root -SCHEMA CHANGE updating privileges for database 104 root SCHEMA CHANGE CREATE INDEX ON test.public.t (x) root -SCHEMA CHANGE GC GC for temporary index used during index backfill root SCHEMA CHANGE CREATE INDEX ON test.public.u (x) testuser +SCHEMA CHANGE updating privileges for database 104 root +SCHEMA CHANGE updating version for role options table root +SCHEMA CHANGE updating version for users table root +SCHEMA CHANGE GC GC for temporary index used during index backfill root SCHEMA CHANGE GC GC for temporary index used during index backfill testuser query TTT SELECT job_type, description, user_name FROM crdb_internal.jobs WHERE user_name IN ('root', 'testuser', 'node') -AND (job_type LIKE 'AUTO SPAN%' OR job_type LIKE 'SCHEMA CHANGE%') +AND (job_type LIKE 'AUTO SPAN%' OR job_type LIKE 'SCHEMA CHANGE%') ORDER BY 1, 2, 3 ---- AUTO SPAN CONFIG RECONCILIATION reconciling span configurations node -SCHEMA CHANGE updating version for users table root -SCHEMA CHANGE updating version for role options table root -SCHEMA CHANGE updating privileges for database 104 root SCHEMA CHANGE CREATE INDEX ON test.public.t (x) root -SCHEMA CHANGE GC GC for temporary index used during index backfill root SCHEMA CHANGE CREATE INDEX ON test.public.u (x) testuser +SCHEMA CHANGE updating privileges for database 104 root +SCHEMA CHANGE updating version for role options table root +SCHEMA CHANGE updating 
version for users table root +SCHEMA CHANGE GC GC for temporary index used during index backfill root SCHEMA CHANGE GC GC for temporary index used during index backfill testuser statement ok @@ -137,14 +137,14 @@ user testuser # testuser should be able to see jobs created by non-admin users. query TTT -SELECT job_type, description, user_name FROM crdb_internal.jobs +SELECT job_type, description, user_name FROM crdb_internal.jobs ORDER BY 1, 2, 3 ---- -SCHEMA CHANGE CREATE INDEX ON test.public.u (x) testuser -SCHEMA CHANGE GC GC for temporary index used during index backfill testuser SCHEMA CHANGE CREATE INDEX ON test.public.t1 (x) testuser2 -SCHEMA CHANGE GC GC for temporary index used during index backfill testuser2 +SCHEMA CHANGE CREATE INDEX ON test.public.u (x) testuser SCHEMA CHANGE DROP TABLE test.public.t1 testuser2 SCHEMA CHANGE GC GC for DROP TABLE test.public.t1 testuser2 +SCHEMA CHANGE GC GC for temporary index used during index backfill testuser +SCHEMA CHANGE GC GC for temporary index used during index backfill testuser2 statement ok PAUSE JOB (SELECT job_id FROM [SHOW JOBS] WHERE user_name = 'testuser2' AND job_type = 'SCHEMA CHANGE GC' AND description LIKE 'GC for DROP%') diff --git a/pkg/sql/mvcc_backfiller.go b/pkg/sql/mvcc_backfiller.go index 4eaf6d45374c..a48def832c45 100644 --- a/pkg/sql/mvcc_backfiller.go +++ b/pkg/sql/mvcc_backfiller.go @@ -16,7 +16,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/backfill" @@ -24,6 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" 
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec/backfiller" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -124,11 +124,11 @@ func (im *IndexBackfillerMergePlanner) plan( var planCtx *PlanningCtx if err := DescsTxn(ctx, im.execCfg, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) error { sd := NewFakeSessionData(im.execCfg.SV()) - evalCtx = createSchemaChangeEvalCtx(ctx, im.execCfg, sd, txn.ReadTimestamp(), descriptors) - planCtx = im.execCfg.DistSQLPlanner.NewPlanningCtx(ctx, &evalCtx, nil /* planner */, txn, + evalCtx = createSchemaChangeEvalCtx(ctx, im.execCfg, sd, txn.KV().ReadTimestamp(), descriptors) + planCtx = im.execCfg.DistSQLPlanner.NewPlanningCtx(ctx, &evalCtx, nil /* planner */, txn.KV(), DistributionTypeSystemTenantOnly) spec, err := initIndexBackfillMergerSpec(*tableDesc.TableDesc(), addedIndexes, temporaryIndexes, mergeTimestamp) @@ -279,7 +279,7 @@ func (imt *IndexMergeTracker) FlushCheckpoint(ctx context.Context) error { details.ResumeSpanList[progress.MutationIdx[idx]].ResumeSpans = progress.TodoSpans[idx] } - return imt.jobMu.job.SetDetails(ctx, nil, details) + return imt.jobMu.job.NoTxn().SetDetails(ctx, details) } // FlushFractionCompleted writes out the fraction completed based on the number of total @@ -306,8 +306,9 @@ func (imt *IndexMergeTracker) FlushFractionCompleted(ctx context.Context) error imt.jobMu.Lock() defer imt.jobMu.Unlock() - if err := imt.jobMu.job.FractionProgressed(ctx, nil, - jobs.FractionUpdater(frac)); err != nil { + if err := imt.jobMu.job.NoTxn().FractionProgressed( + ctx, jobs.FractionUpdater(frac), + ); err != nil { return jobs.SimplifyInvalidStatusError(err) } } diff --git a/pkg/sql/opt_catalog.go b/pkg/sql/opt_catalog.go index deaef5fb034a..e8572adeca43 100644 --- a/pkg/sql/opt_catalog.go +++ b/pkg/sql/opt_catalog.go @@ -467,7 +467,7 @@ 
func (oc *optCatalog) fullyQualifiedNameWithTxn( // RoleExists is part of the cat.Catalog interface. func (oc *optCatalog) RoleExists(ctx context.Context, role username.SQLUsername) (bool, error) { - return RoleExists(ctx, oc.planner.ExecCfg().InternalExecutor, oc.planner.Txn(), role) + return RoleExists(ctx, oc.planner.InternalSQLTxn(), role) } // dataSourceForDesc returns a data source wrapper for the given descriptor. diff --git a/pkg/sql/opt_exec_factory.go b/pkg/sql/opt_exec_factory.go index d2785fdf1db5..53bf7c478242 100644 --- a/pkg/sql/opt_exec_factory.go +++ b/pkg/sql/opt_exec_factory.go @@ -1196,7 +1196,7 @@ func (e *urlOutputter) finish() (url.URL, error) { func (ef *execFactory) showEnv(plan string, envOpts exec.ExplainEnvData) (exec.Node, error) { var out urlOutputter - ie := ef.planner.extendedEvalCtx.ExecCfg.InternalExecutorFactory.NewInternalExecutor( + ie := ef.planner.extendedEvalCtx.ExecCfg.InternalDB.NewInternalExecutor( ef.planner.SessionData(), ) c := makeStmtEnvCollector(ef.ctx, ie.(*InternalExecutor)) diff --git a/pkg/sql/pg_catalog.go b/pkg/sql/pg_catalog.go index 00d7b94931bc..103d0f031a6e 100644 --- a/pkg/sql/pg_catalog.go +++ b/pkg/sql/pg_catalog.go @@ -595,7 +595,7 @@ https://www.postgresql.org/docs/9.5/catalog-pg-auth-members.html`, schema: vtable.PGCatalogAuthMembers, populate: func(ctx context.Context, p *planner, _ catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { h := makeOidHasher() - return forEachRoleMembership(ctx, p.ExecCfg().InternalExecutor, p.Txn(), + return forEachRoleMembership(ctx, p.InternalSQLTxn(), func(roleName, memberName username.SQLUsername, isAdmin bool) error { return addRow( h.UserOid(roleName), // roleid @@ -1535,10 +1535,11 @@ https://www.postgresql.org/docs/9.5/catalog-pg-depend.html`, // as a datum row, containing object id, sub id (column id in the case of // columns), comment text, and comment type (keys.FooCommentType). 
func getComments(ctx context.Context, p *planner) ([]tree.Datums, error) { - return p.extendedEvalCtx.ExecCfg.InternalExecutor.QueryBuffered( + return p.InternalSQLTxn().QueryBufferedEx( ctx, "select-comments", p.Txn(), + sessiondata.NodeUserSessionDataOverride, `SELECT object_id, sub_id, @@ -3470,7 +3471,7 @@ var pgCatalogDbRoleSettingTable = virtualSchemaTable{ https://www.postgresql.org/docs/13/catalog-pg-db-role-setting.html`, schema: vtable.PgCatalogDbRoleSetting, populate: func(ctx context.Context, p *planner, _ catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - rows, err := p.extendedEvalCtx.ExecCfg.InternalExecutor.QueryBufferedEx( + rows, err := p.InternalSQLTxn().QueryBufferedEx( ctx, "select-db-role-settings", p.Txn(), @@ -3555,8 +3556,10 @@ https://www.postgresql.org/docs/13/catalog-pg-statistic-ext.html`, // statistics. query := `SELECT "tableID", name, "columnIDs", "statisticID", '{d}'::"char"[] FROM system.table_statistics;` - rows, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryBuffered( - ctx, "read-statistics-objects", p.txn, query, + rows, err := p.InternalSQLTxn().QueryBufferedEx( + ctx, "read-statistics-objects", p.txn, + sessiondata.NodeUserSessionDataOverride, + query, ) if err != nil { return err diff --git a/pkg/sql/pgwire/BUILD.bazel b/pkg/sql/pgwire/BUILD.bazel index 3aca7ac1f83f..8fb3faf52481 100644 --- a/pkg/sql/pgwire/BUILD.bazel +++ b/pkg/sql/pgwire/BUILD.bazel @@ -119,6 +119,7 @@ go_test( "//pkg/sql/catalog/bootstrap", "//pkg/sql/catalog/colinfo", "//pkg/sql/colconv", + "//pkg/sql/isql", "//pkg/sql/lex", "//pkg/sql/parser", "//pkg/sql/pgwire/hba", @@ -131,7 +132,6 @@ go_test( "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", "//pkg/sql/sessiondatapb", - "//pkg/sql/sqlutil", "//pkg/sql/tests", "//pkg/sql/types", "//pkg/testutils", diff --git a/pkg/sql/pgwire/auth.go b/pkg/sql/pgwire/auth.go index 181e261b2666..8a60c1574c83 100644 --- a/pkg/sql/pgwire/auth.go +++ b/pkg/sql/pgwire/auth.go @@ -64,9 +64,6 
@@ type authOptions struct { // allow system usernames (e.g. GSSAPI principals or X.509 CN's) to // be dynamically mapped to database usernames. identMap *identmap.Conf - // ie is the server-wide internal executor, used to - // retrieve entries from system.users. - ie *sql.InternalExecutor // The following fields are only used by tests. @@ -152,7 +149,6 @@ func (c *conn) handleAuthentication( sql.GetUserSessionInitInfo( ctx, execCfg, - authOpt.ie, dbUser, c.sessionArgs.SessionDefaults["database"], ) diff --git a/pkg/sql/pgwire/conn_test.go b/pkg/sql/pgwire/conn_test.go index 321f9d15dc2e..979076c6df82 100644 --- a/pkg/sql/pgwire/conn_test.go +++ b/pkg/sql/pgwire/conn_test.go @@ -30,6 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/lex" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/hba" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -38,7 +39,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -436,7 +436,7 @@ func processPgxStartup(ctx context.Context, s serverutils.TestServerInterface, c func execQuery( ctx context.Context, query string, s serverutils.TestServerInterface, c *conn, ) error { - it, err := s.InternalExecutor().(sqlutil.InternalExecutor).QueryIteratorEx( + it, err := s.InternalExecutor().(isql.Executor).QueryIteratorEx( ctx, "test", nil, /* txn */ sessiondata.InternalExecutorOverride{User: username.RootUserName(), Database: "system"}, query, diff --git a/pkg/sql/pgwire/server.go 
b/pkg/sql/pgwire/server.go index aaa9f2c97528..1e07fc426b36 100644 --- a/pkg/sql/pgwire/server.go +++ b/pkg/sql/pgwire/server.go @@ -779,7 +779,6 @@ func (s *Server) ServeConn( connType: preServeStatus.ConnType, connDetails: connDetails, insecure: s.cfg.Insecure, - ie: s.execCfg.InternalExecutor, auth: hbaConf, identMap: identMap, testingAuthHook: testingAuthHook, diff --git a/pkg/sql/planhook.go b/pkg/sql/planhook.go index 0b4adc2909bd..5b43c1a5c82c 100644 --- a/pkg/sql/planhook.go +++ b/pkg/sql/planhook.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" @@ -127,6 +128,7 @@ type PlanHookState interface { Txn() *kv.Txn LookupTenantInfo(ctx context.Context, tenantSpec *tree.TenantSpec, op string) (*descpb.TenantInfo, error) GetAvailableTenantID(ctx context.Context, name roachpb.TenantName) (roachpb.TenantID, error) + InternalSQLTxn() descs.Txn } var _ jobsauth.AuthorizationAccessor = PlanHookState(nil) diff --git a/pkg/sql/planner.go b/pkg/sql/planner.go index e922b9cc65eb..04bffeddccc6 100644 --- a/pkg/sql/planner.go +++ b/pkg/sql/planner.go @@ -34,6 +34,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/evalcatalog" "github.com/cockroachdb/cockroach/pkg/sql/exprutil" "github.com/cockroachdb/cockroach/pkg/sql/idxusage" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/querycache" @@ -43,7 +44,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" 
"github.com/cockroachdb/cockroach/pkg/sql/sqlstats/persistedsqlstats" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/upgrade" "github.com/cockroachdb/cockroach/pkg/util/cancelchecker" @@ -145,7 +145,7 @@ func (evalCtx *extendedEvalContext) copy() *extendedEvalContext { // QueueJob creates a new job from record and queues it for execution after // the transaction commits. func (evalCtx *extendedEvalContext) QueueJob( - ctx context.Context, txn *kv.Txn, record jobs.Record, + ctx context.Context, txn isql.Txn, record jobs.Record, ) (*jobs.Job, error) { jobID := evalCtx.ExecCfg.JobRegistry.MakeJobID() job, err := evalCtx.ExecCfg.JobRegistry.CreateJobWithTxn( @@ -174,6 +174,14 @@ type planner struct { txn *kv.Txn + // internalSQLTxn corresponds to the object returned from InternalSQLTxn. + // It is here to avoid the need to allocate another structure. The value + // is initialized lazily. The assumption is that that method is called + // during statement execution when the planner is in a valid state. + // The internalSQLTxn may hold on to a stale txn reference and should + // never be accessed directly. Nothing explicitly resets this field. + internalSQLTxn internalTxn + // isInternalPlanner is set to true when this planner is not bound to // a SQL session. 
isInternalPlanner bool @@ -453,12 +461,12 @@ func internalExtendedEvalCtx( var sqlStatsController eval.SQLStatsController var schemaTelemetryController eval.SchemaTelemetryController var indexUsageStatsController eval.IndexUsageStatsController - if execCfg.InternalExecutor != nil { - if execCfg.InternalExecutor.s != nil { - indexUsageStats = execCfg.InternalExecutor.s.indexUsageStats - sqlStatsController = execCfg.InternalExecutor.s.sqlStatsController - schemaTelemetryController = execCfg.InternalExecutor.s.schemaTelemetryController - indexUsageStatsController = execCfg.InternalExecutor.s.indexUsageStatsController + if ief := execCfg.InternalDB; ief != nil { + if ief.server != nil { + indexUsageStats = ief.server.indexUsageStats + sqlStatsController = ief.server.sqlStatsController + schemaTelemetryController = ief.server.schemaTelemetryController + indexUsageStatsController = ief.server.indexUsageStatsController } else { // If the indexUsageStats is nil from the sql.Server, we create a dummy // index usage stats collector. The sql.Server in the ExecutorConfig @@ -558,6 +566,30 @@ func (p *planner) Txn() *kv.Txn { return p.txn } +func (p *planner) InternalSQLTxn() descs.Txn { + if p.txn == nil { + return nil + } + + // We lazily initialize the internalSQLTxn structure so that we don't have + // to pay to initialize this structure if the statement being executed does + // not execute internal sql statements. 
+ if p.internalSQLTxn.txn != p.txn { + ief := p.ExecCfg().InternalDB + ie := MakeInternalExecutor(ief.server, ief.memMetrics, ief.monitor) + ie.SetSessionData(p.SessionData()) + ie.extraTxnState = &extraTxnState{ + txn: p.Txn(), + descCollection: p.Descriptors(), + jobs: p.extendedEvalCtx.Jobs, + schemaChangeJobRecords: p.extendedEvalCtx.SchemaChangeJobRecords, + schemaChangerState: p.extendedEvalCtx.SchemaChangerState, + } + p.internalSQLTxn.init(p.txn, ie) + } + return &p.internalSQLTxn +} + func (p *planner) User() username.SQLUsername { return p.SessionData().User() } @@ -691,21 +723,6 @@ func (p *planner) IsActive(ctx context.Context, key clusterversion.Key) bool { return p.execCfg.Settings.Version.IsActive(ctx, key) } -// initInternalExecutor is to initialize an internal executor with a planner. -// Note that this function should only be used when using internal executor -// to run sql statement under the planner context. -func initInternalExecutor(ctx context.Context, p *planner) sqlutil.InternalExecutor { - ie := p.ExecCfg().InternalExecutorFactory.NewInternalExecutor(p.SessionData()) - ie.(*InternalExecutor).extraTxnState = &extraTxnState{ - txn: p.Txn(), - descCollection: p.Descriptors(), - jobs: p.extendedEvalCtx.Jobs, - schemaChangeJobRecords: p.extendedEvalCtx.SchemaChangeJobRecords, - schemaChangerState: p.extendedEvalCtx.SchemaChangerState, - } - return ie -} - // QueryRowEx executes the supplied SQL statement and returns a single row, or // nil if no row is found, or an error if more that one row is returned. // @@ -718,8 +735,7 @@ func (p *planner) QueryRowEx( stmt string, qargs ...interface{}, ) (tree.Datums, error) { - ie := initInternalExecutor(ctx, p) - return ie.QueryRowEx(ctx, opName, p.Txn(), override, stmt, qargs...) + return p.InternalSQLTxn().QueryRowEx(ctx, opName, p.Txn(), override, stmt, qargs...) 
} // ExecEx is like Exec, but allows the caller to override some session data @@ -731,8 +747,7 @@ func (p *planner) ExecEx( stmt string, qargs ...interface{}, ) (int, error) { - ie := initInternalExecutor(ctx, p) - return ie.ExecEx(ctx, opName, p.Txn(), override, stmt, qargs...) + return p.InternalSQLTxn().ExecEx(ctx, opName, p.Txn(), override, stmt, qargs...) } // QueryIteratorEx executes the query, returning an iterator that can be used @@ -748,9 +763,7 @@ func (p *planner) QueryIteratorEx( stmt string, qargs ...interface{}, ) (eval.InternalRows, error) { - ie := initInternalExecutor(ctx, p) - rows, err := ie.QueryIteratorEx(ctx, opName, p.Txn(), override, stmt, qargs...) - return rows.(eval.InternalRows), err + return p.InternalSQLTxn().QueryIteratorEx(ctx, opName, p.Txn(), override, stmt, qargs...) } // QueryBufferedEx executes the supplied SQL statement and returns the resulting @@ -764,8 +777,7 @@ func (p *planner) QueryBufferedEx( stmt string, qargs ...interface{}, ) ([]tree.Datums, error) { - ie := initInternalExecutor(ctx, p) - return ie.QueryBufferedEx(ctx, opName, p.Txn(), session, stmt, qargs...) + return p.InternalSQLTxn().QueryBufferedEx(ctx, opName, p.Txn(), session, stmt, qargs...) } // QueryRowExWithCols is like QueryRowEx, additionally returning the computed @@ -777,8 +789,7 @@ func (p *planner) QueryRowExWithCols( stmt string, qargs ...interface{}, ) (tree.Datums, colinfo.ResultColumns, error) { - ie := initInternalExecutor(ctx, p) - return ie.QueryRowExWithCols(ctx, opName, p.Txn(), session, stmt, qargs...) + return p.InternalSQLTxn().QueryRowExWithCols(ctx, opName, p.Txn(), session, stmt, qargs...) } // QueryBufferedExWithCols is like QueryBufferedEx, additionally returning the @@ -790,19 +801,7 @@ func (p *planner) QueryBufferedExWithCols( stmt string, qargs ...interface{}, ) ([]tree.Datums, colinfo.ResultColumns, error) { - ie := initInternalExecutor(ctx, p) - return ie.QueryBufferedExWithCols(ctx, opName, p.Txn(), session, stmt, qargs...) 
-} - -// WithInternalExecutor let user run multiple sql statements within the same -// internal executor initialized under a planner context. To run single sql -// statements, please use the query functions above. -func (p *planner) WithInternalExecutor( - ctx context.Context, - run func(ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor) error, -) error { - ie := initInternalExecutor(ctx, p) - return run(ctx, p.Txn(), ie) + return p.InternalSQLTxn().QueryBufferedExWithCols(ctx, opName, p.Txn(), session, stmt, qargs...) } func (p *planner) resetPlanner( @@ -841,10 +840,10 @@ func (p *planner) resetPlanner( func (p *planner) GetReplicationStreamManager( ctx context.Context, ) (eval.ReplicationStreamManager, error) { - return repstream.GetReplicationStreamManager(ctx, p.EvalContext(), p.Txn()) + return repstream.GetReplicationStreamManager(ctx, p.EvalContext(), p.InternalSQLTxn()) } // GetStreamIngestManager returns a StreamIngestManager. func (p *planner) GetStreamIngestManager(ctx context.Context) (eval.StreamIngestManager, error) { - return repstream.GetStreamIngestManager(ctx, p.EvalContext(), p.Txn()) + return repstream.GetStreamIngestManager(ctx, p.EvalContext(), p.InternalSQLTxn()) } diff --git a/pkg/sql/privileged_accessor.go b/pkg/sql/privileged_accessor.go index c93a5d4e16a6..113a9b847e81 100644 --- a/pkg/sql/privileged_accessor.go +++ b/pkg/sql/privileged_accessor.go @@ -34,7 +34,7 @@ func (p *planner) LookupNamespaceID( `SELECT id FROM [%d AS namespace] WHERE "parentID" = $1 AND "parentSchemaID" = $2 AND name = $3`, keys.NamespaceTableID, ) - r, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryRowEx( + r, err := p.InternalSQLTxn().QueryRowEx( ctx, "crdb-internal-get-descriptor-id", p.txn, diff --git a/pkg/sql/reassign_owned_by.go b/pkg/sql/reassign_owned_by.go index 4f818515b9c2..275778e2a16a 100644 --- a/pkg/sql/reassign_owned_by.go +++ b/pkg/sql/reassign_owned_by.go @@ -55,7 +55,7 @@ func (p *planner) ReassignOwnedBy(ctx 
context.Context, n *tree.ReassignOwnedBy) // is a member of old roles and new roles and has CREATE privilege. // Postgres first checks if the role exists before checking privileges. for _, oldRole := range normalizedOldRoles { - roleExists, err := RoleExists(ctx, p.ExecCfg().InternalExecutor, p.Txn(), oldRole) + roleExists, err := RoleExists(ctx, p.InternalSQLTxn(), oldRole) if err != nil { return nil, err } @@ -69,7 +69,7 @@ func (p *planner) ReassignOwnedBy(ctx context.Context, n *tree.ReassignOwnedBy) if err != nil { return nil, err } - roleExists, err := RoleExists(ctx, p.ExecCfg().InternalExecutor, p.Txn(), newRole) + roleExists, err := RoleExists(ctx, p.InternalSQLTxn(), newRole) if !roleExists { return nil, sqlerrors.NewUndefinedUserError(newRole) } diff --git a/pkg/sql/region_util.go b/pkg/sql/region_util.go index 0ed233b9b4fc..e700a0996beb 100644 --- a/pkg/sql/region_util.go +++ b/pkg/sql/region_util.go @@ -75,8 +75,9 @@ func GetLiveClusterRegions(ctx context.Context, p PlanHookState) (LiveClusterReg // Non-admin users can't access the crdb_internal.kv_node_status table, which // this query hits, so we must override the user here. override := sessiondata.RootUserSessionDataOverride + override.Database = "system" - it, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryIteratorEx( + it, err := p.InternalSQLTxn().QueryIteratorEx( ctx, "get_live_cluster_regions", p.Txn(), @@ -1482,20 +1483,18 @@ func SynthesizeRegionConfig( // This returns an ErrNotMultiRegionDatabase error if the database isn't // multi-region. 
func GetLocalityRegionEnumPhysicalRepresentation( - ctx context.Context, - internalExecutorFactory descs.TxnManager, - kvDB *kv.DB, - dbID descpb.ID, - locality roachpb.Locality, + ctx context.Context, db descs.DB, dbID descpb.ID, locality roachpb.Locality, ) ([]byte, error) { var enumReps map[catpb.RegionName][]byte var primaryRegion catpb.RegionName - if err := internalExecutorFactory.DescsTxn(ctx, kvDB, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + if err := db.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { enumReps, primaryRegion = nil, "" // reset for retry var err error - enumReps, primaryRegion, err = GetRegionEnumRepresentations(ctx, txn, dbID, descsCol) + enumReps, primaryRegion, err = GetRegionEnumRepresentations( + ctx, txn.KV(), dbID, txn.Descriptors(), + ) return err }); err != nil { return nil, err @@ -2555,7 +2554,7 @@ func (p *planner) OptimizeSystemDatabase(ctx context.Context) error { // Delete statistics for the table because the statistics materialize // the column type for `crdb_region` and the column type is changing // from bytes to an enum. 
- if _, err := p.ExecCfg().InternalExecutor.Exec(ctx, "delete-stats", p.txn, + if _, err := p.InternalSQLTxn().Exec(ctx, "delete-stats", p.txn, `DELETE FROM system.table_statistics WHERE "tableID" = $1;`, descriptor.GetID(), ); err != nil { diff --git a/pkg/sql/resolve_oid.go b/pkg/sql/resolve_oid.go index cdba7448764f..c6d0755423e1 100644 --- a/pkg/sql/resolve_oid.go +++ b/pkg/sql/resolve_oid.go @@ -16,11 +16,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/errors" "github.com/lib/pq/oid" @@ -30,7 +30,7 @@ import ( func (p *planner) ResolveOIDFromString( ctx context.Context, resultType *types.T, toResolve *tree.DString, ) (_ *tree.DOid, errSafeToIgnore bool, _ error) { - ie := p.ExecCfg().InternalExecutorFactory.NewInternalExecutor(p.SessionData()) + ie := p.ExecCfg().InternalDB.NewInternalExecutor(p.SessionData()) return resolveOID( ctx, p.Txn(), ie, @@ -42,7 +42,7 @@ func (p *planner) ResolveOIDFromString( func (p *planner) ResolveOIDFromOID( ctx context.Context, resultType *types.T, toResolve *tree.DOid, ) (_ *tree.DOid, errSafeToIgnore bool, _ error) { - ie := p.ExecCfg().InternalExecutorFactory.NewInternalExecutor(p.SessionData()) + ie := p.ExecCfg().InternalDB.NewInternalExecutor(p.SessionData()) return resolveOID( ctx, p.Txn(), ie, @@ -51,11 +51,7 @@ func (p *planner) ResolveOIDFromOID( } func resolveOID( - ctx context.Context, - txn *kv.Txn, - ie sqlutil.InternalExecutor, - resultType *types.T, - toResolve tree.Datum, + ctx context.Context, txn *kv.Txn, ie isql.Executor, resultType *types.T, toResolve 
tree.Datum, ) (_ *tree.DOid, errSafeToIgnore bool, _ error) { info, ok := regTypeInfos[resultType.Oid()] if !ok { diff --git a/pkg/sql/revoke_role.go b/pkg/sql/revoke_role.go index 20ec4bf0848a..0a6ba0a32765 100644 --- a/pkg/sql/revoke_role.go +++ b/pkg/sql/revoke_role.go @@ -129,7 +129,7 @@ func (n *RevokeRoleNode) startExec(params runParams) error { "role/user %s cannot be removed from role %s or lose the ADMIN OPTION", username.RootUser, username.AdminRole) } - affected, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.ExecEx( + affected, err := params.p.InternalSQLTxn().ExecEx( params.ctx, opName, params.p.txn, diff --git a/pkg/sql/row/BUILD.bazel b/pkg/sql/row/BUILD.bazel index 3db4e5b4b7ee..dfc6d8a5afb7 100644 --- a/pkg/sql/row/BUILD.bazel +++ b/pkg/sql/row/BUILD.bazel @@ -47,6 +47,7 @@ go_library( "//pkg/sql/catalog/fetchpb", "//pkg/sql/catalog/schemaexpr", "//pkg/sql/catalog/seqexpr", + "//pkg/sql/isql", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/rowenc", @@ -117,6 +118,7 @@ go_test( "//pkg/sql/catalog/desctestutils", "//pkg/sql/catalog/fetchpb", "//pkg/sql/catalog/tabledesc", + "//pkg/sql/isql", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/rowenc", diff --git a/pkg/sql/row/expr_walker.go b/pkg/sql/row/expr_walker.go index e07f801ec319..5c40c851ccbe 100644 --- a/pkg/sql/row/expr_walker.go +++ b/pkg/sql/row/expr_walker.go @@ -23,6 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemaexpr" "github.com/cockroachdb/cockroach/pkg/sql/catalog/seqexpr" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/builtins/builtinconstants" @@ -288,7 +289,7 @@ func importGenUUID( type SeqChunkProvider struct { JobID jobspb.JobID Registry *jobs.Registry - DB *kv.DB + DB isql.DB } // 
RequestChunk updates seqMetadata with information about the chunk of sequence @@ -299,9 +300,9 @@ func (j *SeqChunkProvider) RequestChunk( ctx context.Context, evalCtx *eval.Context, c *CellInfoAnnotation, seqMetadata *SequenceMetadata, ) error { var hasAllocatedChunk bool - return j.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return j.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { var foundFromPreviouslyAllocatedChunk bool - resolveChunkFunc := func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + resolveChunkFunc := func(txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { progress := md.Progress // Check if we have already reserved a chunk corresponding to this row in a @@ -318,7 +319,7 @@ func (j *SeqChunkProvider) RequestChunk( // Reserve a new sequence value chunk at the KV level. if !hasAllocatedChunk { - if err := reserveChunkOfSeqVals(ctx, evalCtx, c, seqMetadata, j.DB); err != nil { + if err := reserveChunkOfSeqVals(ctx, evalCtx, c, seqMetadata, j.DB.KV()); err != nil { return err } hasAllocatedChunk = true @@ -356,11 +357,13 @@ func (j *SeqChunkProvider) RequestChunk( ju.UpdateProgress(progress) return nil } - const useReadLock = true - err := j.Registry.UpdateJobWithTxn(ctx, j.JobID, txn, useReadLock, resolveChunkFunc) + job, err := j.Registry.LoadJobWithTxn(ctx, j.JobID, txn) if err != nil { return err } + if err := job.WithTxn(txn).Update(ctx, resolveChunkFunc); err != nil { + return err + } // Now that the job progress has been written to, we can use the newly // allocated chunk. 
diff --git a/pkg/sql/row/expr_walker_test.go b/pkg/sql/row/expr_walker_test.go index 7e8fd498b468..1601ad3c2145 100644 --- a/pkg/sql/row/expr_walker_test.go +++ b/pkg/sql/row/expr_walker_test.go @@ -23,6 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -81,7 +82,7 @@ func TestJobBackedSeqChunkProvider(t *testing.T) { ctx := context.Background() - s, sqlDB, db := serverutils.StartServer(t, base.TestServerArgs{}) + s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) evalCtx := &eval.Context{ @@ -192,7 +193,7 @@ func TestJobBackedSeqChunkProvider(t *testing.T) { t.Run(test.name, func(t *testing.T) { job := createMockImportJob(ctx, t, registry, test.allocatedChunks, test.resumePos) j := &row.SeqChunkProvider{ - Registry: registry, JobID: job.ID(), DB: db, + Registry: registry, JobID: job.ID(), DB: s.InternalDB().(isql.DB), } annot := &row.CellInfoAnnotation{ SourceID: 0, @@ -201,7 +202,7 @@ func TestJobBackedSeqChunkProvider(t *testing.T) { for id, val := range test.seqIDToExpectedVal { seqDesc := createAndIncrementSeqDescriptor(ctx, t, id, keys.TODOSQLCodec, - test.incrementBy, test.seqIDToOpts[id], db) + test.incrementBy, test.seqIDToOpts[id], kvDB) seqMetadata := &row.SequenceMetadata{ SeqDesc: seqDesc, InstancesPerRow: test.instancesPerRow, diff --git a/pkg/sql/rowexec/BUILD.bazel b/pkg/sql/rowexec/BUILD.bazel index 1e4104918397..868b3ea3dd9f 100644 --- a/pkg/sql/rowexec/BUILD.bazel +++ b/pkg/sql/rowexec/BUILD.bazel @@ -67,6 +67,7 @@ go_library( "//pkg/sql/execinfrapb", "//pkg/sql/execstats", "//pkg/sql/inverted", + "//pkg/sql/isql", "//pkg/sql/memsize", 
"//pkg/sql/opt/invertedexpr", "//pkg/sql/opt/invertedidx", @@ -177,6 +178,7 @@ go_test( "//pkg/sql/execinfrapb", "//pkg/sql/flowinfra", "//pkg/sql/inverted", + "//pkg/sql/isql", "//pkg/sql/opt/invertedexpr", "//pkg/sql/randgen", "//pkg/sql/rowcontainer", @@ -185,7 +187,6 @@ go_test( "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", "//pkg/sql/span", - "//pkg/sql/sqlutil", "//pkg/sql/stats", "//pkg/sql/types", "//pkg/storage", diff --git a/pkg/sql/rowexec/backfiller.go b/pkg/sql/rowexec/backfiller.go index ec0ec23eebbc..8b8f7eb31bcd 100644 --- a/pkg/sql/rowexec/backfiller.go +++ b/pkg/sql/rowexec/backfiller.go @@ -17,13 +17,13 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/backfill" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -194,14 +194,14 @@ func (b *backfiller) mainLoop(ctx context.Context) (roachpb.Spans, error) { func GetResumeSpans( ctx context.Context, jobsRegistry *jobs.Registry, - txn *kv.Txn, + txn isql.Txn, codec keys.SQLCodec, col *descs.Collection, tableID descpb.ID, mutationID descpb.MutationID, filter backfill.MutationFilter, ) ([]roachpb.Span, *jobs.Job, int, error) { - tableDesc, err := col.ByID(txn).Get().Table(ctx, tableID) + tableDesc, err := col.ByID(txn.KV()).Get().Table(ctx, tableID) if err != nil { return nil, nil, 0, err } @@ -268,12 +268,12 @@ func GetResumeSpans( // SetResumeSpansInJob adds a list of resume spans into a job details 
field. func SetResumeSpansInJob( - ctx context.Context, spans []roachpb.Span, mutationIdx int, txn *kv.Txn, job *jobs.Job, + ctx context.Context, spans []roachpb.Span, mutationIdx int, txn isql.Txn, job *jobs.Job, ) error { details, ok := job.Details().(jobspb.SchemaChangeDetails) if !ok { return errors.Errorf("expected SchemaChangeDetails job type, got %T", job.Details()) } details.ResumeSpanList[mutationIdx].ResumeSpans = spans - return job.SetDetails(ctx, txn, details) + return job.WithTxn(txn).SetDetails(ctx, details) } diff --git a/pkg/sql/rowexec/backfiller_test.go b/pkg/sql/rowexec/backfiller_test.go index bb730f67d5fe..0439f3136b48 100644 --- a/pkg/sql/rowexec/backfiller_test.go +++ b/pkg/sql/rowexec/backfiller_test.go @@ -18,7 +18,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/backfill" @@ -26,6 +25,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/rowexec" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -40,7 +40,7 @@ import ( // resume is the left over work from origSpan. 
func TestingWriteResumeSpan( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, codec keys.SQLCodec, col *descs.Collection, id descpb.ID, @@ -130,13 +130,14 @@ func TestWriteResumeSpan(t *testing.T) { t.Fatal(errors.Wrapf(err, "can't find job %d", jobID)) } - require.NoError(t, job.Update(ctx, nil, /* txn */ - func(_ *kv.Txn, _ jobs.JobMetadata, ju *jobs.JobUpdater) error { - ju.UpdateStatus(jobs.StatusRunning) - return nil - })) + require.NoError(t, job.NoTxn().Update(ctx, func( + _ isql.Txn, _ jobs.JobMetadata, ju *jobs.JobUpdater, + ) error { + ju.UpdateStatus(jobs.StatusRunning) + return nil + })) - err = job.SetDetails(ctx, nil /* txn */, details) + err = job.NoTxn().SetDetails(ctx, details) if err != nil { t.Fatal(err) } @@ -175,7 +176,7 @@ func TestWriteResumeSpan(t *testing.T) { if test.resume.Key != nil { finished.EndKey = test.resume.Key } - if err := sql.TestingDescsTxn(ctx, server, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { + if err := sql.TestingDescsTxn(ctx, server, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { return TestingWriteResumeSpan( ctx, txn, @@ -215,7 +216,7 @@ func TestWriteResumeSpan(t *testing.T) { } var got []roachpb.Span - if err := sql.TestingDescsTxn(ctx, server, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { + if err := sql.TestingDescsTxn(ctx, server, func(ctx context.Context, txn isql.Txn, col *descs.Collection) (err error) { got, _, _, err = rowexec.GetResumeSpans( ctx, registry, txn, keys.SystemSQLCodec, col, tableDesc.ID, mutationID, backfill.IndexMutationFilter) return err diff --git a/pkg/sql/rowexec/bulk_row_writer.go b/pkg/sql/rowexec/bulk_row_writer.go index 353ac0ee7198..eacd8194be20 100644 --- a/pkg/sql/rowexec/bulk_row_writer.go +++ b/pkg/sql/rowexec/bulk_row_writer.go @@ -105,7 +105,7 @@ func (sp *bulkRowWriter) work(ctx context.Context) error { semaCtx := tree.MakeSemaContext() conv, err := row.NewDatumRowConverter( ctx, &semaCtx, 
sp.tableDesc, nil /* targetColNames */, sp.EvalCtx, kvCh, nil, - /* seqChunkProvider */ sp.flowCtx.GetRowMetrics(), sp.flowCtx.Cfg.DB, + /* seqChunkProvider */ sp.flowCtx.GetRowMetrics(), sp.flowCtx.Cfg.DB.KV(), ) if err != nil { return err @@ -137,7 +137,7 @@ func (sp *bulkRowWriter) ingestLoop(ctx context.Context, kvCh chan row.KVBatch) writeTS := sp.spec.Table.CreateAsOfTime const bufferSize = 64 << 20 adder, err := sp.flowCtx.Cfg.BulkAdder( - ctx, sp.flowCtx.Cfg.DB, writeTS, kvserverbase.BulkAdderOptions{ + ctx, sp.flowCtx.Cfg.DB.KV(), writeTS, kvserverbase.BulkAdderOptions{ Name: sp.tableDesc.GetName(), MinBufferSize: bufferSize, // We disallow shadowing here to ensure that we report errors when builds diff --git a/pkg/sql/rowexec/columnbackfiller.go b/pkg/sql/rowexec/columnbackfiller.go index b859d117fa97..4ff7ad6c673b 100644 --- a/pkg/sql/rowexec/columnbackfiller.go +++ b/pkg/sql/rowexec/columnbackfiller.go @@ -13,13 +13,13 @@ package rowexec import ( "context" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/backfill" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/rowinfra" "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -111,9 +111,8 @@ func (cb *columnBackfiller) runChunk( ) (roachpb.Key, error) { var key roachpb.Key var commitWaitFn func(context.Context) error - err := cb.flowCtx.Cfg.DB.TxnWithAdmissionControl( - ctx, roachpb.AdmissionHeader_FROM_SQL, admissionpb.BulkNormalPri, - func(ctx context.Context, txn *kv.Txn) error { + err := cb.flowCtx.Cfg.DB.Txn( + ctx, func(ctx context.Context, txn isql.Txn) error { if 
cb.flowCtx.Cfg.TestingKnobs.RunBeforeBackfillChunk != nil { if err := cb.flowCtx.Cfg.TestingKnobs.RunBeforeBackfillChunk(sp); err != nil { return err @@ -126,12 +125,12 @@ func (cb *columnBackfiller) runChunk( // Defer the commit-wait operation so that we can coalesce this wait // across all batches. This dramatically reduces the total time we spend // waiting for consistency when backfilling a column on GLOBAL tables. - commitWaitFn = txn.DeferCommitWait(ctx) + commitWaitFn = txn.KV().DeferCommitWait(ctx) var err error key, err = cb.RunColumnBackfillChunk( ctx, - txn, + txn.KV(), cb.desc, sp, chunkSize, @@ -140,7 +139,7 @@ func (cb *columnBackfiller) runChunk( cb.flowCtx.TraceKV, ) return err - }) + }, isql.WithPriority(admissionpb.BulkNormalPri)) if err == nil { cb.commitWaitFns = append(cb.commitWaitFns, commitWaitFn) maxCommitWaitFns := int(backfillerMaxCommitWaitFns.Get(&cb.flowCtx.Cfg.Settings.SV)) diff --git a/pkg/sql/rowexec/indexbackfiller.go b/pkg/sql/rowexec/indexbackfiller.go index 3b28d8bb88c0..5c72927c3e42 100644 --- a/pkg/sql/rowexec/indexbackfiller.go +++ b/pkg/sql/rowexec/indexbackfiller.go @@ -14,7 +14,6 @@ import ( "context" "time" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" @@ -22,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -189,7 +189,7 @@ func (ib *indexBackfiller) ingestIndexEntries( InitialSplitsIfUnordered: int(ib.spec.InitialSplits), WriteAtBatchTimestamp: ib.spec.WriteAtBatchTimestamp, } - adder, err := ib.flowCtx.Cfg.BulkAdder(ctx, ib.flowCtx.Cfg.DB, 
ib.spec.WriteAsOf, opts) + adder, err := ib.flowCtx.Cfg.BulkAdder(ctx, ib.flowCtx.Cfg.DB.KV(), ib.spec.WriteAsOf, opts) if err != nil { return err } @@ -425,15 +425,15 @@ func (ib *indexBackfiller) buildIndexEntryBatch( defer traceSpan.Finish() start := timeutil.Now() var entries []rowenc.IndexEntry - if err := ib.flowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - if err := txn.SetFixedTimestamp(ctx, readAsOf); err != nil { + if err := ib.flowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + if err := txn.KV().SetFixedTimestamp(ctx, readAsOf); err != nil { return err } // TODO(knz): do KV tracing in DistSQL processors. var err error entries, key, memUsedBuildingBatch, err = ib.BuildIndexEntriesChunk( - ctx, txn, ib.desc, sp, ib.spec.ChunkSize, false, /* traceKV */ + ctx, txn.KV(), ib.desc, sp, ib.spec.ChunkSize, false, /* traceKV */ ) return err }); err != nil { diff --git a/pkg/sql/rowexec/sample_aggregator.go b/pkg/sql/rowexec/sample_aggregator.go index 1ed6af7bd622..d562f799c07c 100644 --- a/pkg/sql/rowexec/sample_aggregator.go +++ b/pkg/sql/rowexec/sample_aggregator.go @@ -17,11 +17,11 @@ import ( "github.com/axiomhq/hyperloglog" "github.com/cockroachdb/cockroach/pkg/jobs" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -233,10 +233,10 @@ func (s *sampleAggregator) mainLoop(ctx context.Context) (earlyExit bool, err er // If it changed by less than 1%, just check for cancellation (which is more // efficient). 
if fractionCompleted < 1.0 && fractionCompleted < lastReportedFractionCompleted+0.01 { - return job.CheckStatus(ctx, nil /* txn */) + return job.NoTxn().CheckStatus(ctx) } lastReportedFractionCompleted = fractionCompleted - return job.FractionProgressed(ctx, nil /* txn */, jobs.FractionUpdater(fractionCompleted)) + return job.NoTxn().FractionProgressed(ctx, jobs.FractionUpdater(fractionCompleted)) } var rowsProcessed uint64 @@ -435,7 +435,7 @@ func (s *sampleAggregator) writeResults(ctx context.Context) error { // internal executor instead of doing this weird thing where it uses the // internal executor to execute one statement at a time inside a db.Txn() // closure. - if err := s.FlowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := s.FlowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { for _, si := range s.sketches { var histogram *stats.HistogramData if si.spec.GenerateHistogram { @@ -516,7 +516,6 @@ func (s *sampleAggregator) writeResults(ctx context.Context) error { if si.spec.PartialPredicate == "" { if err := stats.DeleteOldStatsForColumns( ctx, - s.FlowCtx.Cfg.Executor, txn, s.tableID, columnIDs, @@ -529,7 +528,6 @@ func (s *sampleAggregator) writeResults(ctx context.Context) error { if err := stats.InsertNewStat( ctx, s.FlowCtx.Cfg.Settings, - s.FlowCtx.Cfg.Executor, txn, s.tableID, si.spec.StatName, @@ -564,13 +562,12 @@ func (s *sampleAggregator) writeResults(ctx context.Context) error { columnsUsed[i] = columnIDs } keepTime := stats.TableStatisticsRetentionPeriod.Get(&s.FlowCtx.Cfg.Settings.SV) - if err := s.FlowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := s.FlowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { // Delete old stats from columns that were not collected. This is // important to prevent single-column stats from deleted columns or // multi-column stats from deleted indexes from persisting indefinitely. 
return stats.DeleteOldStatsForOtherColumns( ctx, - s.FlowCtx.Cfg.Executor, txn, s.tableID, columnsUsed, diff --git a/pkg/sql/rowexec/sample_aggregator_test.go b/pkg/sql/rowexec/sample_aggregator_test.go index f86b7dc139f9..44a4a505ab73 100644 --- a/pkg/sql/rowexec/sample_aggregator_test.go +++ b/pkg/sql/rowexec/sample_aggregator_test.go @@ -19,17 +19,16 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catenumpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/randgen" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/stats" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils/distsqlutils" @@ -58,7 +57,6 @@ func runSampleAggregator( t *testing.T, server serverutils.TestServerInterface, sqlDB *gosql.DB, - kvDB *kv.DB, st *cluster.Settings, evalCtx *eval.Context, memLimitBytes int64, @@ -74,8 +72,7 @@ func runSampleAggregator( Mon: evalCtx.TestingMon, Cfg: &execinfra.ServerConfig{ Settings: st, - DB: kvDB, - Executor: server.InternalExecutor().(sqlutil.InternalExecutor), + DB: server.InternalDB().(descs.DB), Gossip: gossip.MakeOptionalGossip(server.GossipI().(*gossip.Gossip)), }, } @@ -309,7 +306,7 @@ func runSampleAggregator( func TestSampleAggregator(t *testing.T) { defer leaktest.AfterTest(t)() - server, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) + server, sqlDB, _ := 
serverutils.StartServer(t, base.TestServerArgs{}) defer server.Stopper().Stop(context.Background()) st := cluster.MakeTestingClusterSettings() @@ -457,7 +454,7 @@ func TestSampleAggregator(t *testing.T) { } { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { runSampleAggregator( - t, server, sqlDB, kvDB, st, &evalCtx, tc.memLimitBytes, tc.expectOutOfMemory, + t, server, sqlDB, st, &evalCtx, tc.memLimitBytes, tc.expectOutOfMemory, tc.childNumSamples, tc.childMinNumSamples, tc.aggNumSamples, tc.aggMinNumSamples, tc.maxBuckets, tc.expectedMaxBuckets, tc.inputRows, tc.expected, ) diff --git a/pkg/sql/rowexec/tablereader.go b/pkg/sql/rowexec/tablereader.go index 1900e934d2c4..3d331be50bb3 100644 --- a/pkg/sql/rowexec/tablereader.go +++ b/pkg/sql/rowexec/tablereader.go @@ -219,7 +219,7 @@ func (tr *tableReader) startScan(ctx context.Context) error { } else { initialTS := tr.FlowCtx.Txn.ReadTimestamp() err = tr.fetcher.StartInconsistentScan( - ctx, tr.FlowCtx.Cfg.DB, initialTS, tr.maxTimestampAge, tr.Spans, + ctx, tr.FlowCtx.Cfg.DB.KV(), initialTS, tr.maxTimestampAge, tr.Spans, bytesLimit, tr.limitHint, tr.EvalCtx.QualityOfService(), ) } diff --git a/pkg/sql/save_table.go b/pkg/sql/save_table.go index f2d091dbf666..bd5e90490f37 100644 --- a/pkg/sql/save_table.go +++ b/pkg/sql/save_table.go @@ -69,7 +69,8 @@ func (n *saveTableNode) startExec(params runParams) error { create.Defs = append(create.Defs, def) } - _, err := params.p.ExtendedEvalContext().ExecCfg.InternalExecutor.Exec( + _, err := params.p.ExtendedEvalContext().ExecCfg.InternalDB. 
+ Executor().Exec( params.ctx, "create save table", nil, /* txn */ @@ -82,7 +83,8 @@ func (n *saveTableNode) startExec(params runParams) error { func (n *saveTableNode) issue(params runParams) error { if v := &n.run.vals; len(v.Rows) > 0 { stmt := fmt.Sprintf("INSERT INTO %s %s", n.target.String(), v.String()) - if _, err := params.p.ExtendedEvalContext().ExecCfg.InternalExecutor.Exec( + if _, err := params.p.ExtendedEvalContext().ExecCfg.InternalDB. + Executor().Exec( params.ctx, "insert into save table", nil, /* txn */ diff --git a/pkg/sql/scheduledlogging/BUILD.bazel b/pkg/sql/scheduledlogging/BUILD.bazel index 50484a27d8b5..c37f534a5d62 100644 --- a/pkg/sql/scheduledlogging/BUILD.bazel +++ b/pkg/sql/scheduledlogging/BUILD.bazel @@ -7,14 +7,13 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/sql/scheduledlogging", visibility = ["//visibility:public"], deps = [ - "//pkg/kv", "//pkg/roachpb", "//pkg/security/username", "//pkg/settings", "//pkg/settings/cluster", + "//pkg/sql/isql", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/util/log", "//pkg/util/log/eventpb", "//pkg/util/log/logpb", diff --git a/pkg/sql/scheduledlogging/captured_index_usage_stats.go b/pkg/sql/scheduledlogging/captured_index_usage_stats.go index f3e6110e0db3..b349e022a4e9 100644 --- a/pkg/sql/scheduledlogging/captured_index_usage_stats.go +++ b/pkg/sql/scheduledlogging/captured_index_usage_stats.go @@ -14,14 +14,13 @@ import ( "context" "time" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/log" 
"github.com/cockroachdb/cockroach/pkg/util/log/eventpb" "github.com/cockroachdb/cockroach/pkg/util/log/logpb" @@ -82,9 +81,8 @@ func (*CaptureIndexUsageStatsTestingKnobs) ModuleTestingKnobs() {} // CaptureIndexUsageStatsLoggingScheduler is responsible for logging index usage stats // on a scheduled interval. type CaptureIndexUsageStatsLoggingScheduler struct { - db *kv.DB + db isql.DB st *cluster.Settings - ie sqlutil.InternalExecutor knobs *CaptureIndexUsageStatsTestingKnobs currentCaptureStartTime time.Time } @@ -120,15 +118,13 @@ func (s *CaptureIndexUsageStatsLoggingScheduler) durationUntilNextInterval() tim func Start( ctx context.Context, stopper *stop.Stopper, - db *kv.DB, + db isql.DB, cs *cluster.Settings, - ie sqlutil.InternalExecutor, knobs *CaptureIndexUsageStatsTestingKnobs, ) { scheduler := CaptureIndexUsageStatsLoggingScheduler{ db: db, st: cs, - ie: ie, knobs: knobs, } scheduler.start(ctx, stopper) @@ -139,7 +135,7 @@ func (s *CaptureIndexUsageStatsLoggingScheduler) start(ctx context.Context, stop // Start the scheduler immediately. 
timer := time.NewTimer(0 * time.Second) defer timer.Stop() - + ie := s.db.Executor() for { select { case <-stopper.ShouldQuiesce(): @@ -150,7 +146,7 @@ func (s *CaptureIndexUsageStatsLoggingScheduler) start(ctx context.Context, stop continue } s.currentCaptureStartTime = timeutil.Now() - err := captureIndexUsageStats(ctx, s.ie, stopper, telemetryCaptureIndexUsageStatsLoggingDelay.Get(&s.st.SV)) + err := captureIndexUsageStats(ctx, ie, stopper, telemetryCaptureIndexUsageStatsLoggingDelay.Get(&s.st.SV)) if err != nil { log.Warningf(ctx, "error capturing index usage stats: %+v", err) } @@ -170,10 +166,7 @@ func (s *CaptureIndexUsageStatsLoggingScheduler) start(ctx context.Context, stop } func captureIndexUsageStats( - ctx context.Context, - ie sqlutil.InternalExecutor, - stopper *stop.Stopper, - loggingDelay time.Duration, + ctx context.Context, ie isql.Executor, stopper *stop.Stopper, loggingDelay time.Duration, ) error { allDatabaseNames, err := getAllDatabaseNames(ctx, ie) if err != nil { @@ -305,7 +298,7 @@ func logIndexUsageStatsWithDelay( } } -func getAllDatabaseNames(ctx context.Context, ie sqlutil.InternalExecutor) (tree.NameList, error) { +func getAllDatabaseNames(ctx context.Context, ie isql.Executor) (tree.NameList, error) { var allDatabaseNames tree.NameList var ok bool var expectedNumDatums = 1 diff --git a/pkg/sql/schema_change_plan_node.go b/pkg/sql/schema_change_plan_node.go index 05b8ddc2930c..8c43850e391f 100644 --- a/pkg/sql/schema_change_plan_node.go +++ b/pkg/sql/schema_change_plan_node.go @@ -18,12 +18,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" 
"github.com/cockroachdb/cockroach/pkg/sql/descmetadata" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scbuild" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scdeps" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scerrors" @@ -67,8 +67,7 @@ func (p *planner) SchemaChange(ctx context.Context, stmt tree.Statement) (planNo deps := scdeps.NewBuilderDependencies( p.ExecCfg().NodeInfo.LogicalClusterID(), p.ExecCfg().Codec, - p.Txn(), - p.Descriptors(), + p.InternalSQLTxn(), NewSkippingCacheSchemaResolver, /* schemaResolverFactory */ p, /* authAccessor */ p, /* astFormatter */ @@ -76,7 +75,6 @@ func (p *planner) SchemaChange(ctx context.Context, stmt tree.Statement) (planNo p.SessionData(), p.ExecCfg().Settings, scs.stmts, - p.execCfg.InternalExecutor, p, ) state, err := scbuild.Build(ctx, deps, scs.state, stmt) @@ -130,14 +128,15 @@ func (p *planner) waitForDescriptorIDGeneratorMigration(ctx context.Context) err timeutil.Since(start), ) } - if err := p.ExecCfg().InternalExecutorFactory.DescsTxn(ctx, p.ExecCfg().DB, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + if err := p.ExecCfg().InternalDB.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { - if err := txn.SetFixedTimestamp(ctx, now); err != nil { + kvTxn := txn.KV() + if err := kvTxn.SetFixedTimestamp(ctx, now); err != nil { return err } k := catalogkeys.MakeDescMetadataKey(p.ExecCfg().Codec, keys.DescIDSequenceID) - result, err := txn.Get(ctx, k) + result, err := txn.KV().Get(ctx, k) if err != nil { return err } @@ -191,13 +190,13 @@ func (p *planner) waitForDescriptorSchemaChanges( ) } blocked := false - if err := p.ExecCfg().InternalExecutorFactory.DescsTxn(ctx, p.ExecCfg().DB, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + if err := p.ExecCfg().InternalDB.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { - if err := txn.SetFixedTimestamp(ctx, 
now); err != nil { + if err := txn.KV().SetFixedTimestamp(ctx, now); err != nil { return err } - desc, err := descriptors.ByID(txn).WithoutNonPublic().Get().Desc(ctx, descID) + desc, err := txn.Descriptors().ByID(txn.KV()).WithoutNonPublic().Get().Desc(ctx, descID) if err != nil { return err } @@ -246,8 +245,7 @@ func (s *schemaChangePlanNode) startExec(params runParams) error { deps := scdeps.NewBuilderDependencies( p.ExecCfg().NodeInfo.LogicalClusterID(), p.ExecCfg().Codec, - p.Txn(), - p.Descriptors(), + p.InternalSQLTxn(), NewSkippingCacheSchemaResolver, p, p, @@ -255,7 +253,6 @@ func (s *schemaChangePlanNode) startExec(params runParams) error { p.SessionData(), p.ExecCfg().Settings, scs.stmts, - p.ExecCfg().InternalExecutor, p, ) state, err := scbuild.Build(params.ctx, deps, scs.state, s.stmt) @@ -271,7 +268,7 @@ func (s *schemaChangePlanNode) startExec(params runParams) error { p.SessionData(), p.User(), p.ExecCfg(), - p.Txn(), + p.InternalSQLTxn(), p.Descriptors(), p.EvalContext(), p.ExtendedEvalContext().Tracing.KVTracingEnabled(), @@ -294,7 +291,7 @@ func newSchemaChangerTxnRunDependencies( sessionData *sessiondata.SessionData, user username.SQLUsername, execCfg *ExecutorConfig, - txn *kv.Txn, + txn isql.Txn, descriptors *descs.Collection, evalContext *eval.Context, kvTrace bool, @@ -303,10 +300,9 @@ func newSchemaChangerTxnRunDependencies( ) scexec.Dependencies { metaDataUpdater := descmetadata.NewMetadataUpdater( ctx, - execCfg.InternalExecutorFactory, + txn, descriptors, &execCfg.Settings.SV, - txn, sessionData, ) return scdeps.NewExecutorDependencies( diff --git a/pkg/sql/schema_changer.go b/pkg/sql/schema_changer.go index 71f4d754821f..7e974eb84cee 100644 --- a/pkg/sql/schema_changer.go +++ b/pkg/sql/schema_changer.go @@ -43,6 +43,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/faketreeeval" "github.com/cockroachdb/cockroach/pkg/sql/flowinfra" "github.com/cockroachdb/cockroach/pkg/sql/gcjob/gcjobnotifier" + 
"github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -53,7 +54,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/grpcutil" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -112,8 +112,8 @@ type SchemaChanger struct { droppedDatabaseID descpb.ID droppedSchemaIDs catalog.DescriptorIDSet sqlInstanceID base.SQLInstanceID - db *kv.DB leaseMgr *lease.Manager + db isql.DB metrics *SchemaChangerMetrics @@ -129,7 +129,6 @@ type SchemaChanger struct { clock *hlc.Clock settings *cluster.Settings execCfg *ExecutorConfig - ieFactory sqlutil.InternalExecutorFactory } // NewSchemaChangerForTesting only for tests. @@ -137,26 +136,23 @@ func NewSchemaChangerForTesting( tableID descpb.ID, mutationID descpb.MutationID, sqlInstanceID base.SQLInstanceID, - db *kv.DB, + db isql.DB, leaseMgr *lease.Manager, jobRegistry *jobs.Registry, execCfg *ExecutorConfig, settings *cluster.Settings, ) SchemaChanger { return SchemaChanger{ - descID: tableID, - mutationID: mutationID, - sqlInstanceID: sqlInstanceID, - db: db, - leaseMgr: leaseMgr, - jobRegistry: jobRegistry, - settings: settings, - execCfg: execCfg, - // Note that this doesn't end up actually being session-bound but that's - // good enough for testing. 
- ieFactory: execCfg.InternalExecutorFactory, + descID: tableID, + mutationID: mutationID, + sqlInstanceID: sqlInstanceID, + db: db, + leaseMgr: leaseMgr, + jobRegistry: jobRegistry, + settings: settings, + execCfg: execCfg, metrics: NewSchemaChangerMetrics(), - clock: db.Clock(), + clock: db.KV().Clock(), distSQLPlanner: execCfg.DistSQLPlanner, testingKnobs: &SchemaChangerTestingKnobs{}, } @@ -277,8 +273,8 @@ func (sc *SchemaChanger) backfillQueryIntoTable( } } - return sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - if err := txn.SetFixedTimestamp(ctx, ts); err != nil { + return sc.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + if err := txn.KV().SetFixedTimestamp(ctx, ts); err != nil { return err } @@ -286,7 +282,7 @@ func (sc *SchemaChanger) backfillQueryIntoTable( // would have committed by this point. p, cleanup := NewInternalPlanner( desc, - txn, + txn.KV(), username.RootUserName(), &MemoryMetrics{}, sc.execCfg, @@ -329,7 +325,7 @@ func (sc *SchemaChanger) backfillQueryIntoTable( rw, tree.Rows, sc.execCfg.RangeDescriptorCache, - txn, + txn.KV(), sc.clock, // Make a session tracing object on-the-fly. This is OK // because it sets "enabled: false" and thus none of the @@ -369,7 +365,7 @@ func (sc *SchemaChanger) backfillQueryIntoTable( }} PlanAndRunCTAS(ctx, sc.distSQLPlanner, localPlanner, - txn, isLocal, localPlanner.curPlan.main, out, recv) + txn.KV(), isLocal, localPlanner.curPlan.main, out, recv) if planAndRunErr = rw.Err(); planAndRunErr != nil { return } @@ -403,7 +399,7 @@ func (sc *SchemaChanger) maybeUpdateScheduledJobsForRowLevelTTL( ) error { // Drop the scheduled job if one exists and the table descriptor is being dropped. 
if tableDesc.Dropped() && tableDesc.HasRowLevelTTL() { - if err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := sc.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { scheduleID := tableDesc.GetRowLevelTTL().ScheduleID if scheduleID > 0 { log.Infof(ctx, "dropping TTL schedule %d", scheduleID) @@ -445,8 +441,8 @@ func (sc *SchemaChanger) maybeMakeAddTablePublic( } log.Info(ctx, "making table public") - return sc.txn(ctx, func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection) error { - mut, err := descsCol.MutableByID(txn).Table(ctx, table.GetID()) + return sc.txn(ctx, func(ctx context.Context, txn isql.Txn, descsCol *descs.Collection) error { + mut, err := descsCol.MutableByID(txn.KV()).Table(ctx, table.GetID()) if err != nil { return err } @@ -454,7 +450,7 @@ func (sc *SchemaChanger) maybeMakeAddTablePublic( return nil } mut.State = descpb.DescriptorState_PUBLIC - return descsCol.WriteDesc(ctx, true /* kvTrace */, mut, txn) + return descsCol.WriteDesc(ctx, true /* kvTrace */, mut, txn.KV()) }) } @@ -486,8 +482,8 @@ func (sc *SchemaChanger) ignoreRevertedDropIndex( if !table.IsPhysicalTable() { return nil } - return sc.txn(ctx, func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection) error { - mut, err := descsCol.MutableByID(txn).Table(ctx, table.GetID()) + return sc.txn(ctx, func(ctx context.Context, txn isql.Txn, descsCol *descs.Collection) error { + mut, err := descsCol.MutableByID(txn.KV()).Table(ctx, table.GetID()) if err != nil { return err } @@ -505,7 +501,7 @@ func (sc *SchemaChanger) ignoreRevertedDropIndex( mutationsModified = true } if mutationsModified { - return descsCol.WriteDesc(ctx, true /* kvTrace */, mut, txn) + return descsCol.WriteDesc(ctx, true /* kvTrace */, mut, txn.KV()) } return nil }) @@ -513,7 +509,7 @@ func (sc *SchemaChanger) ignoreRevertedDropIndex( func startGCJob( ctx context.Context, - db *kv.DB, + db isql.DB, jobRegistry *jobs.Registry, userName username.SQLUsername, 
schemaChangeDescription string, @@ -524,7 +520,7 @@ func startGCJob( schemaChangeDescription, userName, details, useLegacyGCJob, ) jobID := jobRegistry.MakeJobID() - if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { _, err := jobRegistry.CreateJobWithTxn(ctx, jobRecord, jobID, txn) return err }); err != nil { @@ -579,9 +575,9 @@ func (sc *SchemaChanger) getTargetDescriptor(ctx context.Context) (catalog.Descr // Retrieve the descriptor that is being changed. var desc catalog.Descriptor if err := sc.txn(ctx, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + ctx context.Context, txn isql.Txn, descriptors *descs.Collection, ) (err error) { - desc, err = descriptors.ByID(txn).Get().Desc(ctx, sc.descID) + desc, err = descriptors.ByID(txn.KV()).Get().Desc(ctx, sc.descID) return err }); err != nil { return nil, err @@ -884,8 +880,8 @@ func (sc *SchemaChanger) handlePermanentSchemaChangeError( // initialize the job running status. 
func (sc *SchemaChanger) initJobRunningStatus(ctx context.Context) error { - return sc.txn(ctx, func(ctx context.Context, txn *kv.Txn, descriptors *descs.Collection) error { - desc, err := descriptors.ByID(txn).WithoutNonPublic().Get().Table(ctx, sc.descID) + return sc.txn(ctx, func(ctx context.Context, txn isql.Txn, descriptors *descs.Collection) error { + desc, err := descriptors.ByID(txn.KV()).WithoutNonPublic().Get().Table(ctx, sc.descID) if err != nil { return err } @@ -905,8 +901,8 @@ func (sc *SchemaChanger) initJobRunningStatus(ctx context.Context) error { } } if runStatus != "" && !desc.Dropped() { - if err := sc.job.RunningStatus( - ctx, txn, func(ctx context.Context, details jobspb.Details) (jobs.RunningStatus, error) { + if err := sc.job.WithTxn(txn).RunningStatus( + ctx, func(ctx context.Context, details jobspb.Details) (jobs.RunningStatus, error) { return runStatus, nil }); err != nil { return errors.Wrapf(err, "failed to update job status") @@ -1027,8 +1023,8 @@ func (sc *SchemaChanger) rollbackSchemaChange(ctx context.Context, err error) er // table was in the ADD state and the schema change failed, then we need to // clean up the descriptor. gcJobID := sc.jobRegistry.MakeJobID() - if err := sc.txn(ctx, func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection) error { - scTable, err := descsCol.MutableByID(txn).Table(ctx, sc.descID) + if err := sc.txn(ctx, func(ctx context.Context, txn isql.Txn, descsCol *descs.Collection) error { + scTable, err := descsCol.MutableByID(txn.KV()).Table(ctx, sc.descID) if err != nil { return err } @@ -1036,10 +1032,10 @@ func (sc *SchemaChanger) rollbackSchemaChange(ctx context.Context, err error) er return nil } - b := txn.NewBatch() + b := txn.KV().NewBatch() // For views, we need to clean up and references that exist to tables. 
if scTable.IsView() { - if err := sc.dropViewDeps(ctx, descsCol, txn, b, scTable); err != nil { + if err := sc.dropViewDeps(ctx, descsCol, txn.KV(), b, scTable); err != nil { return err } } @@ -1070,7 +1066,7 @@ func (sc *SchemaChanger) rollbackSchemaChange(ctx context.Context, err error) er if _, err := sc.jobRegistry.CreateJobWithTxn(ctx, jobRecord, gcJobID, txn); err != nil { return err } - return txn.Run(ctx, b) + return txn.KV().Run(ctx, b) }); err != nil { return err } @@ -1087,13 +1083,13 @@ func (sc *SchemaChanger) RunStateMachineBeforeBackfill(ctx context.Context) erro var runStatus jobs.RunningStatus if err := sc.txn(ctx, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { - tbl, err := descsCol.MutableByID(txn).Table(ctx, sc.descID) + tbl, err := descsCol.MutableByID(txn.KV()).Table(ctx, sc.descID) if err != nil { return err } - dbDesc, err := descsCol.ByID(txn).WithoutNonPublic().Get().Database(ctx, tbl.GetParentID()) + dbDesc, err := descsCol.ByID(txn.KV()).WithoutNonPublic().Get().Database(ctx, tbl.GetParentID()) if err != nil { return err } @@ -1145,12 +1141,12 @@ func (sc *SchemaChanger) RunStateMachineBeforeBackfill(ctx context.Context) erro return nil } if err := descsCol.WriteDesc( - ctx, true /* kvTrace */, tbl, txn, + ctx, true /* kvTrace */, tbl, txn.KV(), ); err != nil { return err } if sc.job != nil { - if err := sc.job.RunningStatus(ctx, txn, func( + if err := sc.job.WithTxn(txn).RunningStatus(ctx, func( ctx context.Context, details jobspb.Details, ) (jobs.RunningStatus, error) { return runStatus, nil @@ -1193,9 +1189,9 @@ func (sc *SchemaChanger) stepStateMachineAfterIndexBackfill(ctx context.Context) var runStatus jobs.RunningStatus if err := sc.txn(ctx, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { - tbl, err := descsCol.MutableByID(txn).Table(ctx, 
sc.descID) + tbl, err := descsCol.MutableByID(txn.KV()).Table(ctx, sc.descID) if err != nil { return err } @@ -1226,12 +1222,12 @@ func (sc *SchemaChanger) stepStateMachineAfterIndexBackfill(ctx context.Context) return nil } if err := descsCol.WriteDesc( - ctx, true /* kvTrace */, tbl, txn, + ctx, true /* kvTrace */, tbl, txn.KV(), ); err != nil { return err } if sc.job != nil { - if err := sc.job.RunningStatus(ctx, txn, func( + if err := sc.job.WithTxn(txn).RunningStatus(ctx, func( ctx context.Context, details jobspb.Details, ) (jobs.RunningStatus, error) { return runStatus, nil @@ -1247,21 +1243,21 @@ func (sc *SchemaChanger) stepStateMachineAfterIndexBackfill(ctx context.Context) } func (sc *SchemaChanger) createTemporaryIndexGCJob( - ctx context.Context, indexID descpb.IndexID, txn *kv.Txn, jobDesc string, + ctx context.Context, indexID descpb.IndexID, txn isql.Txn, jobDesc string, ) error { minimumDropTime := int64(1) return sc.createIndexGCJobWithDropTime(ctx, indexID, txn, jobDesc, minimumDropTime) } func (sc *SchemaChanger) createIndexGCJob( - ctx context.Context, indexID descpb.IndexID, txn *kv.Txn, jobDesc string, + ctx context.Context, indexID descpb.IndexID, txn isql.Txn, jobDesc string, ) error { dropTime := timeutil.Now().UnixNano() return sc.createIndexGCJobWithDropTime(ctx, indexID, txn, jobDesc, dropTime) } func (sc *SchemaChanger) createIndexGCJobWithDropTime( - ctx context.Context, indexID descpb.IndexID, txn *kv.Txn, jobDesc string, dropTime int64, + ctx context.Context, indexID descpb.IndexID, txn isql.Txn, jobDesc string, dropTime int64, ) error { indexGCDetails := jobspb.SchemaChangeGCDetails{ Indexes: []jobspb.SchemaChangeGCDetails_DroppedIndex{ @@ -1334,25 +1330,25 @@ func (sc *SchemaChanger) done(ctx context.Context) error { var didUpdate bool var depMutationJobs []jobspb.JobID var otherJobIDs []jobspb.JobID - err := sc.execCfg.InternalExecutorFactory.DescsTxn(ctx, sc.db, func( - ctx context.Context, txn *kv.Txn, descsCol 
*descs.Collection, + err := sc.txn(ctx, func( + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { depMutationJobs = depMutationJobs[:0] otherJobIDs = otherJobIDs[:0] var err error - scTable, err := descsCol.MutableByID(txn).Table(ctx, sc.descID) + scTable, err := descsCol.MutableByID(txn.KV()).Table(ctx, sc.descID) if err != nil { return err } - dbDesc, err := descsCol.ByID(txn).WithoutNonPublic().Get().Database(ctx, scTable.GetParentID()) + dbDesc, err := descsCol.ByID(txn.KV()).WithoutNonPublic().Get().Database(ctx, scTable.GetParentID()) if err != nil { return err } collectReferencedTypeIDs := func() (catalog.DescriptorIDSet, error) { typeLookupFn := func(id descpb.ID) (catalog.TypeDescriptor, error) { - desc, err := descsCol.ByIDWithLeased(txn).WithoutNonPublic().Get().Type(ctx, id) + desc, err := descsCol.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Type(ctx, id) if err != nil { return nil, err } @@ -1378,7 +1374,7 @@ func (sc *SchemaChanger) done(ctx context.Context) error { if err != nil { return err } - b := txn.NewBatch() + b := txn.KV().NewBatch() const kvTrace = true var i int // set to determine whether there is a mutation @@ -1408,7 +1404,7 @@ func (sc *SchemaChanger) done(ctx context.Context) error { if fk := m.AsForeignKey(); fk != nil && fk.Adding() && fk.GetConstraintValidity() == descpb.ConstraintValidity_Unvalidated { // Add backreference on the referenced table (which could be the same table) - backrefTable, err := descsCol.MutableByID(txn).Table(ctx, fk.GetReferencedTableID()) + backrefTable, err := descsCol.MutableByID(txn.KV()).Table(ctx, fk.GetReferencedTableID()) if err != nil { return err } @@ -1501,6 +1497,7 @@ func (sc *SchemaChanger) done(ctx context.Context) error { // If we are modifying TTL, then make sure the schedules are created // or dropped as appropriate. 
+ scheduledJobs := jobs.ScheduledJobTxn(txn) if modify := m.AsModifyRowLevelTTL(); modify != nil && !modify.IsRollback() { if fn := sc.testingKnobs.RunBeforeModifyRowLevelTTL; fn != nil { if err := fn(); err != nil { @@ -1512,12 +1509,10 @@ func (sc *SchemaChanger) done(ctx context.Context) error { shouldCreateScheduledJob := scTable.RowLevelTTL.ScheduleID == 0 // Double check the job exists - if it does not, we need to recreate it. if scTable.RowLevelTTL.ScheduleID != 0 { - _, err := jobs.LoadScheduledJob( + _, err := scheduledJobs.Load( ctx, - JobSchedulerEnv(sc.execCfg), + JobSchedulerEnv(sc.execCfg.JobsKnobs()), scTable.RowLevelTTL.ScheduleID, - sc.execCfg.InternalExecutor, - txn, ) if err != nil { if !jobs.HasScheduledJobNotFoundError(err) { @@ -1530,8 +1525,8 @@ func (sc *SchemaChanger) done(ctx context.Context) error { if shouldCreateScheduledJob { j, err := CreateRowLevelTTLScheduledJob( ctx, - sc.execCfg, - txn, + sc.execCfg.JobsKnobs(), + scheduledJobs, scTable.GetPrivileges().Owner(), scTable.GetID(), modify.RowLevelTTL(), @@ -1543,7 +1538,10 @@ func (sc *SchemaChanger) done(ctx context.Context) error { } } else if m.Dropped() { if scTable.HasRowLevelTTL() { - if err := DeleteSchedule(ctx, sc.execCfg, txn, scTable.GetRowLevelTTL().ScheduleID); err != nil { + if err := scheduledJobs.DeleteByID( + ctx, JobSchedulerEnv(sc.execCfg.JobsKnobs()), + scTable.GetRowLevelTTL().ScheduleID, + ); err != nil { return err } } @@ -1585,7 +1583,8 @@ func (sc *SchemaChanger) done(ctx context.Context) error { } if err := setNewLocalityConfig( - ctx, scTable, txn, b, localityConfigToSwapTo, kvTrace, descsCol); err != nil { + ctx, txn.KV(), descsCol, b, scTable, localityConfigToSwapTo, kvTrace, + ); err != nil { return err } switch localityConfigToSwapTo.Locality.(type) { @@ -1604,7 +1603,7 @@ func (sc *SchemaChanger) done(ctx context.Context) error { // If we performed MakeMutationComplete on a PrimaryKeySwap mutation, then we need to start // a job for the index deletion 
mutations that the primary key swap mutation added, if any. - jobID, err := sc.queueCleanupJob(ctx, scTable, txn) + jobID, err := sc.queueCleanupJob(ctx, txn, scTable) if err != nil { return err } @@ -1621,7 +1620,7 @@ func (sc *SchemaChanger) done(ctx context.Context) error { // If we performed MakeMutationComplete on a computed column swap, then // we need to start a job for the column deletion that the swap mutation // added if any. - jobID, err := sc.queueCleanupJob(ctx, scTable, txn) + jobID, err := sc.queueCleanupJob(ctx, txn, scTable) if err != nil { return err } @@ -1680,7 +1679,7 @@ func (sc *SchemaChanger) done(ctx context.Context) error { // Update the set of back references. for id, isAddition := range update { - typ, err := descsCol.MutableByID(txn).Type(ctx, id) + typ, err := descsCol.MutableByID(txn.KV()).Type(ctx, id) if err != nil { return err } @@ -1721,7 +1720,7 @@ func (sc *SchemaChanger) done(ctx context.Context) error { // Update the set of back references. for id, colIDSet := range update { - tbl, err := descsCol.MutableByID(txn).Table(ctx, id) + tbl, err := descsCol.MutableByID(txn.KV()).Table(ctx, id) if err != nil { return err } @@ -1762,7 +1761,7 @@ func (sc *SchemaChanger) done(ctx context.Context) error { if err := descsCol.WriteDescToBatch(ctx, kvTrace, scTable, b); err != nil { return err } - if err := txn.Run(ctx, b); err != nil { + if err := txn.KV().Run(ctx, b); err != nil { return err } @@ -1791,7 +1790,7 @@ func (sc *SchemaChanger) done(ctx context.Context) error { // If any operations was skipped because a mutation was made // redundant due to a column getting dropped later on then we should // wait for those jobs to complete before returning our result back. 
- if err := sc.jobRegistry.Run(ctx, sc.execCfg.InternalExecutor, depMutationJobs); err != nil { + if err := sc.jobRegistry.Run(ctx, depMutationJobs); err != nil { return errors.Wrap(err, "A dependent transaction failed for this schema change") } @@ -1803,14 +1802,14 @@ func (sc *SchemaChanger) done(ctx context.Context) error { // tenant. func maybeUpdateZoneConfigsForPKChange( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, execCfg *ExecutorConfig, kvTrace bool, descriptors *descs.Collection, table *tabledesc.Mutable, swapInfo *descpb.PrimaryKeySwap, ) error { - zoneWithRaw, err := descriptors.GetZoneConfig(ctx, txn, table.GetID()) + zoneWithRaw, err := descriptors.GetZoneConfig(ctx, txn.KV(), table.GetID()) if err != nil { return err } @@ -1850,7 +1849,9 @@ func maybeUpdateZoneConfigsForPKChange( // Write the zone back. This call regenerates the index spans that apply // to each partition in the index. _, err = writeZoneConfig( - ctx, txn, table.ID, table, zoneWithRaw.ZoneConfigProto(), zoneWithRaw.GetRawBytesInStorage(), execCfg, descriptors, false, kvTrace, + ctx, txn.KV(), table.ID, table, + zoneWithRaw.ZoneConfigProto(), zoneWithRaw.GetRawBytesInStorage(), + execCfg, descriptors, false, kvTrace, ) if err != nil && !sqlerrors.IsCCLRequiredError(err) { return err @@ -1912,8 +1913,8 @@ func (sc *SchemaChanger) maybeReverseMutations(ctx context.Context, causingError // Get the other tables whose foreign key backreferences need to be removed. 
alreadyReversed := false const kvTrace = true // TODO(ajwerner): figure this out - err := sc.txn(ctx, func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection) error { - scTable, err := descsCol.MutableByID(txn).Table(ctx, sc.descID) + err := sc.txn(ctx, func(ctx context.Context, txn isql.Txn, descsCol *descs.Collection) error { + scTable, err := descsCol.MutableByID(txn.KV()).Table(ctx, sc.descID) if err != nil { return err } @@ -1950,7 +1951,7 @@ func (sc *SchemaChanger) maybeReverseMutations(ctx context.Context, causingError // Keep track of the column mutations being reversed so that indexes // referencing them can be dropped. columns := make(map[string]struct{}) - b := txn.NewBatch() + b := txn.KV().NewBatch() for _, m := range scTable.AllMutations() { if m.MutationID() != sc.mutationID { break @@ -1986,7 +1987,7 @@ func (sc *SchemaChanger) maybeReverseMutations(ctx context.Context, causingError } // Get the foreign key backreferences to remove. if fk := constraint.AsForeignKey(); fk != nil { - backrefTable, err := descsCol.MutableByID(txn).Table(ctx, fk.GetReferencedTableID()) + backrefTable, err := descsCol.MutableByID(txn.KV()).Table(ctx, fk.GetReferencedTableID()) if err != nil { return err } @@ -2021,7 +2022,7 @@ func (sc *SchemaChanger) maybeReverseMutations(ctx context.Context, causingError if err := descsCol.WriteDescToBatch(ctx, kvTrace, scTable, b); err != nil { return err } - if err := txn.Run(ctx, b); err != nil { + if err := txn.KV().Run(ctx, b); err != nil { return err } @@ -2064,7 +2065,7 @@ func (sc *SchemaChanger) maybeReverseMutations(ctx context.Context, causingError // updateJobForRollback updates the schema change job in the case of a rollback. func (sc *SchemaChanger) updateJobForRollback( - ctx context.Context, txn *kv.Txn, tableDesc catalog.TableDescriptor, + ctx context.Context, txn isql.Txn, tableDesc catalog.TableDescriptor, ) error { // Initialize refresh spans to scan the entire table. 
span := tableDesc.PrimaryIndexSpan(sc.execCfg.Codec) @@ -2079,8 +2080,9 @@ func (sc *SchemaChanger) updateJobForRollback( } } oldDetails := sc.job.Details().(jobspb.SchemaChangeDetails) - if err := sc.job.SetDetails( - ctx, txn, jobspb.SchemaChangeDetails{ + u := sc.job.WithTxn(txn) + if err := u.SetDetails( + ctx, jobspb.SchemaChangeDetails{ DescID: sc.descID, TableMutationID: sc.mutationID, ResumeSpanList: spanList, @@ -2089,7 +2091,7 @@ func (sc *SchemaChanger) updateJobForRollback( ); err != nil { return err } - if err := sc.job.SetProgress(ctx, txn, jobspb.SchemaChangeProgress{}); err != nil { + if err := u.SetProgress(ctx, jobspb.SchemaChangeProgress{}); err != nil { return err } @@ -2434,12 +2436,14 @@ type SchemaChangerTestingKnobs struct { func (*SchemaChangerTestingKnobs) ModuleTestingKnobs() {} // txn is a convenient wrapper around descs.Txn(). +// +// TODO(ajwerner): Replace this with direct calls to DescsTxn. func (sc *SchemaChanger) txn( - ctx context.Context, f func(context.Context, *kv.Txn, *descs.Collection) error, + ctx context.Context, f func(context.Context, isql.Txn, *descs.Collection) error, ) error { return sc.txnWithExecutor(ctx, func( - ctx context.Context, txn *kv.Txn, _ *sessiondata.SessionData, - collection *descs.Collection, _ sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, _ *sessiondata.SessionData, + collection *descs.Collection, ) error { return f(ctx, txn, collection) }) @@ -2449,8 +2453,8 @@ func (sc *SchemaChanger) txn( func (sc *SchemaChanger) txnWithExecutor( ctx context.Context, f func( - context.Context, *kv.Txn, *sessiondata.SessionData, - *descs.Collection, sqlutil.InternalExecutor, + context.Context, isql.Txn, *sessiondata.SessionData, + *descs.Collection, ) error, ) error { if fn := sc.testingKnobs.RunBeforeDescTxn; fn != nil { @@ -2458,12 +2462,10 @@ func (sc *SchemaChanger) txnWithExecutor( return err } } - sd := NewFakeSessionData(sc.execCfg.SV()) - return 
sc.execCfg.InternalExecutorFactory.DescsTxnWithExecutor(ctx, sc.db, sd, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, - ie sqlutil.InternalExecutor, + return sc.execCfg.InternalDB.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { - return f(ctx, txn, sd, descriptors, ie) + return f(ctx, txn, txn.SessionData(), txn.Descriptors()) }) } @@ -2578,7 +2580,7 @@ func (r schemaChangeResumer) Resume(ctx context.Context, execCtx interface{}) er droppedSchemaIDs: catalog.MakeDescriptorIDSet(droppedSchemaIDs...), droppedDatabaseID: droppedDatabaseID, sqlInstanceID: p.ExecCfg().NodeInfo.NodeID.SQLInstanceID(), - db: p.ExecCfg().DB, + db: p.ExecCfg().InternalDB, leaseMgr: p.ExecCfg().LeaseManager, testingKnobs: p.ExecCfg().SchemaChangerTestingKnobs, distSQLPlanner: p.DistSQLPlanner(), @@ -2588,7 +2590,6 @@ func (r schemaChangeResumer) Resume(ctx context.Context, execCtx interface{}) er clock: p.ExecCfg().Clock, settings: p.ExecCfg().Settings, execCfg: p.ExecCfg(), - ieFactory: r.job.GetInternalExecutorFactory(), metrics: p.ExecCfg().SchemaChangerMetrics, } opts := retry.Options{ @@ -2724,7 +2725,7 @@ func (r schemaChangeResumer) Resume(ctx context.Context, execCtx interface{}) er if err := startGCJob( ctx, - p.ExecCfg().DB, + p.ExecCfg().InternalDB, p.ExecCfg().JobRegistry, r.job.Payload().UsernameProto.Decode(), r.job.Payload().Description, @@ -2769,7 +2770,7 @@ func (r schemaChangeResumer) OnFailOrCancel( descID: details.DescID, mutationID: details.TableMutationID, sqlInstanceID: p.ExecCfg().NodeInfo.NodeID.SQLInstanceID(), - db: p.ExecCfg().DB, + db: p.ExecCfg().InternalDB, leaseMgr: p.ExecCfg().LeaseManager, testingKnobs: p.ExecCfg().SchemaChangerTestingKnobs, distSQLPlanner: p.DistSQLPlanner(), @@ -2779,7 +2780,6 @@ func (r schemaChangeResumer) OnFailOrCancel( clock: p.ExecCfg().Clock, settings: p.ExecCfg().Settings, execCfg: p.ExecCfg(), - ieFactory: r.job.GetInternalExecutorFactory(), } if r.job.Payload().FinalResumeError == 
nil { @@ -2855,7 +2855,7 @@ func init() { // queueCleanupJob checks if the completed schema change needs to start a // child job to clean up dropped schema elements. func (sc *SchemaChanger) queueCleanupJob( - ctx context.Context, scDesc *tabledesc.Mutable, txn *kv.Txn, + ctx context.Context, txn isql.Txn, scDesc *tabledesc.Mutable, ) (jobspb.JobID, error) { // Create jobs for dropped columns / indexes to be deleted. mutationID := scDesc.ClusterVersion().NextMutationID @@ -2902,7 +2902,7 @@ func (sc *SchemaChanger) queueCleanupJob( func (sc *SchemaChanger) applyZoneConfigChangeForMutation( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, dbDesc catalog.DatabaseDescriptor, tableDesc *tabledesc.Mutable, mutation catalog.Mutation, @@ -2970,13 +2970,13 @@ func (sc *SchemaChanger) applyZoneConfigChangeForMutation( ) } - regionConfig, err := SynthesizeRegionConfig(ctx, txn, dbDesc.GetID(), descsCol) + regionConfig, err := SynthesizeRegionConfig(ctx, txn.KV(), dbDesc.GetID(), descsCol) if err != nil { return err } if err := ApplyZoneConfigForMultiRegionTable( ctx, - txn, + txn.KV(), sc.execCfg, false, /* kvTrace */ descsCol, @@ -3004,8 +3004,8 @@ func DeleteTableDescAndZoneConfig( ) error { log.Infof(ctx, "removing table descriptor and zone config for table %d", tableDesc.GetID()) const kvTrace = false - return DescsTxn(ctx, execCfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - b := txn.NewBatch() + return DescsTxn(ctx, execCfg, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + b := txn.KV().NewBatch() // Delete the descriptor. 
if err := col.DeleteDescToBatch(ctx, kvTrace, tableDesc.GetID(), b); err != nil { return err @@ -3015,7 +3015,7 @@ func DeleteTableDescAndZoneConfig( zoneKeyPrefix := config.MakeZoneKeyPrefix(codec, tableDesc.GetID()) b.DelRange(zoneKeyPrefix, zoneKeyPrefix.PrefixEnd(), false /* returnKeys */) } - return txn.Run(ctx, b) + return txn.KV().Run(ctx, b) }) } @@ -3057,16 +3057,16 @@ func (sc *SchemaChanger) shouldSplitAndScatter( func (sc *SchemaChanger) preSplitHashShardedIndexRanges(ctx context.Context) error { if err := sc.txn(ctx, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn isql.Txn, descsCol *descs.Collection, ) error { hour := hlc.Timestamp{WallTime: timeutil.Now().Add(time.Hour).UnixNano()} - tableDesc, err := descsCol.MutableByID(txn).Table(ctx, sc.descID) + tableDesc, err := descsCol.MutableByID(txn.KV()).Table(ctx, sc.descID) if err != nil { return err } if fn := sc.testingKnobs.RunBeforeHashShardedIndexRangePreSplit; fn != nil { - if err := fn(tableDesc, sc.db, sc.execCfg.Codec); err != nil { + if err := fn(tableDesc, sc.db.KV(), sc.execCfg.Codec); err != nil { return err } } @@ -3118,7 +3118,7 @@ func (sc *SchemaChanger) preSplitHashShardedIndexRanges(ctx context.Context) err for _, shard := range splitAtShards { keyPrefix := sc.execCfg.Codec.IndexPrefix(uint32(tableDesc.GetID()), uint32(idx.GetID())) splitKey := encoding.EncodeVarintAscending(keyPrefix, shard) - if err := splitAndScatter(ctx, sc.db, splitKey, hour); err != nil { + if err := splitAndScatter(ctx, sc.db.KV(), splitKey, hour); err != nil { return err } } @@ -3127,7 +3127,7 @@ func (sc *SchemaChanger) preSplitHashShardedIndexRanges(ctx context.Context) err for _, partPrefix := range partitionKeyPrefixes { for _, shard := range splitAtShards { splitKey := encoding.EncodeVarintAscending(partPrefix, shard) - if err := splitAndScatter(ctx, sc.db, splitKey, hour); err != nil { + if err := splitAndScatter(ctx, sc.db.KV(), splitKey, hour); err != nil 
{ return err } } @@ -3137,7 +3137,7 @@ func (sc *SchemaChanger) preSplitHashShardedIndexRanges(ctx context.Context) err } if fn := sc.testingKnobs.RunAfterHashShardedIndexRangePreSplit; fn != nil { - if err := fn(tableDesc, sc.db, sc.execCfg.Codec); err != nil { + if err := fn(tableDesc, sc.db.KV(), sc.execCfg.Codec); err != nil { return err } } diff --git a/pkg/sql/schema_changer_test.go b/pkg/sql/schema_changer_test.go index 2b55d8505a65..235c470008ba 100644 --- a/pkg/sql/schema_changer_test.go +++ b/pkg/sql/schema_changer_test.go @@ -44,6 +44,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/gcjob" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/sqltestutils" "github.com/cockroachdb/cockroach/pkg/sql/stats" @@ -98,9 +99,8 @@ func TestSchemaChangeProcess(t *testing.T) { leaseMgr := lease.NewLeaseManager( s.AmbientCtx(), execCfg.NodeInfo.NodeID, - execCfg.DB, + s.InternalDB().(isql.DB), execCfg.Clock, - execCfg.InternalExecutor, execCfg.Settings, execCfg.Codec, lease.ManagerTestingKnobs{}, @@ -121,7 +121,7 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); tableID := descpb.ID(sqlutils.QueryTableID(t, sqlDB, "t", "public", "test")) changer := sql.NewSchemaChangerForTesting( - tableID, 0, instance, kvDB, leaseMgr, jobRegistry, &execCfg, cluster.MakeTestingClusterSettings()) + tableID, 0, instance, execCfg.InternalDB, leaseMgr, jobRegistry, &execCfg, cluster.MakeTestingClusterSettings()) // Read table descriptor for version. 
tableDesc := desctestutils.TestingGetMutableExistingTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") @@ -148,7 +148,7 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); index.ID = tableDesc.NextIndexID tableDesc.NextIndexID++ changer = sql.NewSchemaChangerForTesting( - tableID, tableDesc.NextMutationID, instance, kvDB, leaseMgr, jobRegistry, + tableID, tableDesc.NextMutationID, instance, execCfg.InternalDB, leaseMgr, jobRegistry, &execCfg, cluster.MakeTestingClusterSettings(), ) tableDesc.TableDesc().Mutations = append(tableDesc.TableDesc().Mutations, descpb.DescriptorMutation{ @@ -1502,13 +1502,13 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); // just waiting for the lease to expire. timeoutCtx, cancel := context.WithTimeout(ctx, base.DefaultDescriptorLeaseDuration/2) defer cancel() - if err := sql.TestingDescsTxn(timeoutCtx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - tbl, err := col.MutableByID(txn).Table(ctx, tableDesc.GetID()) + if err := sql.TestingDescsTxn(timeoutCtx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + tbl, err := col.MutableByID(txn.KV()).Table(ctx, tableDesc.GetID()) if err != nil { return err } tbl.Version++ - ba := txn.NewBatch() + ba := txn.KV().NewBatch() if err := col.WriteDescToBatch(ctx, false /* kvTrace */, tbl, ba); err != nil { return err } @@ -1522,7 +1522,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); // to exist on the object passed to descs.Txn, but, we have it, and it's // effective, so, let's use it. 
defer col.ReleaseAll(ctx) - return txn.Run(ctx, ba) + return txn.KV().Run(ctx, ba) }); err != nil { t.Error(err) } diff --git a/pkg/sql/schemachanger/scbackup/BUILD.bazel b/pkg/sql/schemachanger/scbackup/BUILD.bazel index fa3c80e86257..577438730ab2 100644 --- a/pkg/sql/schemachanger/scbackup/BUILD.bazel +++ b/pkg/sql/schemachanger/scbackup/BUILD.bazel @@ -11,14 +11,13 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/jobs", - "//pkg/kv", "//pkg/sql/catalog", "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/nstree", + "//pkg/sql/isql", "//pkg/sql/schemachanger/scexec", "//pkg/sql/schemachanger/scpb", "//pkg/sql/schemachanger/screl", - "//pkg/sql/sqlutil", ], ) diff --git a/pkg/sql/schemachanger/scbackup/job.go b/pkg/sql/schemachanger/scbackup/job.go index 4a908a0fbfe4..e8863b6321a5 100644 --- a/pkg/sql/schemachanger/scbackup/job.go +++ b/pkg/sql/schemachanger/scbackup/job.go @@ -14,14 +14,13 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/jobs" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/nstree" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/screl" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" ) // CreateDeclarativeSchemaChangeJobs is called during the last phase of a @@ -32,11 +31,7 @@ import ( // It should only be called for backups which do not restore the jobs table // directly. 
func CreateDeclarativeSchemaChangeJobs( - ctx context.Context, - registry *jobs.Registry, - txn *kv.Txn, - ie sqlutil.InternalExecutor, - allMut nstree.Catalog, + ctx context.Context, registry *jobs.Registry, txn isql.Txn, allMut nstree.Catalog, ) error { byJobID := make(map[catpb.JobID][]catalog.MutableDescriptor) _ = allMut.ForEachDescriptor(func(d catalog.Descriptor) error { @@ -76,6 +71,6 @@ func CreateDeclarativeSchemaChangeJobs( runningStatus, )) } - _, err := registry.CreateJobsWithTxn(ctx, txn, ie, records) + _, err := registry.CreateJobsWithTxn(ctx, txn, records) return err } diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_primary_key.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_primary_key.go index 836846e836cc..7bbaa1b2a4c2 100644 --- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_primary_key.go +++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_primary_key.go @@ -232,12 +232,16 @@ func checkForEarlyExit(b BuildCtx, tbl *scpb.Table, t alterPrimaryKeySpec) { RequiredPrivilege: privilege.CREATE, }) - colCurrentStatus, _, colElem := scpb.FindColumn(colElems) + colCurrentStatus, colTargetStatus, colElem := scpb.FindColumn(colElems) if colElem == nil { panic(errors.AssertionFailedf("programming error: resolving column %v does not give a "+ "Column element.", col.Column)) } if colCurrentStatus == scpb.Status_DROPPED || colCurrentStatus == scpb.Status_ABSENT { + if colTargetStatus == scpb.ToPublic { + panic(pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, + "column %q is being added", col.Column)) + } panic(pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "column %q is being dropped", col.Column)) } diff --git a/pkg/sql/schemachanger/scdeps/BUILD.bazel b/pkg/sql/schemachanger/scdeps/BUILD.bazel index 26970402aade..21f382fc1521 100644 --- a/pkg/sql/schemachanger/scdeps/BUILD.bazel +++ b/pkg/sql/schemachanger/scdeps/BUILD.bazel @@ -29,6 +29,7 @@ 
go_library( "//pkg/sql/catalog/descs", "//pkg/sql/catalog/nstree", "//pkg/sql/catalog/resolver", + "//pkg/sql/isql", "//pkg/sql/rowenc", "//pkg/sql/schemachanger/scbuild", "//pkg/sql/schemachanger/scexec", @@ -40,7 +41,6 @@ go_library( "//pkg/sql/sessiondata", "//pkg/sql/sqlerrors", "//pkg/sql/sqltelemetry", - "//pkg/sql/sqlutil", "//pkg/sql/types", "//pkg/util/timeutil", "//pkg/util/uuid", diff --git a/pkg/sql/schemachanger/scdeps/build_deps.go b/pkg/sql/schemachanger/scdeps/build_deps.go index 54ceb8a2d0af..af2f56022eb2 100644 --- a/pkg/sql/schemachanger/scdeps/build_deps.go +++ b/pkg/sql/schemachanger/scdeps/build_deps.go @@ -31,7 +31,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" @@ -43,8 +42,7 @@ import ( func NewBuilderDependencies( clusterID uuid.UUID, codec keys.SQLCodec, - txn *kv.Txn, - descsCollection *descs.Collection, + txn descs.Txn, schemaResolverFactory scbuild.SchemaResolverFactory, authAccessor scbuild.AuthorizationAccessor, astFormatter scbuild.AstFormatter, @@ -52,23 +50,21 @@ func NewBuilderDependencies( sessionData *sessiondata.SessionData, settings *cluster.Settings, statements []string, - internalExecutor sqlutil.InternalExecutor, clientNoticeSender eval.ClientNoticeSender, ) scbuild.Dependencies { return &buildDeps{ - clusterID: clusterID, - codec: codec, - txn: txn, - descsCollection: descsCollection, - authAccessor: authAccessor, - sessionData: sessionData, - settings: settings, - statements: statements, - astFormatter: astFormatter, - featureChecker: featureChecker, - internalExecutor: internalExecutor, + clusterID: clusterID, + codec: codec, + txn: txn.KV(), + descsCollection: txn.Descriptors(), + authAccessor: authAccessor, + 
sessionData: sessionData, + settings: settings, + statements: statements, + astFormatter: astFormatter, + featureChecker: featureChecker, schemaResolver: schemaResolverFactory( - descsCollection, sessiondata.NewStack(sessionData), txn, authAccessor, + txn.Descriptors(), sessiondata.NewStack(sessionData), txn.KV(), authAccessor, ), clientNoticeSender: clientNoticeSender, } @@ -86,7 +82,6 @@ type buildDeps struct { statements []string astFormatter scbuild.AstFormatter featureChecker scbuild.FeatureChecker - internalExecutor sqlutil.InternalExecutor clientNoticeSender eval.ClientNoticeSender } diff --git a/pkg/sql/schemachanger/scdeps/exec_deps.go b/pkg/sql/schemachanger/scdeps/exec_deps.go index b0bef5bc8d8e..f6e5d9cbe3b8 100644 --- a/pkg/sql/schemachanger/scdeps/exec_deps.go +++ b/pkg/sql/schemachanger/scdeps/exec_deps.go @@ -28,6 +28,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec/scmutationexec" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -40,9 +41,9 @@ import ( // job registry. Outside of tests this should always be backed by *job.Registry. 
type JobRegistry interface { MakeJobID() jobspb.JobID - CreateJobWithTxn(ctx context.Context, record jobs.Record, jobID jobspb.JobID, txn *kv.Txn) (*jobs.Job, error) + CreateJobWithTxn(ctx context.Context, record jobs.Record, jobID jobspb.JobID, txn isql.Txn) (*jobs.Job, error) UpdateJobWithTxn( - ctx context.Context, jobID jobspb.JobID, txn *kv.Txn, useReadLock bool, updateFunc jobs.UpdateFn, + ctx context.Context, jobID jobspb.JobID, txn isql.Txn, useReadLock bool, updateFunc jobs.UpdateFn, ) error CheckPausepoint(name string) error } @@ -53,7 +54,7 @@ func NewExecutorDependencies( settings *cluster.Settings, codec keys.SQLCodec, sessionData *sessiondata.SessionData, - txn *kv.Txn, + txn isql.Txn, user username.SQLUsername, descsCollection *descs.Collection, jobRegistry JobRegistry, @@ -99,7 +100,7 @@ func NewExecutorDependencies( } type txnDeps struct { - txn *kv.Txn + txn isql.Txn codec keys.SQLCodec descsCollection *descs.Collection jobRegistry JobRegistry @@ -119,7 +120,7 @@ func (d *txnDeps) UpdateSchemaChangeJob( ) error { const useReadLock = false return d.jobRegistry.UpdateJobWithTxn(ctx, id, d.txn, useReadLock, func( - txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, + txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, ) error { return callback(md, ju.UpdateProgress, ju.UpdatePayload) }) @@ -131,12 +132,12 @@ var _ scexec.Catalog = (*txnDeps)(nil) func (d *txnDeps) MustReadImmutableDescriptors( ctx context.Context, ids ...descpb.ID, ) ([]catalog.Descriptor, error) { - return d.descsCollection.ByID(d.txn).WithoutSynthetic().Get().Descs(ctx, ids) + return d.descsCollection.ByID(d.txn.KV()).WithoutSynthetic().Get().Descs(ctx, ids) } // GetFullyQualifiedName implements the scmutationexec.CatalogReader interface func (d *txnDeps) GetFullyQualifiedName(ctx context.Context, id descpb.ID) (string, error) { - g := d.descsCollection.ByID(d.txn).WithoutSynthetic().Get() + g := d.descsCollection.ByID(d.txn.KV()).WithoutSynthetic().Get() objectDesc, err := 
g.Desc(ctx, id) if err != nil { return "", err @@ -175,7 +176,7 @@ func (d *txnDeps) GetFullyQualifiedName(ctx context.Context, id descpb.ID) (stri func (d *txnDeps) MustReadMutableDescriptor( ctx context.Context, id descpb.ID, ) (catalog.MutableDescriptor, error) { - return d.descsCollection.MutableByID(d.txn).Desc(ctx, id) + return d.descsCollection.MutableByID(d.txn.KV()).Desc(ctx, id) } // AddSyntheticDescriptor is part of the @@ -188,7 +189,7 @@ func (d *txnDeps) AddSyntheticDescriptor(desc catalog.Descriptor) { func (d *txnDeps) NewCatalogChangeBatcher() scexec.CatalogChangeBatcher { return &catalogChangeBatcher{ txnDeps: d, - batch: d.txn.NewBatch(), + batch: d.txn.KV().NewBatch(), } } @@ -225,10 +226,10 @@ func (b *catalogChangeBatcher) DeleteZoneConfig(ctx context.Context, id descpb.I // ValidateAndRun implements the scexec.CatalogChangeBatcher interface. func (b *catalogChangeBatcher) ValidateAndRun(ctx context.Context) error { - if err := b.descsCollection.ValidateUncommittedDescriptors(ctx, b.txn); err != nil { + if err := b.descsCollection.ValidateUncommittedDescriptors(ctx, b.txn.KV()); err != nil { return err } - if err := b.txn.Run(ctx, b.batch); err != nil { + if err := b.txn.KV().Run(ctx, b.batch); err != nil { return errors.Wrap(err, "writing descriptors") } return nil @@ -305,15 +306,15 @@ func (d *txnDeps) MaybeSplitIndexSpans( span := table.IndexSpan(d.codec, indexToBackfill.GetID()) const backfillSplitExpiration = time.Hour - expirationTime := d.txn.DB().Clock().Now().Add(backfillSplitExpiration.Nanoseconds(), 0) - return d.txn.DB().AdminSplit(ctx, span.Key, expirationTime) + expirationTime := d.txn.KV().DB().Clock().Now().Add(backfillSplitExpiration.Nanoseconds(), 0) + return d.txn.KV().DB().AdminSplit(ctx, span.Key, expirationTime) } // GetResumeSpans implements the scexec.BackfillerTracker interface. 
func (d *txnDeps) GetResumeSpans( ctx context.Context, tableID descpb.ID, indexID descpb.IndexID, ) ([]roachpb.Span, error) { - table, err := d.descsCollection.ByID(d.txn).WithoutNonPublic().WithoutSynthetic().Get().Table(ctx, tableID) + table, err := d.descsCollection.ByID(d.txn.KV()).WithoutNonPublic().WithoutSynthetic().Get().Table(ctx, tableID) if err != nil { return nil, err } @@ -407,10 +408,10 @@ func (d *execDeps) DescriptorMetadataUpdater(ctx context.Context) scexec.Descrip } // EventLoggerFactory constructs a new event logger with a txn. -type EventLoggerFactory = func(*kv.Txn) scexec.EventLogger +type EventLoggerFactory = func(isql.Txn) scexec.EventLogger // MetadataUpdaterFactory constructs a new metadata updater with a txn. -type MetadataUpdaterFactory = func(ctx context.Context, descriptors *descs.Collection, txn *kv.Txn) scexec.DescriptorMetadataUpdater +type MetadataUpdaterFactory = func(ctx context.Context, descriptors *descs.Collection, txn isql.Txn) scexec.DescriptorMetadataUpdater // EventLogger implements scexec.Dependencies func (d *execDeps) EventLogger() scexec.EventLogger { diff --git a/pkg/sql/schemachanger/scdeps/run_deps.go b/pkg/sql/schemachanger/scdeps/run_deps.go index a9f8dd9de176..f2011295175d 100644 --- a/pkg/sql/schemachanger/scdeps/run_deps.go +++ b/pkg/sql/schemachanger/scdeps/run_deps.go @@ -17,10 +17,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec/backfiller" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scrun" @@ -32,8 +32,7 @@ 
import ( // given arguments. func NewJobRunDependencies( collectionFactory *descs.CollectionFactory, - ieFactory descs.TxnManager, - db *kv.DB, + db descs.DB, backfiller scexec.Backfiller, merger scexec.Merger, rangeCounter backfiller.RangeCounter, @@ -51,40 +50,38 @@ func NewJobRunDependencies( kvTrace bool, ) scrun.JobRunDependencies { return &jobExecutionDeps{ - collectionFactory: collectionFactory, - internalExecutorFactory: ieFactory, - db: db, - backfiller: backfiller, - merger: merger, - rangeCounter: rangeCounter, - eventLoggerFactory: eventLoggerFactory, - jobRegistry: jobRegistry, - job: job, - codec: codec, - settings: settings, - testingKnobs: testingKnobs, - statements: statements, - indexValidator: indexValidator, - commentUpdaterFactory: metadataUpdaterFactory, - sessionData: sessionData, - kvTrace: kvTrace, - statsRefresher: statsRefresher, + collectionFactory: collectionFactory, + db: db, + backfiller: backfiller, + merger: merger, + rangeCounter: rangeCounter, + eventLoggerFactory: eventLoggerFactory, + jobRegistry: jobRegistry, + job: job, + codec: codec, + settings: settings, + testingKnobs: testingKnobs, + statements: statements, + indexValidator: indexValidator, + commentUpdaterFactory: metadataUpdaterFactory, + sessionData: sessionData, + kvTrace: kvTrace, + statsRefresher: statsRefresher, } } type jobExecutionDeps struct { - collectionFactory *descs.CollectionFactory - internalExecutorFactory descs.TxnManager - db *kv.DB - eventLoggerFactory func(txn *kv.Txn) scexec.EventLogger - statsRefresher scexec.StatsRefresher - backfiller scexec.Backfiller - merger scexec.Merger - commentUpdaterFactory MetadataUpdaterFactory - rangeCounter backfiller.RangeCounter - jobRegistry *jobs.Registry - job *jobs.Job - kvTrace bool + collectionFactory *descs.CollectionFactory + db descs.DB + eventLoggerFactory func(txn isql.Txn) scexec.EventLogger + statsRefresher scexec.StatsRefresher + backfiller scexec.Backfiller + merger scexec.Merger + 
commentUpdaterFactory MetadataUpdaterFactory + rangeCounter backfiller.RangeCounter + jobRegistry *jobs.Registry + job *jobs.Job + kvTrace bool indexValidator scexec.Validator @@ -106,15 +103,15 @@ func (d *jobExecutionDeps) ClusterSettings() *cluster.Settings { func (d *jobExecutionDeps) WithTxnInJob(ctx context.Context, fn scrun.JobTxnFunc) error { var createdJobs []jobspb.JobID var tableStatsToRefresh []descpb.ID - err := d.internalExecutorFactory.DescsTxn(ctx, d.db, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + err := d.db.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { pl := d.job.Payload() ed := &execDeps{ txnDeps: txnDeps{ txn: txn, codec: d.codec, - descsCollection: descriptors, + descsCollection: txn.Descriptors(), jobRegistry: d.jobRegistry, validator: d.indexValidator, eventLogger: d.eventLoggerFactory(txn), @@ -137,7 +134,7 @@ func (d *jobExecutionDeps) WithTxnInJob(ctx context.Context, fn scrun.JobTxnFunc statements: d.statements, user: pl.UsernameProto.Decode(), clock: NewConstantClock(timeutil.FromUnixMicros(pl.StartedMicros)), - metadataUpdater: d.commentUpdaterFactory(ctx, descriptors, txn), + metadataUpdater: d.commentUpdaterFactory(ctx, txn.Descriptors(), txn), sessionData: d.sessionData, testingKnobs: d.testingKnobs, } @@ -155,11 +152,11 @@ func (d *jobExecutionDeps) WithTxnInJob(ctx context.Context, fn scrun.JobTxnFunc d.jobRegistry.NotifyToResume(ctx, createdJobs...) 
} if len(tableStatsToRefresh) > 0 { - err := d.internalExecutorFactory.DescsTxn(ctx, d.db, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + err := d.db.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { for _, id := range tableStatsToRefresh { - tbl, err := descriptors.ByIDWithLeased(txn).WithoutNonPublic().Get().Table(ctx, id) + tbl, err := txn.Descriptors().ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Table(ctx, id) if err != nil { return err } diff --git a/pkg/sql/schemachanger/scdeps/sctestutils/sctestutils.go b/pkg/sql/schemachanger/scdeps/sctestutils/sctestutils.go index fea0faa8545d..345ec7fc317b 100644 --- a/pkg/sql/schemachanger/scdeps/sctestutils/sctestutils.go +++ b/pkg/sql/schemachanger/scdeps/sctestutils/sctestutils.go @@ -59,7 +59,7 @@ func WithBuilderDependenciesFromTestServer( ) defer cleanup() planner := ip.(interface { - Txn() *kv.Txn + InternalSQLTxn() descs.Txn Descriptors() *descs.Collection SessionData() *sessiondata.SessionData resolver.SchemaResolver @@ -76,8 +76,7 @@ func WithBuilderDependenciesFromTestServer( fn(scdeps.NewBuilderDependencies( execCfg.NodeInfo.LogicalClusterID(), execCfg.Codec, - planner.Txn(), - planner.Descriptors(), + planner.InternalSQLTxn(), sql.NewSkippingCacheSchemaResolver, /* schemaResolverFactory */ planner, /* authAccessor */ planner, /* astFormatter */ @@ -85,7 +84,6 @@ func WithBuilderDependenciesFromTestServer( planner.SessionData(), execCfg.Settings, nil, /* statements */ - execCfg.InternalExecutor, &faketreeeval.DummyClientNoticeSender{}, )) } diff --git a/pkg/sql/schemachanger/scdeps/validator.go b/pkg/sql/schemachanger/scdeps/validator.go index d18b281486d8..8c6009b2dc83 100644 --- a/pkg/sql/schemachanger/scdeps/validator.go +++ b/pkg/sql/schemachanger/scdeps/validator.go @@ -21,9 +21,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" ) // ValidateForwardIndexesFn callback function for validating forward indexes. @@ -72,7 +72,7 @@ type validator struct { db *kv.DB codec keys.SQLCodec settings *cluster.Settings - ieFactory sqlutil.InternalExecutorFactory + ieFactory isql.DB validateForwardIndexes ValidateForwardIndexesFn validateInvertedIndexes ValidateInvertedIndexesFn validateConstraint ValidateConstraintFn @@ -130,13 +130,13 @@ func (vd validator) ValidateConstraint( func (vd validator) makeHistoricalInternalExecTxnRunner() descs.HistoricalInternalExecTxnRunner { now := vd.db.Clock().Now() return descs.NewHistoricalInternalExecTxnRunner(now, func(ctx context.Context, fn descs.InternalExecFn) error { - return vd.ieFactory.(descs.TxnManager).DescsTxnWithExecutor(ctx, vd.db, vd.newFakeSessionData(&vd.settings.SV), func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor, + return vd.ieFactory.(descs.DB).DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { - if err := txn.SetFixedTimestamp(ctx, now); err != nil { + if err := txn.KV().SetFixedTimestamp(ctx, now); err != nil { return err } - return fn(ctx, txn, ie, descriptors) + return fn(ctx, txn, txn.Descriptors()) }) }) } @@ -147,7 +147,7 @@ func NewValidator( db *kv.DB, codec keys.SQLCodec, settings *cluster.Settings, - ieFactory sqlutil.InternalExecutorFactory, + ieFactory isql.DB, protectedTimestampProvider scexec.ProtectedTimestampManager, validateForwardIndexes ValidateForwardIndexesFn, validateInvertedIndexes ValidateInvertedIndexesFn, diff --git a/pkg/sql/schemachanger/scexec/BUILD.bazel b/pkg/sql/schemachanger/scexec/BUILD.bazel index a9e045901db1..f9d8d9e598b9 100644 --- 
a/pkg/sql/schemachanger/scexec/BUILD.bazel +++ b/pkg/sql/schemachanger/scexec/BUILD.bazel @@ -63,7 +63,6 @@ go_test( "//pkg/base", "//pkg/jobs", "//pkg/jobs/jobspb", - "//pkg/kv", "//pkg/security/securityassets", "//pkg/security/securitytest", "//pkg/security/username", @@ -77,6 +76,7 @@ go_test( "//pkg/sql/catalog/lease", "//pkg/sql/catalog/nstree", "//pkg/sql/catalog/tabledesc", + "//pkg/sql/isql", "//pkg/sql/parser", "//pkg/sql/schemachanger/scbuild", "//pkg/sql/schemachanger/scdeps", diff --git a/pkg/sql/schemachanger/scexec/backfiller/BUILD.bazel b/pkg/sql/schemachanger/scexec/backfiller/BUILD.bazel index 7fe1407378e6..7bac1e76cb5c 100644 --- a/pkg/sql/schemachanger/scexec/backfiller/BUILD.bazel +++ b/pkg/sql/schemachanger/scexec/backfiller/BUILD.bazel @@ -14,11 +14,11 @@ go_library( "//pkg/jobs", "//pkg/jobs/jobspb", "//pkg/keys", - "//pkg/kv", "//pkg/roachpb", "//pkg/settings/cluster", "//pkg/sql/backfill", "//pkg/sql/catalog/descpb", + "//pkg/sql/isql", "//pkg/sql/schemachanger/scexec", "//pkg/util/intsets", "//pkg/util/syncutil", diff --git a/pkg/sql/schemachanger/scexec/backfiller/tracker.go b/pkg/sql/schemachanger/scexec/backfiller/tracker.go index ec55669e4a38..1a308abc5e66 100644 --- a/pkg/sql/schemachanger/scexec/backfiller/tracker.go +++ b/pkg/sql/schemachanger/scexec/backfiller/tracker.go @@ -17,9 +17,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/syncutil" @@ -101,16 +101,16 @@ func newTrackerConfig(codec keys.SQLCodec, rc RangeCounter, job *jobs.Job) track return trackerConfig{ 
numRangesInSpanContainedBy: rc.NumRangesInSpanContainedBy, writeProgressFraction: func(ctx context.Context, fractionProgressed float32) error { - if err := job.FractionProgressed( - ctx, nil /* txn */, jobs.FractionUpdater(fractionProgressed), + if err := job.NoTxn().FractionProgressed( + ctx, jobs.FractionUpdater(fractionProgressed), ); err != nil { return jobs.SimplifyInvalidStatusError(err) } return nil }, writeCheckpoint: func(ctx context.Context, bps []scexec.BackfillProgress, mps []scexec.MergeProgress) error { - return job.Update(ctx, nil /* txn */, func( - txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, + return job.NoTxn().Update(ctx, func( + txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, ) error { pl := md.Payload backfillJobProgress, err := convertToJobBackfillProgress(codec, bps) diff --git a/pkg/sql/schemachanger/scexec/executor_external_test.go b/pkg/sql/schemachanger/scexec/executor_external_test.go index ba106484ce93..b60c64c41568 100644 --- a/pkg/sql/schemachanger/scexec/executor_external_test.go +++ b/pkg/sql/schemachanger/scexec/executor_external_test.go @@ -17,7 +17,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" @@ -27,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scbuild" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scdeps" @@ -49,16 +49,13 @@ import ( type testInfra struct { tc *testcluster.TestCluster settings 
*cluster.Settings - db *kv.DB lm *lease.Manager tsql *sqlutils.SQLRunner cf *descs.CollectionFactory - ief descs.TxnManager + db descs.DB } -func (ti testInfra) newExecDeps( - txn *kv.Txn, descsCollection *descs.Collection, -) scexec.Dependencies { +func (ti testInfra) newExecDeps(txn descs.Txn) scexec.Dependencies { const kvTrace = true const schemaChangerJobID = 1 return scdeps.NewExecutorDependencies( @@ -67,7 +64,7 @@ func (ti testInfra) newExecDeps( &sessiondata.SessionData{}, txn, username.RootUserName(), - descsCollection, + txn.Descriptors(), noopJobRegistry{}, noopBackfiller{}, noopMerger{}, @@ -90,21 +87,13 @@ func setupTestInfra(t testing.TB) *testInfra { return &testInfra{ tc: tc, settings: tc.Server(0).ClusterSettings(), - db: tc.Server(0).DB(), + db: tc.Server(0).ExecutorConfig().(sql.ExecutorConfig).InternalDB, lm: tc.Server(0).LeaseManager().(*lease.Manager), cf: tc.Server(0).ExecutorConfig().(sql.ExecutorConfig).CollectionFactory, - ief: tc.Server(0).ExecutorConfig().(sql.ExecutorConfig).InternalExecutorFactory, tsql: sqlutils.MakeSQLRunner(tc.ServerConn(0)), } } -func (ti *testInfra) txn( - ctx context.Context, - f func(ctx context.Context, txn *kv.Txn, descriptors *descs.Collection) error, -) error { - return ti.ief.DescsTxn(ctx, ti.db, f) -} - func TestExecutorDescriptorMutationOps(t *testing.T) { defer leaktest.AfterTest(t)() @@ -137,24 +126,24 @@ CREATE TABLE db.t ( )`) tn := tree.MakeTableNameWithSchema("db", tree.PublicSchemaName, "t") - require.NoError(t, ti.txn(ctx, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + require.NoError(t, ti.db.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) (err error) { - if _, table, err = descs.PrefixAndMutableTable(ctx, descriptors.MutableByName(txn), &tn); err != nil { + if _, table, err = descs.PrefixAndMutableTable(ctx, txn.Descriptors().MutableByName(txn.KV()), &tn); err != nil { return err } return nil })) - require.NoError(t, ti.txn(ctx, func( - ctx 
context.Context, txn *kv.Txn, descriptors *descs.Collection, + require.NoError(t, ti.db.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { - exDeps := ti.newExecDeps(txn, descriptors) - _, orig, err := descs.PrefixAndTable(ctx, descriptors.ByName(txn).Get(), &tn) + exDeps := ti.newExecDeps(txn) + _, orig, err := descs.PrefixAndTable(ctx, txn.Descriptors().ByName(txn.KV()).Get(), &tn) require.NoError(t, err) require.Equal(t, c.orig().TableDesc(), orig.TableDesc()) require.NoError(t, scexec.ExecuteStage(ctx, exDeps, c.ops())) - _, after, err := descs.PrefixAndTable(ctx, descriptors.ByName(txn).Get(), &tn) + _, after, err := descs.PrefixAndTable(ctx, txn.Descriptors().ByName(txn.KV()).Get(), &tn) require.NoError(t, err) require.Equal(t, c.exp().TableDesc(), after.TableDesc()) return nil @@ -249,11 +238,11 @@ func TestSchemaChanger(t *testing.T) { ti.tsql.Exec(t, `CREATE TABLE db.foo (i INT PRIMARY KEY)`) var cs scpb.CurrentState - require.NoError(t, ti.txn(ctx, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + require.NoError(t, ti.db.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) (err error) { tn := tree.MakeTableNameWithSchema("db", tree.PublicSchemaName, "foo") - _, fooTable, err := descs.PrefixAndTable(ctx, descriptors.ByNameWithLeased(txn).Get(), &tn) + _, fooTable, err := descs.PrefixAndTable(ctx, txn.Descriptors().ByNameWithLeased(txn.KV()).Get(), &tn) require.NoError(t, err) stmts := []scpb.Statement{ @@ -321,19 +310,17 @@ func TestSchemaChanger(t *testing.T) { sc := sctestutils.MakePlan(t, initial, scop.PreCommitPhase) stages := sc.StagesForCurrentPhase() for _, s := range stages { - exDeps := ti.newExecDeps(txn, descriptors) + exDeps := ti.newExecDeps(txn) require.NoError(t, sc.DecorateErrorWithPlanDetails(scexec.ExecuteStage(ctx, exDeps, s.Ops()))) cs = scpb.CurrentState{TargetState: initial.TargetState, Current: s.After} } return nil })) var after scpb.CurrentState - require.NoError(t, ti.txn(ctx, 
func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, - ) error { + require.NoError(t, ti.db.DescsTxn(ctx, func(ctx context.Context, txn descs.Txn) error { sc := sctestutils.MakePlan(t, cs, scop.PostCommitPhase) for _, s := range sc.Stages { - exDeps := ti.newExecDeps(txn, descriptors) + exDeps := ti.newExecDeps(txn) require.NoError(t, sc.DecorateErrorWithPlanDetails(scexec.ExecuteStage(ctx, exDeps, s.Ops()))) after = scpb.CurrentState{TargetState: cs.TargetState, Current: s.After} } @@ -354,9 +341,7 @@ func TestSchemaChanger(t *testing.T) { ti.tsql.Exec(t, `CREATE TABLE db.foo (i INT PRIMARY KEY)`) var cs scpb.CurrentState - require.NoError(t, ti.txn(ctx, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, - ) (err error) { + require.NoError(t, ti.db.DescsTxn(ctx, func(ctx context.Context, txn descs.Txn) (err error) { sctestutils.WithBuilderDependenciesFromTestServer(ti.tc.Server(0), func(buildDeps scbuild.Dependencies) { parsed, err := parser.Parse("ALTER TABLE db.foo ADD COLUMN j INT") require.NoError(t, err) @@ -367,7 +352,7 @@ func TestSchemaChanger(t *testing.T) { { sc := sctestutils.MakePlan(t, initial, scop.PreCommitPhase) for _, s := range sc.StagesForCurrentPhase() { - exDeps := ti.newExecDeps(txn, descriptors) + exDeps := ti.newExecDeps(txn) require.NoError(t, sc.DecorateErrorWithPlanDetails(scexec.ExecuteStage(ctx, exDeps, s.Ops()))) cs = scpb.CurrentState{TargetState: initial.TargetState, Current: s.After} } @@ -375,12 +360,10 @@ func TestSchemaChanger(t *testing.T) { }) return nil })) - require.NoError(t, ti.txn(ctx, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, - ) error { + require.NoError(t, ti.db.DescsTxn(ctx, func(ctx context.Context, txn descs.Txn) error { sc := sctestutils.MakePlan(t, cs, scop.PostCommitPhase) for _, s := range sc.Stages { - exDeps := ti.newExecDeps(txn, descriptors) + exDeps := ti.newExecDeps(txn) require.NoError(t, 
sc.DecorateErrorWithPlanDetails(scexec.ExecuteStage(ctx, exDeps, s.Ops()))) } return nil @@ -396,7 +379,7 @@ func (n noopJobRegistry) CheckPausepoint(name string) error { } func (n noopJobRegistry) UpdateJobWithTxn( - ctx context.Context, jobID jobspb.JobID, txn *kv.Txn, useReadLock bool, updateFunc jobs.UpdateFn, + ctx context.Context, jobID jobspb.JobID, txn isql.Txn, useReadLock bool, updateFunc jobs.UpdateFn, ) error { return nil } @@ -408,7 +391,7 @@ func (n noopJobRegistry) MakeJobID() jobspb.JobID { } func (n noopJobRegistry) CreateJobWithTxn( - ctx context.Context, record jobs.Record, jobID jobspb.JobID, txn *kv.Txn, + ctx context.Context, record jobs.Record, jobID jobspb.JobID, txn isql.Txn, ) (*jobs.Job, error) { return &jobs.Job{}, nil } diff --git a/pkg/sql/schemachanger/scjob/BUILD.bazel b/pkg/sql/schemachanger/scjob/BUILD.bazel index 8ded157a7041..76a6c7a7c484 100644 --- a/pkg/sql/schemachanger/scjob/BUILD.bazel +++ b/pkg/sql/schemachanger/scjob/BUILD.bazel @@ -19,6 +19,7 @@ go_library( "//pkg/sql", "//pkg/sql/catalog/descs", "//pkg/sql/descmetadata", + "//pkg/sql/isql", "//pkg/sql/schemachanger/scdeps", "//pkg/sql/schemachanger/scexec", "//pkg/sql/schemachanger/scexec/backfiller", diff --git a/pkg/sql/schemachanger/scjob/job.go b/pkg/sql/schemachanger/scjob/job.go index 381b495e92e4..25a279d0250d 100644 --- a/pkg/sql/schemachanger/scjob/job.go +++ b/pkg/sql/schemachanger/scjob/job.go @@ -15,11 +15,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/descmetadata" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scdeps" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec" 
"github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scrun" @@ -62,7 +62,7 @@ func (n *newSchemaChangeResumer) OnFailOrCancel( func (n *newSchemaChangeResumer) run(ctx context.Context, execCtxI interface{}) error { execCtx := execCtxI.(sql.JobExecContext) execCfg := execCtx.ExecCfg() - if err := n.job.Update(ctx, nil /* txn */, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + if err := n.job.NoTxn().Update(ctx, func(txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { return nil }); err != nil { // TODO(ajwerner): Detect transient errors and classify as retriable here or @@ -77,12 +77,11 @@ func (n *newSchemaChangeResumer) run(ctx context.Context, execCtxI interface{}) payload := n.job.Payload() deps := scdeps.NewJobRunDependencies( execCfg.CollectionFactory, - execCfg.InternalExecutorFactory, - execCfg.DB, + execCfg.InternalDB, execCfg.IndexBackfiller, execCfg.IndexMerger, NewRangeCounter(execCfg.DB, execCfg.DistSQLPlanner), - func(txn *kv.Txn) scexec.EventLogger { + func(txn isql.Txn) scexec.EventLogger { return sql.NewSchemaChangerEventLogger(txn, execCfg, 0) }, execCfg.JobRegistry, @@ -90,12 +89,11 @@ func (n *newSchemaChangeResumer) run(ctx context.Context, execCtxI interface{}) execCfg.Codec, execCfg.Settings, execCfg.Validator, - func(ctx context.Context, descriptors *descs.Collection, txn *kv.Txn) scexec.DescriptorMetadataUpdater { + func(ctx context.Context, descriptors *descs.Collection, txn isql.Txn) scexec.DescriptorMetadataUpdater { return descmetadata.NewMetadataUpdater(ctx, - execCfg.InternalExecutorFactory, + txn, descriptors, &execCfg.Settings.SV, - txn, execCtx.SessionData(), ) }, diff --git a/pkg/sql/scrub_constraint.go b/pkg/sql/scrub_constraint.go index 6b162789f8a9..0095ca35ce84 100644 --- a/pkg/sql/scrub_constraint.go +++ b/pkg/sql/scrub_constraint.go @@ -97,7 +97,7 @@ func (o *sqlCheckConstraintCheckOperation) Start(params runParams) error { } } - rows, err := 
params.extendedEvalCtx.ExecCfg.InternalExecutor.QueryBuffered( + rows, err := params.p.InternalSQLTxn().QueryBuffered( ctx, "check-constraint", params.p.txn, tree.AsStringWithFlags(sel, tree.FmtParsable), ) if err != nil { diff --git a/pkg/sql/scrub_fk.go b/pkg/sql/scrub_fk.go index 583fbbaf88c7..2de520f82f99 100644 --- a/pkg/sql/scrub_fk.go +++ b/pkg/sql/scrub_fk.go @@ -72,7 +72,7 @@ func (o *sqlForeignKeyCheckOperation) Start(params runParams) error { return err } - rows, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.QueryBuffered( + rows, err := params.p.InternalSQLTxn().QueryBuffered( ctx, "scrub-fk", params.p.txn, checkQuery, ) if err != nil { @@ -91,7 +91,7 @@ func (o *sqlForeignKeyCheckOperation) Start(params runParams) error { if err != nil { return err } - rows, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.QueryBuffered( + rows, err := params.p.InternalSQLTxn().QueryBuffered( ctx, "scrub-fk", params.p.txn, checkNullsQuery, ) if err != nil { diff --git a/pkg/sql/scrub_index.go b/pkg/sql/scrub_index.go index cf2175e810fd..4c9b013cd350 100644 --- a/pkg/sql/scrub_index.go +++ b/pkg/sql/scrub_index.go @@ -116,7 +116,7 @@ func (o *indexCheckOperation) Start(params runParams) error { colNames(pkColumns), colNames(otherColumns), o.tableDesc.GetID(), o.index, o.tableDesc.GetPrimaryIndexID(), ) - rows, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.QueryBuffered( + rows, err := params.p.InternalSQLTxn().QueryBuffered( ctx, "scrub-index", params.p.txn, checkQuery, ) if err != nil { diff --git a/pkg/sql/scrub_unique_constraint.go b/pkg/sql/scrub_unique_constraint.go index bb930ce4a4ba..e36128e371ac 100644 --- a/pkg/sql/scrub_unique_constraint.go +++ b/pkg/sql/scrub_unique_constraint.go @@ -142,7 +142,7 @@ ON %[4]s asOf, // 5 ) - rows, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.QueryBuffered( + rows, err := params.p.InternalSQLTxn().QueryBuffered( ctx, "scrub-unique", params.p.txn, sel, ) if err != nil { diff --git 
a/pkg/sql/sem/eval/deps.go b/pkg/sql/sem/eval/deps.go index 1d26202baf51..4e1b72d1de48 100644 --- a/pkg/sql/sem/eval/deps.go +++ b/pkg/sql/sem/eval/deps.go @@ -363,7 +363,7 @@ type Planner interface { // executor. It provides access to the rows from a query. // InternalRows is a copy of the one in sql/internal.go excluding the // Types function - we don't need the Types function for use cases where -// QueryIteratorEx is used from the InternalExecutor on the Planner. +// QueryIteratorEx is used from the Executor on the Planner. // Furthermore, we cannot include the Types function due to a cyclic // dependency on colinfo.ResultColumns - we cannot import colinfo in tree. type InternalRows interface { diff --git a/pkg/sql/sessiondata/internal.go b/pkg/sql/sessiondata/internal.go index c51b4d7485b5..2a83beb23aa3 100644 --- a/pkg/sql/sessiondata/internal.go +++ b/pkg/sql/sessiondata/internal.go @@ -15,7 +15,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" ) -// InternalExecutorOverride is used by the InternalExecutor interface +// InternalExecutorOverride is used by the Executor interface // to allow control over some of the session data. type InternalExecutorOverride struct { // User represents the user that the query will run under. 
diff --git a/pkg/sql/sessioninit/BUILD.bazel b/pkg/sql/sessioninit/BUILD.bazel index dcaff275f8a7..6a09c78a243c 100644 --- a/pkg/sql/sessioninit/BUILD.bazel +++ b/pkg/sql/sessioninit/BUILD.bazel @@ -10,7 +10,6 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/sql/sessioninit", visibility = ["//visibility:public"], deps = [ - "//pkg/kv", "//pkg/security/password", "//pkg/security/username", "//pkg/settings", diff --git a/pkg/sql/sessioninit/cache.go b/pkg/sql/sessioninit/cache.go index 62211d26fdc2..30e636e6e98c 100644 --- a/pkg/sql/sessioninit/cache.go +++ b/pkg/sql/sessioninit/cache.go @@ -15,7 +15,6 @@ import ( "fmt" "unsafe" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/password" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings" @@ -105,12 +104,11 @@ func NewCache(account mon.BoundAccount, stopper *stop.Stopper) *Cache { func (a *Cache) GetAuthInfo( ctx context.Context, settings *cluster.Settings, - db *kv.DB, - f descs.TxnManager, + db descs.DB, username username.SQLUsername, readFromSystemTables func( ctx context.Context, - f descs.TxnManager, + db descs.DB, username username.SQLUsername, makePlanner func(opName string) (interface{}, func()), settings *cluster.Settings, @@ -118,19 +116,19 @@ func (a *Cache) GetAuthInfo( makePlanner func(opName string) (interface{}, func()), ) (aInfo AuthInfo, err error) { if !CacheEnabled.Get(&settings.SV) { - return readFromSystemTables(ctx, f, username, makePlanner, settings) + return readFromSystemTables(ctx, db, username, makePlanner, settings) } var usersTableDesc catalog.TableDescriptor var roleOptionsTableDesc catalog.TableDescriptor - err = f.DescsTxn(ctx, db, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + err = db.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { - _, usersTableDesc, err = descs.PrefixAndTable(ctx, descriptors.ByNameWithLeased(txn).Get(), 
UsersTableName) + _, usersTableDesc, err = descs.PrefixAndTable(ctx, txn.Descriptors().ByNameWithLeased(txn.KV()).Get(), UsersTableName) if err != nil { return err } - _, roleOptionsTableDesc, err = descs.PrefixAndTable(ctx, descriptors.ByNameWithLeased(txn).Get(), RoleOptionsTableName) + _, roleOptionsTableDesc, err = descs.PrefixAndTable(ctx, txn.Descriptors().ByNameWithLeased(txn.KV()).Get(), RoleOptionsTableName) return err }) if err != nil { @@ -155,7 +153,7 @@ func (a *Cache) GetAuthInfo( val, err := a.loadValueOutsideOfCache( ctx, fmt.Sprintf("authinfo-%s-%d-%d", username.Normalized(), usersTableVersion, roleOptionsTableVersion), func(loadCtx context.Context) (interface{}, error) { - return readFromSystemTables(loadCtx, f, username, makePlanner, settings) + return readFromSystemTables(loadCtx, db, username, makePlanner, settings) }) if err != nil { return aInfo, err @@ -265,29 +263,28 @@ func (a *Cache) maybeWriteAuthInfoBackToCache( func (a *Cache) GetDefaultSettings( ctx context.Context, settings *cluster.Settings, - db *kv.DB, - f descs.TxnManager, + db descs.DB, userName username.SQLUsername, databaseName string, readFromSystemTables func( ctx context.Context, - f descs.TxnManager, + f descs.DB, userName username.SQLUsername, databaseID descpb.ID, ) ([]SettingsCacheEntry, error), ) (settingsEntries []SettingsCacheEntry, err error) { var dbRoleSettingsTableDesc catalog.TableDescriptor var databaseID descpb.ID - err = f.DescsTxn(ctx, db, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + err = db.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { - _, dbRoleSettingsTableDesc, err = descs.PrefixAndTable(ctx, descriptors.ByNameWithLeased(txn).Get(), DatabaseRoleSettingsTableName) + _, dbRoleSettingsTableDesc, err = descs.PrefixAndTable(ctx, txn.Descriptors().ByNameWithLeased(txn.KV()).Get(), DatabaseRoleSettingsTableName) if err != nil { return err } databaseID = descpb.ID(0) if databaseName != "" { - dbDesc, err := 
descriptors.ByNameWithLeased(txn).MaybeGet().Database(ctx, databaseName) + dbDesc, err := txn.Descriptors().ByNameWithLeased(txn.KV()).MaybeGet().Database(ctx, databaseName) if err != nil { return err } @@ -309,7 +306,7 @@ func (a *Cache) GetDefaultSettings( if !CacheEnabled.Get(&settings.SV) { settingsEntries, err = readFromSystemTables( ctx, - f, + db, userName, databaseID, ) @@ -333,7 +330,7 @@ func (a *Cache) GetDefaultSettings( val, err := a.loadValueOutsideOfCache( ctx, fmt.Sprintf("defaultsettings-%s-%d-%d", userName.Normalized(), databaseID, dbRoleSettingsTableVersion), func(loadCtx context.Context) (interface{}, error) { - return readFromSystemTables(loadCtx, f, userName, databaseID) + return readFromSystemTables(loadCtx, db, userName, databaseID) }, ) if err != nil { diff --git a/pkg/sql/sessioninit/cache_test.go b/pkg/sql/sessioninit/cache_test.go index c9d344fa9882..90e76a72a543 100644 --- a/pkg/sql/sessioninit/cache_test.go +++ b/pkg/sql/sessioninit/cache_test.go @@ -63,11 +63,10 @@ func TestCacheInvalidation(t *testing.T) { settings, err := execCfg.SessionInitCache.GetDefaultSettings( ctx, s.ClusterSettings(), - s.DB(), - s.InternalExecutorFactory().(*sql.InternalExecutorFactory), + s.InternalDB().(descs.DB), username.TestUserName(), "defaultdb", - func(ctx context.Context, ief descs.TxnManager, userName username.SQLUsername, databaseID descpb.ID) ([]sessioninit.SettingsCacheEntry, error) { + func(ctx context.Context, ief descs.DB, userName username.SQLUsername, databaseID descpb.ID) ([]sessioninit.SettingsCacheEntry, error) { didReadFromSystemTable = true return nil, nil }) @@ -89,10 +88,9 @@ func TestCacheInvalidation(t *testing.T) { aInfo, err := execCfg.SessionInitCache.GetAuthInfo( ctx, settings, - s.DB(), - s.InternalExecutorFactory().(*sql.InternalExecutorFactory), + s.InternalDB().(descs.DB), username.TestUserName(), - func(ctx context.Context, f descs.TxnManager, userName username.SQLUsername, makePlanner func(opName string) (interface{}, 
func()), settings *cluster.Settings) (sessioninit.AuthInfo, error) { + func(ctx context.Context, f descs.DB, userName username.SQLUsername, makePlanner func(opName string) (interface{}, func()), settings *cluster.Settings) (sessioninit.AuthInfo, error) { didReadFromSystemTable = true return sessioninit.AuthInfo{}, nil }, @@ -243,18 +241,21 @@ func TestCacheSingleFlight(t *testing.T) { go func() { didReadFromSystemTable := false - _, err := c.GetAuthInfo(ctx, settings, s.DB(), s.ExecutorConfig().(sql.ExecutorConfig).InternalExecutorFactory, testuser, func( - ctx context.Context, - f descs.TxnManager, - userName username.SQLUsername, - makePlanner func(opName string) (interface{}, func()), - settings *cluster.Settings, - ) (sessioninit.AuthInfo, error) { - wgFirstGetAuthInfoCallInProgress.Done() - wgForConcurrentReadWrite.Wait() - didReadFromSystemTable = true - return sessioninit.AuthInfo{}, nil - }, + _, err := c.GetAuthInfo( + ctx, settings, + execCfg.InternalDB, + testuser, func( + ctx context.Context, + f descs.DB, + userName username.SQLUsername, + makePlanner func(opName string) (interface{}, func()), + settings *cluster.Settings, + ) (sessioninit.AuthInfo, error) { + wgFirstGetAuthInfoCallInProgress.Done() + wgForConcurrentReadWrite.Wait() + didReadFromSystemTable = true + return sessioninit.AuthInfo{}, nil + }, makePlanner) require.NoError(t, err) require.True(t, didReadFromSystemTable) @@ -270,16 +271,21 @@ func TestCacheSingleFlight(t *testing.T) { for i := 0; i < 2; i++ { go func() { didReadFromSystemTable := false - _, err := c.GetAuthInfo(ctx, settings, s.DB(), s.ExecutorConfig().(sql.ExecutorConfig).InternalExecutorFactory, testuser, func( - ctx context.Context, - f descs.TxnManager, - userName username.SQLUsername, - makePlanner func(opName string) (interface{}, func()), - settings *cluster.Settings, - ) (sessioninit.AuthInfo, error) { - didReadFromSystemTable = true - return sessioninit.AuthInfo{}, nil - }, + _, err := c.GetAuthInfo( + ctx, + 
settings, + execCfg.InternalDB, + testuser, + func( + ctx context.Context, + f descs.DB, + userName username.SQLUsername, + makePlanner func(opName string) (interface{}, func()), + settings *cluster.Settings, + ) (sessioninit.AuthInfo, error) { + didReadFromSystemTable = true + return sessioninit.AuthInfo{}, nil + }, makePlanner) require.NoError(t, err) require.False(t, didReadFromSystemTable) @@ -294,16 +300,21 @@ func TestCacheSingleFlight(t *testing.T) { // GetAuthInfo should not be using the cache since it is outdated. didReadFromSystemTable := false - _, err = c.GetAuthInfo(ctx, settings, s.DB(), s.ExecutorConfig().(sql.ExecutorConfig).InternalExecutorFactory, testuser, func( - ctx context.Context, - f descs.TxnManager, - userName username.SQLUsername, - makePlanner func(opName string) (interface{}, func()), - settings *cluster.Settings, - ) (sessioninit.AuthInfo, error) { - didReadFromSystemTable = true - return sessioninit.AuthInfo{}, nil - }, + _, err = c.GetAuthInfo( + ctx, + settings, + execCfg.InternalDB, + testuser, + func( + ctx context.Context, + f descs.DB, + userName username.SQLUsername, + makePlanner func(opName string) (interface{}, func()), + settings *cluster.Settings, + ) (sessioninit.AuthInfo, error) { + didReadFromSystemTable = true + return sessioninit.AuthInfo{}, nil + }, makePlanner) require.NoError(t, err) diff --git a/pkg/sql/set_cluster_setting.go b/pkg/sql/set_cluster_setting.go index 4e939e725e6f..ed386470fd0c 100644 --- a/pkg/sql/set_cluster_setting.go +++ b/pkg/sql/set_cluster_setting.go @@ -20,7 +20,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/docs" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server/telemetry" @@ -29,6 +28,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/colexec" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/paramparse" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -243,7 +243,8 @@ func (n *setClusterSettingNode) startExec(params runParams) error { expectedEncodedValue, err := writeSettingInternal( params.ctx, - params.extendedEvalCtx.ExecCfg, + params.extendedEvalCtx.ExecCfg.VersionUpgradeHook, + params.extendedEvalCtx.ExecCfg.InternalDB, n.setting, n.name, params.p.User(), n.st, @@ -322,7 +323,8 @@ func (n *setClusterSettingNode) startExec(params runParams) error { func writeSettingInternal( ctx context.Context, - execCfg *ExecutorConfig, + hook VersionUpgradeHook, + db isql.DB, setting settings.NonMaskedSetting, name string, user username.SQLUsername, @@ -333,55 +335,54 @@ func writeSettingInternal( logFn func(context.Context, descpb.ID, logpb.EventPayload) error, releaseLeases func(context.Context), ) (expectedEncodedValue string, err error) { - err = execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := func() error { var reportedValue string if value == nil { // This code is doing work for RESET CLUSTER SETTING. var err error - reportedValue, expectedEncodedValue, err = writeDefaultSettingValue(ctx, execCfg, setting, name, txn) + reportedValue, expectedEncodedValue, err = writeDefaultSettingValue(ctx, db, setting, name) if err != nil { return err } } else { + // Setting a non-DEFAULT value. 
value, err := eval.Expr(ctx, evalCtx, value) if err != nil { return err } reportedValue, expectedEncodedValue, err = writeNonDefaultSettingValue( - ctx, execCfg, setting, name, txn, - user, st, value, forSystemTenant, + ctx, hook, db, + setting, name, user, st, value, forSystemTenant, releaseLeases, ) if err != nil { return err } } - return logFn(ctx, 0, /* no target */ &eventpb.SetClusterSetting{ SettingName: name, Value: reportedValue, }) - }) - return expectedEncodedValue, err + }(); err != nil { + return "", err + } + + return expectedEncodedValue, nil } // writeDefaultSettingValue performs the data write corresponding to a // RESET CLUSTER SETTING statement or changing the value of a setting // to DEFAULT. func writeDefaultSettingValue( - ctx context.Context, - execCfg *ExecutorConfig, - setting settings.NonMaskedSetting, - name string, - txn *kv.Txn, + ctx context.Context, db isql.DB, setting settings.NonMaskedSetting, name string, ) (reportedValue string, expectedEncodedValue string, err error) { reportedValue = "DEFAULT" expectedEncodedValue = setting.EncodedDefault() - _, err = execCfg.InternalExecutor.ExecEx( - ctx, "reset-setting", txn, + _, err = db.Executor().ExecEx( + ctx, "reset-setting", nil, sessiondata.RootUserSessionDataOverride, "DELETE FROM system.settings WHERE name = $1", name, ) @@ -392,10 +393,10 @@ func writeDefaultSettingValue( // setting to a non-DEFAULT value. 
func writeNonDefaultSettingValue( ctx context.Context, - execCfg *ExecutorConfig, + hook VersionUpgradeHook, + db isql.DB, setting settings.NonMaskedSetting, name string, - txn *kv.Txn, user username.SQLUsername, st *cluster.Settings, value tree.Datum, @@ -415,15 +416,15 @@ func writeNonDefaultSettingValue( verSetting, isSetVersion := setting.(*settings.VersionSetting) if isSetVersion { if err := setVersionSetting( - ctx, execCfg, verSetting, name, txn, user, st, value, encoded, + ctx, hook, verSetting, name, db, user, st, value, encoded, forSystemTenant, releaseLeases, ); err != nil { return reportedValue, expectedEncodedValue, err } } else { // Modifying another setting than the version. - if _, err = execCfg.InternalExecutor.ExecEx( - ctx, "update-setting", txn, + if _, err = db.Executor().ExecEx( + ctx, "update-setting", nil, sessiondata.RootUserSessionDataOverride, `UPSERT INTO system.settings (name, value, "lastUpdated", "valueType") VALUES ($1, $2, now(), $3)`, name, encoded, setting.Typ(), @@ -439,10 +440,10 @@ func writeNonDefaultSettingValue( // cluster setting. func setVersionSetting( ctx context.Context, - execCfg *ExecutorConfig, + hook VersionUpgradeHook, setting *settings.VersionSetting, name string, - txn *kv.Txn, + db isql.DB, user username.SQLUsername, st *cluster.Settings, value tree.Datum, @@ -453,8 +454,8 @@ func setVersionSetting( // In the special case of the 'version' cluster setting, // we must first read the previous value to validate that the // value change is valid. 
- datums, err := execCfg.InternalExecutor.QueryRowEx( - ctx, "retrieve-prev-setting", txn, + datums, err := db.Executor().QueryRowEx( + ctx, "retrieve-prev-setting", nil, sessiondata.RootUserSessionDataOverride, "SELECT value FROM system.settings WHERE name = $1", name, ) @@ -505,10 +506,10 @@ func setVersionSetting( if err != nil { return err } - return execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { // Confirm if the version has actually changed on us. - datums, err := execCfg.InternalExecutor.QueryRowEx( - ctx, "retrieve-prev-setting", txn, + datums, err := txn.QueryRowEx( + ctx, "retrieve-prev-setting", txn.KV(), sessiondata.RootUserSessionDataOverride, "SELECT value FROM system.settings WHERE name = $1", name, ) @@ -535,8 +536,8 @@ func setVersionSetting( } } // Only if the version has increased, alter the setting. - if _, err = execCfg.InternalExecutor.ExecEx( - ctx, "update-setting", txn, + if _, err = txn.ExecEx( + ctx, "update-setting", txn.KV(), sessiondata.RootUserSessionDataOverride, `UPSERT INTO system.settings (name, value, "lastUpdated", "valueType") VALUES ($1, $2, now(), $3)`, name, string(rawValue), setting.Typ(), @@ -547,8 +548,8 @@ func setVersionSetting( // If we're the system tenant, also send an override to each tenant // to ensure that they know about the new cluster version. if forSystemTenant { - if _, err = execCfg.InternalExecutor.ExecEx( - ctx, "update-setting", txn, + if _, err = txn.ExecEx( + ctx, "update-setting", txn.KV(), sessiondata.RootUserSessionDataOverride, `UPSERT INTO system.tenant_settings (tenant_id, name, value, "last_updated", "value_type") VALUES ($1, $2, $3, now(), $4)`, tree.NewDInt(0), name, string(rawValue), setting.Typ(), @@ -567,7 +568,7 @@ func setVersionSetting( // because the code isn't relying on them. 
releaseLeases(ctx) return runMigrationsAndUpgradeVersion( - ctx, execCfg, user, prev, value, updateVersionSystemSetting, + ctx, hook, user, prev, value, updateVersionSystemSetting, ) } @@ -611,7 +612,7 @@ func waitForSettingUpdate( // the system table. func runMigrationsAndUpgradeVersion( ctx context.Context, - execCfg *ExecutorConfig, + hook VersionUpgradeHook, user username.SQLUsername, prev tree.Datum, value tree.Datum, @@ -630,7 +631,7 @@ func runMigrationsAndUpgradeVersion( // toSettingString already validated the input, and checked to // see that we are allowed to transition. Let's call into our // upgrade hook to run migrations, if any. - if err := execCfg.VersionUpgradeHook(ctx, user, from, to, updateVersionSystemSetting); err != nil { + if err := hook(ctx, user, from, to, updateVersionSystemSetting); err != nil { return err } return nil diff --git a/pkg/sql/set_zone_config.go b/pkg/sql/set_zone_config.go index 476f44f7474f..42afe8d625ac 100644 --- a/pkg/sql/set_zone_config.go +++ b/pkg/sql/set_zone_config.go @@ -26,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/zone" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" @@ -1199,14 +1200,14 @@ func writeZoneConfigUpdate( // reuse an existing client.Txn safely. 
func RemoveIndexZoneConfigs( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, execCfg *ExecutorConfig, kvTrace bool, descriptors *descs.Collection, tableDesc catalog.TableDescriptor, indexIDs []uint32, ) error { - zoneWithRaw, err := descriptors.GetZoneConfig(ctx, txn, tableDesc.GetID()) + zoneWithRaw, err := descriptors.GetZoneConfig(ctx, txn.KV(), tableDesc.GetID()) if err != nil { return err } @@ -1235,7 +1236,9 @@ func RemoveIndexZoneConfigs( if zcRewriteNecessary { // Ignore CCL required error to allow schema change to progress. _, err = writeZoneConfig( - ctx, txn, tableDesc.GetID(), tableDesc, zone, zoneWithRaw.GetRawBytesInStorage(), execCfg, descriptors, false /* hasNewSubzones */, kvTrace, + ctx, txn.KV(), tableDesc.GetID(), tableDesc, zone, + zoneWithRaw.GetRawBytesInStorage(), execCfg, descriptors, + false /* hasNewSubzones */, kvTrace, ) if err != nil && !sqlerrors.IsCCLRequiredError(err) { return err diff --git a/pkg/sql/show_cluster_setting.go b/pkg/sql/show_cluster_setting.go index ce2ee18cf82d..59bad5e9a705 100644 --- a/pkg/sql/show_cluster_setting.go +++ b/pkg/sql/show_cluster_setting.go @@ -19,10 +19,10 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -55,10 +55,10 @@ func (p *planner) getCurrentEncodedVersionSettingValue( // The (slight ab)use of WithMaxAttempts achieves convenient context cancellation. 
return retry.WithMaxAttempts(ctx, retry.Options{}, math.MaxInt32, func() error { - return p.execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - datums, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryRowEx( + return p.execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + datums, err := txn.QueryRowEx( ctx, "read-setting", - txn, + txn.KV(), sessiondata.RootUserSessionDataOverride, "SELECT value FROM system.settings WHERE name = $1", name, ) diff --git a/pkg/sql/show_create_clauses.go b/pkg/sql/show_create_clauses.go index 52d809f186f1..3fc2332b8c69 100644 --- a/pkg/sql/show_create_clauses.go +++ b/pkg/sql/show_create_clauses.go @@ -54,10 +54,10 @@ type comment struct { // An alternative approach would be to leverage a virtual table which internally // uses the collection. func selectComment(ctx context.Context, p PlanHookState, tableID descpb.ID) (tc *tableComments) { - query := fmt.Sprintf("SELECT type, object_id, sub_id, comment FROM system.comments WHERE object_id = %d", tableID) + query := fmt.Sprintf("SELECT type, object_id, sub_id, comment FROM system.comments WHERE object_id = %d ORDER BY type, sub_id", tableID) txn := p.Txn() - it, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryIterator( + it, err := p.InternalSQLTxn().QueryIterator( ctx, "show-tables-with-comment", txn, query) if err != nil { log.VEventf(ctx, 1, "%q", err) diff --git a/pkg/sql/show_create_external_connection.go b/pkg/sql/show_create_external_connection.go index 926738e08c58..2a49505bbd66 100644 --- a/pkg/sql/show_create_external_connection.go +++ b/pkg/sql/show_create_external_connection.go @@ -45,7 +45,7 @@ func loadExternalConnections( } rows = append(rows, tree.Datums{tree.NewDString(name)}) } else { - datums, _, err := params.ExecCfg().InternalExecutor.QueryBufferedExWithCols( + datums, _, err := params.p.InternalSQLTxn().QueryBufferedExWithCols( params.ctx, "load-external-connections", params.p.Txn(), 
sessiondata.NodeUserSessionDataOverride, @@ -58,8 +58,9 @@ func loadExternalConnections( for _, row := range rows { connectionName := tree.MustBeDString(row[0]) - connection, err := externalconn.LoadExternalConnection(params.ctx, string(connectionName), - params.p.ExecCfg().InternalExecutor, params.p.Txn()) + connection, err := externalconn.LoadExternalConnection( + params.ctx, string(connectionName), params.p.InternalSQLTxn(), + ) if err != nil { return nil, err } diff --git a/pkg/sql/show_create_schedule.go b/pkg/sql/show_create_schedule.go index ecfe8e4bfe65..bc88780c766c 100644 --- a/pkg/sql/show_create_schedule.go +++ b/pkg/sql/show_create_schedule.go @@ -37,7 +37,7 @@ const ( ) func loadSchedules(params runParams, n *tree.ShowCreateSchedules) ([]*jobs.ScheduledJob, error) { - env := JobSchedulerEnv(params.ExecCfg()) + env := JobSchedulerEnv(params.ExecCfg().JobsKnobs()) var schedules []*jobs.ScheduledJob var rows []tree.Datums var cols colinfo.ResultColumns @@ -48,7 +48,7 @@ func loadSchedules(params runParams, n *tree.ShowCreateSchedules) ([]*jobs.Sched return nil, err } - datums, columns, err := params.ExecCfg().InternalExecutor.QueryRowExWithCols( + datums, columns, err := params.p.InternalSQLTxn().QueryRowExWithCols( params.ctx, "load-schedules", params.p.Txn(), sessiondata.RootUserSessionDataOverride, @@ -60,7 +60,7 @@ func loadSchedules(params runParams, n *tree.ShowCreateSchedules) ([]*jobs.Sched rows = append(rows, datums) cols = columns } else { - datums, columns, err := params.ExecCfg().InternalExecutor.QueryBufferedExWithCols( + datums, columns, err := params.p.InternalSQLTxn().QueryBufferedExWithCols( params.ctx, "load-schedules", params.p.Txn(), sessiondata.RootUserSessionDataOverride, @@ -112,14 +112,7 @@ func (p *planner) ShowCreateSchedule( return nil, err } - createStmtStr, err := ex.GetCreateScheduleStatement( - ctx, - scheduledjobs.ProdJobSchedulerEnv, - p.Txn(), - p.Descriptors(), - sj, - p.ExecCfg().InternalExecutor, - ) + createStmtStr, 
err := ex.GetCreateScheduleStatement(ctx, p.InternalSQLTxn(), scheduledjobs.ProdJobSchedulerEnv, sj) if err != nil { return nil, err } diff --git a/pkg/sql/show_create_table_test.go b/pkg/sql/show_create_table_test.go index 999be78899d1..7fc8ac363700 100644 --- a/pkg/sql/show_create_table_test.go +++ b/pkg/sql/show_create_table_test.go @@ -16,12 +16,10 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -45,23 +43,21 @@ func TestShowCreateTableWithConstraintInvalidated(t *testing.T) { tdb.Exec(t, `CREATE SCHEMA schema`) tdb.Exec(t, `CREATE TABLE db.schema.table(x INT, y INT, INDEX(y) USING HASH)`) - ief := s0.InternalExecutorFactory().(descs.TxnManager) + ief := s0.InternalDB().(descs.DB) require.NoError( t, - ief.DescsTxnWithExecutor(ctx, s0.DB(), nil /* sessionData */, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor, - ) error { + ief.DescsTxn(ctx, func(ctx context.Context, txn descs.Txn) error { tn := tree.MakeTableNameWithSchema("db", "schema", "table") - _, mut, err := descs.PrefixAndMutableTable(ctx, descriptors.MutableByName(txn), &tn) + _, mut, err := descs.PrefixAndMutableTable(ctx, txn.Descriptors().MutableByName(txn.KV()), &tn) require.NoError(t, err) require.NotNil(t, mut) // Check the show create table res before we invalidate the constraint. // The check constraint from the hash shared index should not appear. 
- rows, err := ie.QueryRowEx( + rows, err := txn.QueryRowEx( ctx, "show-create-table-before-invalidate-constraint", - txn, + txn.KV(), sessiondata.NoSessionDataOverride, `SHOW CREATE TABLE db.schema.table`, ) @@ -88,14 +84,16 @@ func TestShowCreateTableWithConstraintInvalidated(t *testing.T) { } } - require.NoError(t, descriptors.WriteDesc(ctx, true /* kvTrace */, mut, txn)) + require.NoError(t, txn.Descriptors().WriteDesc( + ctx, true /* kvTrace */, mut, txn.KV(), + )) // Check the show create table res after we invalidate the constraint. // The constraint should appear now. - rows, err = ie.QueryRowEx( + rows, err = txn.QueryRowEx( ctx, "show-create-table-after-invalidate-constraint", - txn, + txn.KV(), sessiondata.NoSessionDataOverride, `SHOW CREATE TABLE db.schema.table`, ) diff --git a/pkg/sql/show_fingerprints.go b/pkg/sql/show_fingerprints.go index e6c79c5a9aad..cd6b4bf02fb0 100644 --- a/pkg/sql/show_fingerprints.go +++ b/pkg/sql/show_fingerprints.go @@ -164,7 +164,7 @@ func (n *showFingerprintsNode) Next(params runParams) (bool, error) { sql = sql + " AS OF SYSTEM TIME " + ts.AsOfSystemTime() } - fingerprintCols, err := params.extendedEvalCtx.ExecCfg.InternalExecutor.QueryRowEx( + fingerprintCols, err := params.p.InternalSQLTxn().QueryRowEx( params.ctx, "hash-fingerprint", params.p.txn, sessiondata.RootUserSessionDataOverride, diff --git a/pkg/sql/show_histogram.go b/pkg/sql/show_histogram.go index 41ed1902a653..0e90a864b1d1 100644 --- a/pkg/sql/show_histogram.go +++ b/pkg/sql/show_histogram.go @@ -43,7 +43,7 @@ func (p *planner) ShowHistogram(ctx context.Context, n *tree.ShowHistogram) (pla columns: showHistogramColumns, constructor: func(ctx context.Context, p *planner) (planNode, error) { - row, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryRowEx( + row, err := p.InternalSQLTxn().QueryRowEx( ctx, "read-histogram", p.txn, diff --git a/pkg/sql/show_stats.go b/pkg/sql/show_stats.go index 460c7bfacaca..b7431e6efec5 100644 --- 
a/pkg/sql/show_stats.go +++ b/pkg/sql/show_stats.go @@ -145,7 +145,7 @@ func (p *planner) ShowTableStats(ctx context.Context, n *tree.ShowTableStats) (p FROM system.table_statistics WHERE "tableID" = $1 ORDER BY "createdAt", "columnIDs", "statisticID"`, partialPredicateCol, fullStatisticIDCol) - rows, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryBuffered( + rows, err := p.InternalSQLTxn().QueryBuffered( ctx, "read-table-stats", p.txn, diff --git a/pkg/sql/show_tenant.go b/pkg/sql/show_tenant.go index a6c6b4cb8453..fe22c3a224b3 100644 --- a/pkg/sql/show_tenant.go +++ b/pkg/sql/show_tenant.go @@ -84,7 +84,7 @@ func (p *planner) ShowTenant(ctx context.Context, n *tree.ShowTenant) (planNode, func (n *showTenantNode) startExec(params runParams) error { if _, ok := n.tenantSpec.(tenantSpecAll); ok { - ids, err := GetAllNonDropTenantIDs(params.ctx, params.p.execCfg, params.p.Txn()) + ids, err := GetAllNonDropTenantIDs(params.ctx, params.p.InternalSQLTxn()) if err != nil { return err } @@ -127,8 +127,8 @@ func getReplicationStats( log.Warningf(params.ctx, "protected timestamp unavailable for tenant %q and job %d", details.DestinationTenantName, job.ID()) } else { - ptp := params.p.execCfg.ProtectedTimestampProvider - record, err := ptp.GetRecord(params.ctx, params.p.Txn(), *stats.IngestionDetails.ProtectedTimestampRecordID) + ptp := params.p.execCfg.ProtectedTimestampProvider.WithTxn(params.p.InternalSQLTxn()) + record, err := ptp.GetRecord(params.ctx, *stats.IngestionDetails.ProtectedTimestampRecordID) if err != nil { // Protected timestamp might not be set yet, no need to fail. log.Warningf(params.ctx, "protected timestamp unavailable for tenant %q and job %d: %v", @@ -185,7 +185,7 @@ func (n *showTenantNode) getTenantValues( // There is a replication job, we need to get the job info and the // replication stats in order to generate the exact tenant status. 
registry := params.p.execCfg.JobRegistry - job, err := registry.LoadJobWithTxn(params.ctx, jobId, params.p.Txn()) + job, err := registry.LoadJobWithTxn(params.ctx, jobId, params.p.InternalSQLTxn()) if err != nil { log.Errorf(params.ctx, "cannot load job info for replicated tenant %q and job %d: %v", tenantInfo.Name, jobId, err) @@ -218,7 +218,7 @@ func (n *showTenantNode) Next(params runParams) (bool, error) { return false, nil } - tenantInfo, err := GetTenantRecordByID(params.ctx, params.p.execCfg, params.p.Txn(), n.tenantIds[n.row]) + tenantInfo, err := GetTenantRecordByID(params.ctx, params.p.InternalSQLTxn(), n.tenantIds[n.row]) if err != nil { return false, err } diff --git a/pkg/sql/sql_cursor.go b/pkg/sql/sql_cursor.go index 0d5b35ab44dc..9abd4517d39a 100644 --- a/pkg/sql/sql_cursor.go +++ b/pkg/sql/sql_cursor.go @@ -16,11 +16,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/clusterunique" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -48,7 +48,7 @@ func (p *planner) DeclareCursor(ctx context.Context, s *tree.DeclareCursor) (pla return nil, pgerror.Newf(pgcode.NoActiveSQLTransaction, "DECLARE CURSOR can only be used in transaction blocks") } - ie := p.ExecCfg().InternalExecutorFactory.NewInternalExecutor(p.SessionData()) + ie := p.ExecCfg().InternalDB.NewInternalExecutor(p.SessionData()) if cursor := p.sqlCursors.getCursor(s.Name); cursor != nil { return nil, pgerror.Newf(pgcode.DuplicateCursor, "cursor %q already exists", s.Name) } @@ -94,12 +94,12 @@ func (p 
*planner) DeclareCursor(ctx context.Context, s *tree.DeclareCursor) (pla } inputState := p.txn.GetLeafTxnInputState(ctx) cursor := &sqlCursor{ - InternalRows: rows, - readSeqNum: inputState.ReadSeqNum, - txn: p.txn, - statement: statement, - created: timeutil.Now(), - withHold: s.Hold, + Rows: rows, + readSeqNum: inputState.ReadSeqNum, + txn: p.txn, + statement: statement, + created: timeutil.Now(), + withHold: s.Hold, } if err := p.sqlCursors.addCursor(s.Name, cursor); err != nil { // This case shouldn't happen because cursor names are scoped to a session, @@ -226,7 +226,7 @@ func (f fetchNode) Values() tree.Datums { } func (f fetchNode) Close(ctx context.Context) { - // We explicitly do not pass through the Close to our InternalRows, because + // We explicitly do not pass through the Close to our Rows, because // running FETCH on a CURSOR does not close it. // Reset the transaction's read sequence number to what it was before the @@ -249,7 +249,7 @@ func (p *planner) CloseCursor(ctx context.Context, n *tree.CloseCursor) (planNod } type sqlCursor struct { - sqlutil.InternalRows + isql.Rows // txn is the transaction object that the internal executor for this cursor // is running with. txn *kv.Txn @@ -262,9 +262,9 @@ type sqlCursor struct { withHold bool } -// Next implements the InternalRows interface. +// Next implements the Rows interface. 
func (s *sqlCursor) Next(ctx context.Context) (bool, error) { - more, err := s.InternalRows.Next(ctx) + more, err := s.Rows.Next(ctx) if err == nil { s.curRow++ } diff --git a/pkg/sql/sqlinstance/instancestorage/instancestorage.go b/pkg/sql/sqlinstance/instancestorage/instancestorage.go index b75c3570c9aa..22be76e5a243 100644 --- a/pkg/sql/sqlinstance/instancestorage/instancestorage.go +++ b/pkg/sql/sqlinstance/instancestorage/instancestorage.go @@ -396,16 +396,18 @@ func (s *Storage) RunInstanceIDReclaimLoop( ctx context.Context, stopper *stop.Stopper, ts timeutil.TimeSource, - internalExecutorFactory descs.TxnManager, + db descs.DB, sessionExpirationFn func() hlc.Timestamp, ) error { loadRegions := func() ([][]byte, error) { // Load regions from the system DB. var regions [][]byte - if err := internalExecutorFactory.DescsTxn(ctx, s.db, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + if err := db.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { - enumReps, _, err := sql.GetRegionEnumRepresentations(ctx, txn, keys.SystemDatabaseID, descsCol) + enumReps, _, err := sql.GetRegionEnumRepresentations( + ctx, txn.KV(), keys.SystemDatabaseID, txn.Descriptors(), + ) if err != nil { if errors.Is(err, sql.ErrNotMultiRegionDatabase) { return nil diff --git a/pkg/sql/sqlinstance/instancestorage/instancestorage_test.go b/pkg/sql/sqlinstance/instancestorage/instancestorage_test.go index 58dd1462e2d9..6ff841f18a47 100644 --- a/pkg/sql/sqlinstance/instancestorage/instancestorage_test.go +++ b/pkg/sql/sqlinstance/instancestorage/instancestorage_test.go @@ -523,8 +523,8 @@ func TestReclaimLoop(t *testing.T) { const expiration = 5 * time.Hour sessionExpiry := clock.Now().Add(expiration.Nanoseconds(), 0) - ief := s.InternalExecutorFactory().(descs.TxnManager) - err := storage.RunInstanceIDReclaimLoop(ctx, s.Stopper(), ts, ief, func() hlc.Timestamp { + db := s.InternalDB().(descs.DB) + err := storage.RunInstanceIDReclaimLoop(ctx, s.Stopper(), 
ts, db, func() hlc.Timestamp { return sessionExpiry }) require.NoError(t, err) diff --git a/pkg/sql/sqlstats/persistedsqlstats/BUILD.bazel b/pkg/sql/sqlstats/persistedsqlstats/BUILD.bazel index 58eb12733b15..78c3f853731a 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/BUILD.bazel +++ b/pkg/sql/sqlstats/persistedsqlstats/BUILD.bazel @@ -27,7 +27,6 @@ go_library( "//pkg/clusterversion", "//pkg/jobs", "//pkg/jobs/jobspb", - "//pkg/kv", "//pkg/roachpb", "//pkg/scheduledjobs", "//pkg/security/username", @@ -35,13 +34,13 @@ go_library( "//pkg/settings", "//pkg/settings/cluster", "//pkg/sql/catalog/systemschema", + "//pkg/sql/isql", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", "//pkg/sql/sqlstats", "//pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil", "//pkg/sql/sqlstats/sslocal", "//pkg/sql/sqlstats/ssmemstorage", - "//pkg/sql/sqlutil", "//pkg/sql/types", "//pkg/util", "//pkg/util/log", @@ -87,10 +86,10 @@ go_test( "//pkg/sql", "//pkg/sql/catalog", "//pkg/sql/catalog/systemschema", + "//pkg/sql/isql", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", "//pkg/sql/sqlstats", - "//pkg/sql/sqlutil", "//pkg/sql/tests", "//pkg/testutils", "//pkg/testutils/datapathutils", diff --git a/pkg/sql/sqlstats/persistedsqlstats/combined_iterator.go b/pkg/sql/sqlstats/persistedsqlstats/combined_iterator.go index b637bb3d1910..545f6b2c8544 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/combined_iterator.go +++ b/pkg/sql/sqlstats/persistedsqlstats/combined_iterator.go @@ -15,7 +15,7 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/errors" ) @@ -35,14 +35,14 @@ type CombinedStmtStatsIterator struct { disk struct { canBeAdvanced bool paused bool - it sqlutil.InternalRows + it isql.Rows } } // NewCombinedStmtStatsIterator returns a new instance of // CombinedStmtStatsIterator. 
func NewCombinedStmtStatsIterator( - memIter *memStmtStatsIterator, diskIter sqlutil.InternalRows, expectedColCnt int, + memIter *memStmtStatsIterator, diskIter isql.Rows, expectedColCnt int, ) *CombinedStmtStatsIterator { c := &CombinedStmtStatsIterator{ expectedColCnt: expectedColCnt, @@ -221,14 +221,14 @@ type CombinedTxnStatsIterator struct { disk struct { canBeAdvanced bool paused bool - it sqlutil.InternalRows + it isql.Rows } } // NewCombinedTxnStatsIterator returns a new instance of // CombinedTxnStatsIterator. func NewCombinedTxnStatsIterator( - memIter *memTxnStatsIterator, diskIter sqlutil.InternalRows, expectedColCnt int, + memIter *memTxnStatsIterator, diskIter isql.Rows, expectedColCnt int, ) *CombinedTxnStatsIterator { c := &CombinedTxnStatsIterator{ expectedColCnt: expectedColCnt, diff --git a/pkg/sql/sqlstats/persistedsqlstats/compaction_exec.go b/pkg/sql/sqlstats/persistedsqlstats/compaction_exec.go index 30114a7dabfe..1c26a128e714 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/compaction_exec.go +++ b/pkg/sql/sqlstats/persistedsqlstats/compaction_exec.go @@ -15,13 +15,12 @@ import ( "fmt" "time" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" @@ -31,8 +30,7 @@ import ( // executed by sql.sqlStatsCompactionResumer. 
type StatsCompactor struct { st *cluster.Settings - db *kv.DB - ie sqlutil.InternalExecutor + db isql.DB rowsRemovedCounter *metric.Counter @@ -46,15 +44,13 @@ type StatsCompactor struct { // NewStatsCompactor returns a new instance of StatsCompactor. func NewStatsCompactor( setting *cluster.Settings, - internalEx sqlutil.InternalExecutor, - db *kv.DB, + db isql.DB, rowsRemovedCounter *metric.Counter, knobs *sqlstats.TestingKnobs, ) *StatsCompactor { return &StatsCompactor{ st: setting, db: db, - ie: internalEx, rowsRemovedCounter: rowsRemovedCounter, knobs: knobs, } @@ -114,7 +110,7 @@ func (c *StatsCompactor) removeStaleRowsPerShard( func (c *StatsCompactor) getRowCountForShard( ctx context.Context, stmt string, shardIdx int, count *int64, ) error { - row, err := c.ie.QueryRowEx(ctx, + row, err := c.db.Executor().QueryRowEx(ctx, "scan-row-count", nil, sessiondata.NodeUserSessionDataOverride, @@ -211,7 +207,7 @@ func (c *StatsCompactor) removeStaleRowsForShard( func (c *StatsCompactor) executeDeleteStmt( ctx context.Context, delStmt string, qargs []interface{}, ) (lastRow tree.Datums, rowsDeleted int64, err error) { - it, err := c.ie.QueryIteratorEx(ctx, + it, err := c.db.Executor().QueryIteratorEx(ctx, "delete-old-sql-stats", nil, /* txn */ sessiondata.NodeUserSessionDataOverride, diff --git a/pkg/sql/sqlstats/persistedsqlstats/compaction_scheduling.go b/pkg/sql/sqlstats/persistedsqlstats/compaction_scheduling.go index 168140074d88..b747e5baf568 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/compaction_scheduling.go +++ b/pkg/sql/sqlstats/persistedsqlstats/compaction_scheduling.go @@ -15,13 +15,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/isql" 
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/errors" pbtypes "github.com/gogo/protobuf/types" ) @@ -36,9 +35,9 @@ var ErrDuplicatedSchedules = errors.New("creating multiple sql stats compaction // scheduled job subsystem so the compaction job can be run periodically. This // is done during the cluster startup upgrade. func CreateSQLStatsCompactionScheduleIfNotYetExist( - ctx context.Context, ie sqlutil.InternalExecutor, txn *kv.Txn, st *cluster.Settings, + ctx context.Context, txn isql.Txn, st *cluster.Settings, ) (*jobs.ScheduledJob, error) { - scheduleExists, err := checkExistingCompactionSchedule(ctx, ie, txn) + scheduleExists, err := checkExistingCompactionSchedule(ctx, txn) if err != nil { return nil, err } @@ -72,7 +71,7 @@ func CreateSQLStatsCompactionScheduleIfNotYetExist( ) compactionSchedule.SetScheduleStatus(string(jobs.StatusPending)) - if err = compactionSchedule.Create(ctx, ie, txn); err != nil { + if err := jobs.ScheduledJobTxn(txn).Create(ctx, compactionSchedule); err != nil { return nil, err } @@ -83,7 +82,7 @@ func CreateSQLStatsCompactionScheduleIfNotYetExist( // We do not need to worry about checking if the job already exist; // at most 1 job semantics are enforced by scheduled jobs system. 
func CreateCompactionJob( - ctx context.Context, createdByInfo *jobs.CreatedByInfo, txn *kv.Txn, jobRegistry *jobs.Registry, + ctx context.Context, createdByInfo *jobs.CreatedByInfo, txn isql.Txn, jobRegistry *jobs.Registry, ) (jobspb.JobID, error) { record := jobs.Record{ Description: "automatic SQL Stats compaction", @@ -100,12 +99,10 @@ func CreateCompactionJob( return jobID, nil } -func checkExistingCompactionSchedule( - ctx context.Context, ie sqlutil.InternalExecutor, txn *kv.Txn, -) (exists bool, _ error) { +func checkExistingCompactionSchedule(ctx context.Context, txn isql.Txn) (exists bool, _ error) { query := "SELECT count(*) FROM system.scheduled_jobs WHERE schedule_name = $1" - row, err := ie.QueryRowEx(ctx, "check-existing-sql-stats-schedule", txn, + row, err := txn.QueryRowEx(ctx, "check-existing-sql-stats-schedule", txn.KV(), sessiondata.NodeUserSessionDataOverride, query, compactionScheduleName, ) diff --git a/pkg/sql/sqlstats/persistedsqlstats/compaction_test.go b/pkg/sql/sqlstats/persistedsqlstats/compaction_test.go index b7f3f73e9aae..d38f3cfcf15c 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/compaction_test.go +++ b/pkg/sql/sqlstats/persistedsqlstats/compaction_test.go @@ -27,10 +27,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/persistedsqlstats" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -52,8 +52,7 @@ func TestSQLStatsCompactorNilTestingKnobCheck(t *testing.T) { statsCompactor := persistedsqlstats.NewStatsCompactor( 
server.ClusterSettings(), - server.InternalExecutor().(sqlutil.InternalExecutor), - server.DB(), + server.InternalDB().(isql.DB), metric.NewCounter(metric.Metadata{}), nil, /* knobs */ ) @@ -140,7 +139,7 @@ func TestSQLStatsCompactor(t *testing.T) { defer server.Stopper().Stop(ctx) sqlConn := sqlutils.MakeSQLRunner(conn) - internalExecutor := server.InternalExecutor().(sqlutil.InternalExecutor) + internalExecutor := server.InternalExecutor().(isql.Executor) // Disable automatic flush since the test will handle the flush manually. sqlConn.Exec(t, "SET CLUSTER SETTING sql.stats.flush.interval = '24h'") @@ -188,8 +187,7 @@ func TestSQLStatsCompactor(t *testing.T) { statsCompactor := persistedsqlstats.NewStatsCompactor( server.ClusterSettings(), - server.InternalExecutor().(sqlutil.InternalExecutor), - server.DB(), + server.InternalDB().(isql.DB), metric.NewCounter(metric.Metadata{}), &sqlstats.TestingKnobs{ AOSTClause: "AS OF SYSTEM TIME '-1us'", @@ -308,8 +306,7 @@ func TestSQLStatsForegroundInterference(t *testing.T) { statsCompactor := persistedsqlstats.NewStatsCompactor( s.ClusterSettings(), - s.InternalExecutor().(sqlutil.InternalExecutor), - s.DB(), + s.InternalDB().(isql.DB), metric.NewCounter(metric.Metadata{}), params.Knobs.SQLStatsKnobs.(*sqlstats.TestingKnobs), ) diff --git a/pkg/sql/sqlstats/persistedsqlstats/controller.go b/pkg/sql/sqlstats/persistedsqlstats/controller.go index afbf2b76c926..5c6a760ec86c 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/controller.go +++ b/pkg/sql/sqlstats/persistedsqlstats/controller.go @@ -13,12 +13,11 @@ package persistedsqlstats import ( "context" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/sslocal" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" ) // 
Controller implements the SQL Stats subsystem control plane. This exposes @@ -27,22 +26,17 @@ import ( // subsystem. type Controller struct { *sslocal.Controller - db *kv.DB - ie sqlutil.InternalExecutor + db isql.DB st *cluster.Settings } // NewController returns a new instance of sqlstats.Controller. func NewController( - sqlStats *PersistedSQLStats, - status serverpb.SQLStatusServer, - db *kv.DB, - ie sqlutil.InternalExecutor, + sqlStats *PersistedSQLStats, status serverpb.SQLStatusServer, db isql.DB, ) *Controller { return &Controller{ Controller: sslocal.NewController(sqlStats.SQLStats, status), db: db, - ie: ie, st: sqlStats.cfg.Settings, } } @@ -50,8 +44,8 @@ func NewController( // CreateSQLStatsCompactionSchedule implements the tree.SQLStatsController // interface. func (s *Controller) CreateSQLStatsCompactionSchedule(ctx context.Context) error { - return s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - _, err := CreateSQLStatsCompactionScheduleIfNotYetExist(ctx, s.ie, txn, s.st) + return s.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + _, err := CreateSQLStatsCompactionScheduleIfNotYetExist(ctx, txn, s.st) return err }) } @@ -64,17 +58,15 @@ func (s *Controller) ResetClusterSQLStats(ctx context.Context) error { return err } - resetSysTableStats := func(tableName string) error { - if _, err := s.ie.ExecEx( + resetSysTableStats := func(tableName string) (err error) { + ex := s.db.Executor() + _, err = ex.ExecEx( ctx, "reset-sql-stats", nil, /* txn */ sessiondata.NodeUserSessionDataOverride, - "TRUNCATE "+tableName); err != nil { - return err - } - - return nil + "TRUNCATE "+tableName) + return err } if err := resetSysTableStats("system.statement_statistics"); err != nil { return err diff --git a/pkg/sql/sqlstats/persistedsqlstats/flush.go b/pkg/sql/sqlstats/persistedsqlstats/flush.go index 49a773f49018..13963c8bfdc0 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/flush.go +++ b/pkg/sql/sqlstats/persistedsqlstats/flush.go @@ -16,8 
+16,8 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" @@ -128,13 +128,13 @@ func (s *PersistedSQLStats) doFlush(ctx context.Context, workFn func() error, er func (s *PersistedSQLStats) doFlushSingleTxnStats( ctx context.Context, stats *roachpb.CollectedTransactionStatistics, aggregatedTs time.Time, ) error { - return s.cfg.KvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return s.cfg.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { // Explicitly copy the stats variable so the txn closure is retryable. scopedStats := *stats serializedFingerprintID := sqlstatsutil.EncodeUint64ToBytes(uint64(stats.TransactionFingerprintID)) - insertFn := func(ctx context.Context, txn *kv.Txn) (alreadyExists bool, err error) { + insertFn := func(ctx context.Context, txn isql.Txn) (alreadyExists bool, err error) { rowsAffected, err := s.insertTransactionStats(ctx, txn, aggregatedTs, serializedFingerprintID, &scopedStats) if err != nil { @@ -148,7 +148,7 @@ func (s *PersistedSQLStats) doFlushSingleTxnStats( return false /* alreadyExists */, nil /* err */ } - readFn := func(ctx context.Context, txn *kv.Txn) error { + readFn := func(ctx context.Context, txn isql.Txn) error { persistedData := roachpb.TransactionStatistics{} err := s.fetchPersistedTransactionStats(ctx, txn, aggregatedTs, serializedFingerprintID, scopedStats.App, &persistedData) if err != nil { @@ -159,7 +159,7 @@ func (s *PersistedSQLStats) doFlushSingleTxnStats( return nil } - updateFn := func(ctx context.Context, txn *kv.Txn) error { + updateFn := func(ctx context.Context, txn isql.Txn) error { return s.updateTransactionStats(ctx, txn, aggregatedTs, serializedFingerprintID, 
&scopedStats) } @@ -174,7 +174,7 @@ func (s *PersistedSQLStats) doFlushSingleTxnStats( func (s *PersistedSQLStats) doFlushSingleStmtStats( ctx context.Context, stats *roachpb.CollectedStatementStatistics, aggregatedTs time.Time, ) error { - return s.cfg.KvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return s.cfg.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { // Explicitly copy the stats so that this closure is retryable. scopedStats := *stats @@ -182,7 +182,7 @@ func (s *PersistedSQLStats) doFlushSingleStmtStats( serializedTransactionFingerprintID := sqlstatsutil.EncodeUint64ToBytes(uint64(scopedStats.Key.TransactionFingerprintID)) serializedPlanHash := sqlstatsutil.EncodeUint64ToBytes(scopedStats.Key.PlanHash) - insertFn := func(ctx context.Context, txn *kv.Txn) (alreadyExists bool, err error) { + insertFn := func(ctx context.Context, txn isql.Txn) (alreadyExists bool, err error) { rowsAffected, err := s.insertStatementStats( ctx, txn, @@ -204,7 +204,7 @@ func (s *PersistedSQLStats) doFlushSingleStmtStats( return false /* alreadyExists */, nil /* err */ } - readFn := func(ctx context.Context, txn *kv.Txn) error { + readFn := func(ctx context.Context, txn isql.Txn) error { persistedData := roachpb.StatementStatistics{} err := s.fetchPersistedStatementStats( ctx, @@ -224,7 +224,7 @@ func (s *PersistedSQLStats) doFlushSingleStmtStats( return nil } - updateFn := func(ctx context.Context, txn *kv.Txn) error { + updateFn := func(ctx context.Context, txn isql.Txn) error { return s.updateStatementStats( ctx, txn, @@ -246,10 +246,10 @@ func (s *PersistedSQLStats) doFlushSingleStmtStats( func (s *PersistedSQLStats) doInsertElseDoUpdate( ctx context.Context, - txn *kv.Txn, - insertFn func(context.Context, *kv.Txn) (alreadyExists bool, err error), - readFn func(context.Context, *kv.Txn) error, - updateFn func(context.Context, *kv.Txn) error, + txn isql.Txn, + insertFn func(context.Context, isql.Txn) (alreadyExists bool, err error), + readFn 
func(context.Context, isql.Txn) error, + updateFn func(context.Context, isql.Txn) error, ) error { alreadyExists, err := insertFn(ctx, txn) if err != nil { @@ -298,7 +298,7 @@ func (s *PersistedSQLStats) getTimeNow() time.Time { func (s *PersistedSQLStats) insertTransactionStats( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, aggregatedTs time.Time, serializedFingerprintID []byte, stats *roachpb.CollectedTransactionStatistics, @@ -326,10 +326,10 @@ DO NOTHING statistics := tree.NewDJSON(statisticsJSON) nodeID := s.GetEnabledSQLInstanceID() - rowsAffected, err = s.cfg.InternalExecutor.ExecEx( + rowsAffected, err = txn.ExecEx( ctx, "insert-txn-stats", - txn, /* txn */ + txn.KV(), sessiondata.NodeUserSessionDataOverride, insertStmt, aggregatedTs, // aggregated_ts @@ -345,7 +345,7 @@ DO NOTHING } func (s *PersistedSQLStats) updateTransactionStats( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, aggregatedTs time.Time, serializedFingerprintID []byte, stats *roachpb.CollectedTransactionStatistics, @@ -366,10 +366,10 @@ WHERE fingerprint_id = $2 statistics := tree.NewDJSON(statisticsJSON) nodeID := s.GetEnabledSQLInstanceID() - rowsAffected, err := s.cfg.InternalExecutor.ExecEx( + rowsAffected, err := txn.ExecEx( ctx, "update-stmt-stats", - txn, /* txn */ + txn.KV(), /* txn */ sessiondata.NodeUserSessionDataOverride, updateStmt, statistics, // statistics @@ -393,7 +393,7 @@ WHERE fingerprint_id = $2 func (s *PersistedSQLStats) updateStatementStats( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, aggregatedTs time.Time, serializedFingerprintID []byte, serializedTransactionFingerprintID []byte, @@ -424,10 +424,10 @@ WHERE fingerprint_id = $3 } nodeID := s.GetEnabledSQLInstanceID() - rowsAffected, err := s.cfg.InternalExecutor.ExecEx( + rowsAffected, err := txn.ExecEx( ctx, "update-stmt-stats", - txn, /* txn */ + txn.KV(), /* txn */ sessiondata.NodeUserSessionDataOverride, updateStmt, statistics, // statistics @@ -461,7 +461,7 @@ WHERE fingerprint_id = $3 func 
(s *PersistedSQLStats) insertStatementStats( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, aggregatedTs time.Time, serializedFingerprintID []byte, serializedTransactionFingerprintID []byte, @@ -518,10 +518,10 @@ ON CONFLICT (crdb_internal_aggregated_ts_app_name_fingerprint_id_node_id_plan_ha aggregated_ts, fingerprint_id, transaction_fingerprint_id, app_name, plan_hash, node_id) DO NOTHING `, values) - rowsAffected, err = s.cfg.InternalExecutor.ExecEx( + rowsAffected, err = txn.ExecEx( ctx, "insert-stmt-stats", - txn, /* txn */ + txn.KV(), /* txn */ sessiondata.NodeUserSessionDataOverride, insertStmt, args..., @@ -532,7 +532,7 @@ DO NOTHING func (s *PersistedSQLStats) fetchPersistedTransactionStats( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, aggregatedTs time.Time, serializedFingerprintID []byte, appName string, @@ -553,10 +553,10 @@ FOR UPDATE ` nodeID := s.GetEnabledSQLInstanceID() - row, err := s.cfg.InternalExecutor.QueryRowEx( + row, err := txn.QueryRowEx( ctx, "fetch-txn-stats", - txn, /* txn */ + txn.KV(), /* txn */ sessiondata.NodeUserSessionDataOverride, readStmt, // stmt serializedFingerprintID, // fingerprint_id @@ -587,7 +587,7 @@ FOR UPDATE func (s *PersistedSQLStats) fetchPersistedStatementStats( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, aggregatedTs time.Time, serializedFingerprintID []byte, serializedTransactionFingerprintID []byte, @@ -609,10 +609,10 @@ WHERE fingerprint_id = $1 FOR UPDATE ` nodeID := s.GetEnabledSQLInstanceID() - row, err := s.cfg.InternalExecutor.QueryRowEx( + row, err := txn.QueryRowEx( ctx, "fetch-stmt-stats", - txn, /* txn */ + txn.KV(), /* txn */ sessiondata.NodeUserSessionDataOverride, readStmt, // stmt serializedFingerprintID, // fingerprint_id diff --git a/pkg/sql/sqlstats/persistedsqlstats/provider.go b/pkg/sql/sqlstats/persistedsqlstats/provider.go index 4bf1483f1da5..f6581e68eac1 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/provider.go +++ b/pkg/sql/sqlstats/persistedsqlstats/provider.go @@ 
-21,12 +21,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/sslocal" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/cockroach/pkg/util/mon" @@ -37,9 +36,8 @@ import ( // Config is a configuration struct for the persisted SQL stats subsystem. type Config struct { Settings *cluster.Settings - InternalExecutor sqlutil.InternalExecutor InternalExecutorMonitor *mon.BytesMonitor - KvDB *kv.DB + DB isql.DB SQLIDContainer *base.SQLIDContainer JobRegistry *jobs.Registry @@ -88,8 +86,7 @@ func New(cfg *Config, memSQLStats *sslocal.SQLStats) *PersistedSQLStats { p.jobMonitor = jobMonitor{ st: cfg.Settings, - ie: cfg.InternalExecutor, - db: cfg.KvDB, + db: cfg.DB, scanInterval: defaultScanInterval, jitterFn: p.jitterInterval, } @@ -111,7 +108,7 @@ func (s *PersistedSQLStats) Start(ctx context.Context, stopper *stop.Stopper) { // GetController returns the controller of the PersistedSQLStats. 
func (s *PersistedSQLStats) GetController(server serverpb.SQLStatusServer) *Controller { - return NewController(s, server, s.cfg.KvDB, s.cfg.InternalExecutor) + return NewController(s, server, s.cfg.DB) } func (s *PersistedSQLStats) startSQLStatsFlushLoop(ctx context.Context, stopper *stop.Stopper) { diff --git a/pkg/sql/sqlstats/persistedsqlstats/scheduled_job_monitor.go b/pkg/sql/sqlstats/persistedsqlstats/scheduled_job_monitor.go index 0520a9913b7d..5bf0c898a048 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/scheduled_job_monitor.go +++ b/pkg/sql/sqlstats/persistedsqlstats/scheduled_job_monitor.go @@ -15,12 +15,11 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/jobs" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/retry" "github.com/cockroachdb/cockroach/pkg/util/stop" @@ -60,8 +59,7 @@ var longIntervalWarningThreshold = time.Hour * 24 // periodically every scanInterval (subject to jittering). 
type jobMonitor struct { st *cluster.Settings - ie sqlutil.InternalExecutor - db *kv.DB + db isql.DB scanInterval time.Duration jitterFn func(time.Duration) time.Duration testingKnobs struct { @@ -99,7 +97,7 @@ func (j *jobMonitor) start(ctx context.Context, stopper *stop.Stopper) { return } if SQLStatsCleanupRecurrence.Get(&j.st.SV) != currentRecurrence || nextJobScheduleCheck.Before(timeutil.Now()) { - j.updateSchedule(stopCtx, j.ie) + j.updateSchedule(stopCtx) nextJobScheduleCheck = timeutil.Now().Add(j.jitterFn(j.scanInterval)) currentRecurrence = SQLStatsCleanupRecurrence.Get(&j.st.SV) } @@ -110,12 +108,12 @@ func (j *jobMonitor) start(ctx context.Context, stopper *stop.Stopper) { } func (j *jobMonitor) getSchedule( - ctx context.Context, txn *kv.Txn, + ctx context.Context, txn isql.Txn, ) (sj *jobs.ScheduledJob, _ error) { - row, err := j.ie.QueryRowEx( + row, err := txn.QueryRowEx( ctx, "load-sql-stats-scheduled-job", - txn, + txn.KV(), sessiondata.NodeUserSessionDataOverride, "SELECT schedule_id FROM system.scheduled_jobs WHERE schedule_name = $1", compactionScheduleName, @@ -130,7 +128,7 @@ func (j *jobMonitor) getSchedule( scheduledJobID := int64(tree.MustBeDInt(row[0])) - sj, err = jobs.LoadScheduledJob(ctx, scheduledjobs.ProdJobSchedulerEnv, scheduledJobID, j.ie, txn) + sj, err = jobs.ScheduledJobTxn(txn).Load(ctx, scheduledjobs.ProdJobSchedulerEnv, scheduledJobID) if err != nil { return nil, err } @@ -138,7 +136,7 @@ func (j *jobMonitor) getSchedule( return sj, nil } -func (j *jobMonitor) updateSchedule(ctx context.Context, ie sqlutil.InternalExecutor) { +func (j *jobMonitor) updateSchedule(ctx context.Context) { var sj *jobs.ScheduledJob var err error retryOptions := retry.Options{ @@ -146,15 +144,16 @@ func (j *jobMonitor) updateSchedule(ctx context.Context, ie sqlutil.InternalExec MaxBackoff: 10 * time.Minute, } for r := retry.StartWithCtx(ctx, retryOptions); r.Next(); { - if err = j.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if 
err = j.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { // We check if we can get load the schedule, if the schedule cannot be // loaded because it's not found, we recreate the schedule. sj, err = j.getSchedule(ctx, txn) + if err != nil { if !jobs.HasScheduledJobNotFoundError(err) && !errors.Is(err, errScheduleNotFound) { return err } - sj, err = CreateSQLStatsCompactionScheduleIfNotYetExist(ctx, j.ie, txn, j.st) + sj, err = CreateSQLStatsCompactionScheduleIfNotYetExist(ctx, txn, j.st) if err != nil { return err } @@ -168,7 +167,7 @@ func (j *jobMonitor) updateSchedule(ctx context.Context, ie sqlutil.InternalExec return err } sj.SetScheduleStatus(string(jobs.StatusPending)) - return sj.Update(ctx, ie, txn) + return jobs.ScheduledJobTxn(txn).Update(ctx, sj) }); err != nil && ctx.Err() == nil { log.Errorf(ctx, "failed to update stats scheduled compaction job: %s", err) } else { diff --git a/pkg/sql/sqlstats/persistedsqlstats/scheduled_sql_stats_compaction_test.go b/pkg/sql/sqlstats/persistedsqlstats/scheduled_sql_stats_compaction_test.go index 10a2a1e2e8a3..e8ceda693685 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/scheduled_sql_stats_compaction_test.go +++ b/pkg/sql/sqlstats/persistedsqlstats/scheduled_sql_stats_compaction_test.go @@ -22,10 +22,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobstest" "github.com/cockroachdb/cockroach/pkg/scheduledjobs" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/persistedsqlstats" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -112,14 +112,8 @@ func getSQLStatsCompactionSchedule(t *testing.T, helper *testHelper) *jobs.Sched helper.sqlDB. 
QueryRow(t, `SELECT schedule_id FROM system.scheduled_jobs WHERE schedule_name = 'sql-stats-compaction'`). Scan(&jobID) - sj, err := - jobs.LoadScheduledJob( - context.Background(), - helper.env, - jobID, - helper.server.InternalExecutor().(sqlutil.InternalExecutor), - nil, /* txn */ - ) + schedules := jobs.ScheduledJobDB(helper.server.InternalDB().(isql.DB)) + sj, err := schedules.Load(context.Background(), helper.env, jobID) require.NoError(t, err) require.NotNil(t, sj) return sj diff --git a/pkg/sql/sqlstats/persistedsqlstats/stmt_reader.go b/pkg/sql/sqlstats/persistedsqlstats/stmt_reader.go index b4a662fc601a..2f4ba4dcb7e3 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/stmt_reader.go +++ b/pkg/sql/sqlstats/persistedsqlstats/stmt_reader.go @@ -18,11 +18,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/errors" ) @@ -44,7 +44,7 @@ func (s *PersistedSQLStats) IterateStatementStats( aggInterval := s.GetAggregationInterval() memIter := newMemStmtStatsIterator(s.SQLStats, options, curAggTs, aggInterval) - var persistedIter sqlutil.InternalRows + var persistedIter isql.Rows var colCnt int persistedIter, colCnt, err = s.persistedStmtStatsIter(ctx, options) if err != nil { @@ -81,10 +81,10 @@ func (s *PersistedSQLStats) IterateStatementStats( func (s *PersistedSQLStats) persistedStmtStatsIter( ctx context.Context, options *sqlstats.IteratorOptions, -) (iter sqlutil.InternalRows, expectedColCnt int, err error) { +) (iter isql.Rows, expectedColCnt int, err error) { query, expectedColCnt := 
s.getFetchQueryForStmtStatsTable(ctx, options) - persistedIter, err := s.cfg.InternalExecutor.QueryIteratorEx( + persistedIter, err := s.cfg.DB.Executor().QueryIteratorEx( ctx, "read-stmt-stats", nil, /* txn */ diff --git a/pkg/sql/sqlstats/persistedsqlstats/txn_reader.go b/pkg/sql/sqlstats/persistedsqlstats/txn_reader.go index 875ad1b16d7d..517c509fe021 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/txn_reader.go +++ b/pkg/sql/sqlstats/persistedsqlstats/txn_reader.go @@ -17,11 +17,11 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/persistedsqlstats/sqlstatsutil" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/errors" ) @@ -42,7 +42,7 @@ func (s *PersistedSQLStats) IterateTransactionStats( aggInterval := s.GetAggregationInterval() memIter := newMemTxnStatsIterator(s.SQLStats, options, curAggTs, aggInterval) - var persistedIter sqlutil.InternalRows + var persistedIter isql.Rows var colCnt int persistedIter, colCnt, err = s.persistedTxnStatsIter(ctx, options) if err != nil { @@ -79,22 +79,20 @@ func (s *PersistedSQLStats) IterateTransactionStats( func (s *PersistedSQLStats) persistedTxnStatsIter( ctx context.Context, options *sqlstats.IteratorOptions, -) (iter sqlutil.InternalRows, expectedColCnt int, err error) { +) (iter isql.Rows, expectedColCnt int, err error) { query, expectedColCnt := s.getFetchQueryForTxnStatsTable(options) - - persistedIter, err := s.cfg.InternalExecutor.QueryIteratorEx( + exec := s.cfg.DB.Executor() + if iter, err = exec.QueryIteratorEx( ctx, "read-txn-stats", nil, /* txn */ sessiondata.NodeUserSessionDataOverride, query, - ) - - if err != nil { + ); err != nil { return nil /* iter */, 0 /* expectedColCnt */, err } - return persistedIter, 
expectedColCnt, err + return iter, expectedColCnt, err } func (s *PersistedSQLStats) getFetchQueryForTxnStatsTable( diff --git a/pkg/sql/sqlstats/sslocal/BUILD.bazel b/pkg/sql/sqlstats/sslocal/BUILD.bazel index cd2a765c269e..8d87eed24555 100644 --- a/pkg/sql/sqlstats/sslocal/BUILD.bazel +++ b/pkg/sql/sqlstats/sslocal/BUILD.bazel @@ -16,7 +16,6 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/sslocal", visibility = ["//visibility:public"], deps = [ - "//pkg/kv", "//pkg/roachpb", "//pkg/server/serverpb", "//pkg/settings", @@ -25,7 +24,6 @@ go_library( "//pkg/sql/sqlstats", "//pkg/sql/sqlstats/insights", "//pkg/sql/sqlstats/ssmemstorage", - "//pkg/sql/sqlutil", "//pkg/util/log", "//pkg/util/metric", "//pkg/util/mon", diff --git a/pkg/sql/sqlstats/sslocal/sslocal_provider.go b/pkg/sql/sqlstats/sslocal/sslocal_provider.go index bf7d7e439a08..b508533ada09 100644 --- a/pkg/sql/sqlstats/sslocal/sslocal_provider.go +++ b/pkg/sql/sqlstats/sslocal/sslocal_provider.go @@ -15,14 +15,12 @@ import ( "sort" "time" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/insights" "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/ssmemstorage" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/cockroach/pkg/util/mon" @@ -52,9 +50,7 @@ var _ sqlstats.Provider = &SQLStats{} // GetController returns a sqlstats.Controller responsible for the current // SQLStats. 
-func (s *SQLStats) GetController( - server serverpb.SQLStatusServer, db *kv.DB, ie sqlutil.InternalExecutor, -) *Controller { +func (s *SQLStats) GetController(server serverpb.SQLStatusServer) *Controller { return NewController(s, server) } diff --git a/pkg/sql/stats/BUILD.bazel b/pkg/sql/stats/BUILD.bazel index 95336e7ca4f4..a2eb553e2162 100644 --- a/pkg/sql/stats/BUILD.bazel +++ b/pkg/sql/stats/BUILD.bazel @@ -26,7 +26,6 @@ go_library( "//pkg/clusterversion", "//pkg/jobs/jobspb", "//pkg/keys", - "//pkg/kv", "//pkg/kv/kvclient/rangefeed", "//pkg/roachpb", "//pkg/settings", @@ -36,6 +35,7 @@ go_library( "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/descpb", "//pkg/sql/catalog/descs", + "//pkg/sql/isql", "//pkg/sql/memsize", "//pkg/sql/opt/cat", "//pkg/sql/parser", @@ -46,7 +46,6 @@ go_library( "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", "//pkg/sql/sqlerrors", - "//pkg/sql/sqlutil", "//pkg/sql/types", "//pkg/util/cache", "//pkg/util/encoding", @@ -107,13 +106,13 @@ go_test( "//pkg/sql/catalog/desctestutils", "//pkg/sql/catalog/tabledesc", "//pkg/sql/execinfra", + "//pkg/sql/isql", "//pkg/sql/opt/cat", "//pkg/sql/rowenc", "//pkg/sql/rowexec", "//pkg/sql/sem/catid", "//pkg/sql/sem/eval", "//pkg/sql/sem/tree", - "//pkg/sql/sqlutil", "//pkg/sql/types", "//pkg/testutils", "//pkg/testutils/jobutils", diff --git a/pkg/sql/stats/automatic_stats.go b/pkg/sql/stats/automatic_stats.go index 8766afb46a53..43056b5c32fd 100644 --- a/pkg/sql/stats/automatic_stats.go +++ b/pkg/sql/stats/automatic_stats.go @@ -19,17 +19,16 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/stop" @@ -211,7 +210,7 @@ const ( type Refresher struct { log.AmbientContext st *cluster.Settings - ex sqlutil.InternalExecutor + ex isql.Executor cache *TableStatisticsCache randGen autoStatsRand @@ -267,7 +266,7 @@ type settingOverride struct { func MakeRefresher( ambientCtx log.AmbientContext, st *cluster.Settings, - ex sqlutil.InternalExecutor, + ex isql.Executor, cache *TableStatisticsCache, asOfTime time.Duration, ) *Refresher { @@ -351,10 +350,10 @@ func (r *Refresher) autoStatsFractionStaleRows(explicitSettings *catpb.AutoStats func (r *Refresher) getTableDescriptor( ctx context.Context, tableID descpb.ID, ) (desc catalog.TableDescriptor) { - if err := r.cache.internalExecutorFactory.DescsTxn(ctx, r.cache.ClientDB, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + if err := r.cache.db.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) (err error) { - if desc, err = descriptors.ByIDWithLeased(txn).WithoutNonPublic().Get().Table(ctx, tableID); err != nil { + if desc, err = txn.Descriptors().ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Table(ctx, tableID); err != nil { err = errors.Wrapf(err, "failed to get table descriptor for automatic stats on table id: %d", tableID) } diff --git a/pkg/sql/stats/automatic_stats_test.go b/pkg/sql/stats/automatic_stats_test.go index 2ae0f1eebb35..1c06aced3a9b 100644 --- a/pkg/sql/stats/automatic_stats_test.go +++ b/pkg/sql/stats/automatic_stats_test.go @@ -30,9 +30,9 @@ import ( 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -48,7 +48,7 @@ func TestMaybeRefreshStats(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) + s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer sqlDB.Close() defer s.Stopper().Stop(ctx) @@ -66,14 +66,12 @@ func TestMaybeRefreshStats(t *testing.T) { INSERT INTO t.a VALUES (1); CREATE VIEW t.vw AS SELECT k, k+1 FROM t.a;`) - executor := s.InternalExecutor().(sqlutil.InternalExecutor) + executor := s.InternalExecutor().(isql.Executor) descA := desctestutils.TestingGetPublicTableDescriptor(s.DB(), keys.SystemSQLCodec, "t", "a") cache := NewTableStatisticsCache( 10, /* cacheSize */ - kvDB, - executor, s.ClusterSettings(), - s.InternalExecutorFactory().(descs.TxnManager), + s.InternalDB().(descs.DB), ) require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory))) refresher := MakeRefresher(s.AmbientCtx(), st, executor, cache, time.Microsecond /* asOfTime */) @@ -181,7 +179,7 @@ func TestEnsureAllTablesQueries(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) + s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer sqlDB.Close() defer s.Stopper().Stop(ctx) @@ -194,13 +192,11 @@ func TestEnsureAllTablesQueries(t *testing.T) { sqlRun.Exec(t, `CREATE TABLE t.b (k INT PRIMARY 
KEY);`) - executor := s.InternalExecutor().(sqlutil.InternalExecutor) + executor := s.InternalExecutor().(isql.Executor) cache := NewTableStatisticsCache( 10, /* cacheSize */ - kvDB, - executor, s.ClusterSettings(), - s.InternalExecutorFactory().(descs.TxnManager), + s.InternalDB().(descs.DB), ) require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory))) r := MakeRefresher(s.AmbientCtx(), st, executor, cache, time.Microsecond /* asOfTime */) @@ -283,7 +279,7 @@ func TestAverageRefreshTime(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) + s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer sqlDB.Close() defer s.Stopper().Stop(ctx) @@ -299,14 +295,12 @@ func TestAverageRefreshTime(t *testing.T) { CREATE TABLE t.a (k INT PRIMARY KEY); INSERT INTO t.a VALUES (1);`) - executor := s.InternalExecutor().(sqlutil.InternalExecutor) + executor := s.InternalExecutor().(isql.Executor) table := desctestutils.TestingGetPublicTableDescriptor(s.DB(), keys.SystemSQLCodec, "t", "a") cache := NewTableStatisticsCache( 10, /* cacheSize */ - kvDB, - executor, s.ClusterSettings(), - s.InternalExecutorFactory().(descs.TxnManager), + s.InternalDB().(descs.DB), ) require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory))) refresher := MakeRefresher(s.AmbientCtx(), st, executor, cache, time.Microsecond /* asOfTime */) @@ -529,7 +523,7 @@ func TestAutoStatsReadOnlyTables(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) + s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer sqlDB.Close() defer s.Stopper().Stop(ctx) @@ -548,13 +542,11 @@ func TestAutoStatsReadOnlyTables(t *testing.T) { `CREATE SCHEMA my_schema; CREATE TABLE my_schema.b (j INT PRIMARY KEY);`) - executor := 
s.InternalExecutor().(sqlutil.InternalExecutor) + executor := s.InternalExecutor().(isql.Executor) cache := NewTableStatisticsCache( 10, /* cacheSize */ - kvDB, - executor, s.ClusterSettings(), - s.InternalExecutorFactory().(descs.TxnManager), + s.InternalDB().(descs.DB), ) require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory))) refresher := MakeRefresher(s.AmbientCtx(), st, executor, cache, time.Microsecond /* asOfTime */) @@ -587,7 +579,7 @@ func TestAutoStatsOnStartupClusterSettingOff(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) + s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer sqlDB.Close() defer s.Stopper().Stop(ctx) @@ -605,13 +597,11 @@ func TestAutoStatsOnStartupClusterSettingOff(t *testing.T) { ALTER TABLE t.b SET (sql_stats_automatic_collection_enabled = false); CREATE TABLE t.c (k INT PRIMARY KEY);`) - executor := s.InternalExecutor().(sqlutil.InternalExecutor) + executor := s.InternalExecutor().(isql.Executor) cache := NewTableStatisticsCache( 10, /* cacheSize */ - kvDB, - executor, s.ClusterSettings(), - s.InternalExecutorFactory().(descs.TxnManager), + s.InternalDB().(descs.DB), ) require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory))) refresher := MakeRefresher(s.AmbientCtx(), st, executor, cache, time.Microsecond /* asOfTime */) @@ -646,20 +636,18 @@ func TestNoRetryOnFailure(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) st := cluster.MakeTestingClusterSettings() evalCtx := eval.NewTestingEvalContext(st) defer evalCtx.Stop(ctx) - executor := s.InternalExecutor().(sqlutil.InternalExecutor) + executor := s.InternalExecutor().(isql.Executor) cache := 
NewTableStatisticsCache( 10, /* cacheSize */ - kvDB, - executor, s.ClusterSettings(), - s.InternalExecutorFactory().(descs.TxnManager), + s.InternalDB().(descs.DB), ) require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory))) r := MakeRefresher(s.AmbientCtx(), st, executor, cache, time.Microsecond /* asOfTime */) @@ -763,20 +751,18 @@ func TestAnalyzeSystemTables(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) + s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) st := cluster.MakeTestingClusterSettings() AutomaticStatisticsClusterMode.Override(ctx, &st.SV, false) evalCtx := eval.NewTestingEvalContext(st) defer evalCtx.Stop(ctx) - executor := s.InternalExecutor().(sqlutil.InternalExecutor) + executor := s.InternalExecutor().(isql.Executor) cache := NewTableStatisticsCache( 10, /* cacheSize */ - kvDB, - executor, s.ClusterSettings(), - s.InternalExecutorFactory().(descs.TxnManager), + s.InternalDB().(descs.DB), ) require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory))) var tableNames []string diff --git a/pkg/sql/stats/delete_stats.go b/pkg/sql/stats/delete_stats.go index 02e524c3e941..4081e8e934d7 100644 --- a/pkg/sql/stats/delete_stats.go +++ b/pkg/sql/stats/delete_stats.go @@ -17,11 +17,10 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" ) @@ -52,11 +51,7 @@ var TableStatisticsRetentionPeriod = settings.RegisterDurationSetting( // DeleteOldStatsForColumns keeps the most recent 
keepCount automatic // statistics and deletes all the others. func DeleteOldStatsForColumns( - ctx context.Context, - executor sqlutil.InternalExecutor, - txn *kv.Txn, - tableID descpb.ID, - columnIDs []descpb.ColumnID, + ctx context.Context, txn isql.Txn, tableID descpb.ID, columnIDs []descpb.ColumnID, ) error { columnIDsVal := tree.NewDArray(types.Int) for _, c := range columnIDs { @@ -68,8 +63,8 @@ func DeleteOldStatsForColumns( // This will delete all old statistics for the given table and columns, // including stats created manually (except for a few automatic statistics, // which are identified by the name AutoStatsName). - _, err := executor.Exec( - ctx, "delete-statistics", txn, + _, err := txn.Exec( + ctx, "delete-statistics", txn.KV(), `DELETE FROM system.table_statistics WHERE "tableID" = $1 AND "columnIDs" = $3 @@ -94,8 +89,7 @@ func DeleteOldStatsForColumns( // IDs that are older than keepTime. func DeleteOldStatsForOtherColumns( ctx context.Context, - executor sqlutil.InternalExecutor, - txn *kv.Txn, + txn isql.Txn, tableID descpb.ID, columnIDs [][]descpb.ColumnID, keepTime time.Duration, @@ -120,8 +114,8 @@ func DeleteOldStatsForOtherColumns( // This will delete all statistics for the given table that are not // on the given columns and are older than keepTime. 
- _, err := executor.Exec( - ctx, "delete-statistics", txn, + _, err := txn.Exec( + ctx, "delete-statistics", txn.KV(), fmt.Sprintf(`DELETE FROM system.table_statistics WHERE "tableID" = $1 AND "columnIDs"::string NOT IN (%s) diff --git a/pkg/sql/stats/delete_stats_test.go b/pkg/sql/stats/delete_stats_test.go index 2925ad405df1..bd6d864e6f46 100644 --- a/pkg/sql/stats/delete_stats_test.go +++ b/pkg/sql/stats/delete_stats_test.go @@ -20,11 +20,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -38,15 +37,13 @@ func TestDeleteOldStatsForColumns(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - s, _, db := serverutils.StartServer(t, base.TestServerArgs{}) + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - ex := s.InternalExecutor().(sqlutil.InternalExecutor) + db := s.InternalDB().(descs.DB) cache := NewTableStatisticsCache( 10, /* cacheSize */ - db, - ex, s.ClusterSettings(), - s.InternalExecutorFactory().(descs.TxnManager), + db, ) require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory))) @@ -244,7 +241,7 @@ func TestDeleteOldStatsForColumns(t *testing.T) { for i := range testData { stat := &testData[i] - if err := insertTableStat(ctx, db, ex, stat); err != nil { + if err := insertTableStat(ctx, db.Executor(), stat); err != nil { t.Fatal(err) } } @@ -255,8 +252,8 @@ func 
TestDeleteOldStatsForColumns(t *testing.T) { checkDelete := func( tableID descpb.ID, columnIDs []descpb.ColumnID, expectDeleted map[uint64]struct{}, ) error { - if err := s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return DeleteOldStatsForColumns(ctx, ex, txn, tableID, columnIDs) + if err := s.InternalDB().(isql.DB).Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return DeleteOldStatsForColumns(ctx, txn, tableID, columnIDs) }); err != nil { return err } @@ -335,15 +332,13 @@ func TestDeleteOldStatsForOtherColumns(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - s, _, db := serverutils.StartServer(t, base.TestServerArgs{}) + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - ex := s.InternalExecutor().(sqlutil.InternalExecutor) + db := s.InternalDB().(isql.DB) cache := NewTableStatisticsCache( 10, /* cacheSize */ - db, - ex, s.ClusterSettings(), - s.InternalExecutorFactory().(descs.TxnManager), + s.InternalDB().(descs.DB), ) require.NoError(t, cache.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory))) testData := []TableStatisticProto{ @@ -538,7 +533,7 @@ func TestDeleteOldStatsForOtherColumns(t *testing.T) { for i := range testData { stat := &testData[i] - if err := insertTableStat(ctx, db, ex, stat); err != nil { + if err := insertTableStat(ctx, db.Executor(), stat); err != nil { t.Fatal(err) } } @@ -549,8 +544,8 @@ func TestDeleteOldStatsForOtherColumns(t *testing.T) { checkDelete := func( tableID descpb.ID, columnIDs [][]descpb.ColumnID, expectDeleted map[uint64]struct{}, ) error { - if err := s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - return DeleteOldStatsForOtherColumns(ctx, ex, txn, tableID, columnIDs, defaultKeepTime) + if err := db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + return DeleteOldStatsForOtherColumns(ctx, txn, tableID, columnIDs, defaultKeepTime) }); err != nil { return err } diff --git 
a/pkg/sql/stats/new_stat.go b/pkg/sql/stats/new_stat.go index b0dbe5ada7dd..adc4aa21cfba 100644 --- a/pkg/sql/stats/new_stat.go +++ b/pkg/sql/stats/new_stat.go @@ -14,11 +14,10 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" @@ -27,18 +26,13 @@ import ( // InsertNewStats inserts a slice of statistics at the current time into the // system table. func InsertNewStats( - ctx context.Context, - settings *cluster.Settings, - executor sqlutil.InternalExecutor, - txn *kv.Txn, - tableStats []*TableStatisticProto, + ctx context.Context, settings *cluster.Settings, txn isql.Txn, tableStats []*TableStatisticProto, ) error { var err error for _, statistic := range tableStats { err = InsertNewStat( ctx, settings, - executor, txn, statistic.TableID, statistic.Name, @@ -65,8 +59,7 @@ func InsertNewStats( func InsertNewStat( ctx context.Context, settings *cluster.Settings, - executor sqlutil.InternalExecutor, - txn *kv.Txn, + txn isql.Txn, tableID descpb.ID, name string, columnIDs []descpb.ColumnID, @@ -99,8 +92,8 @@ func InsertNewStat( if partialPredicate != "" { return errors.New("unable to insert new partial statistic as cluster version is from before V23.1.") } - _, err := executor.Exec( - ctx, "insert-statistic", txn, + _, err := txn.Exec( + ctx, "insert-statistic", txn.KV(), `INSERT INTO system.table_statistics ( "tableID", "name", @@ -130,8 +123,8 @@ func InsertNewStat( predicateValue = partialPredicate } - _, err := executor.Exec( - ctx, "insert-statistic", txn, + _, err := txn.Exec( + ctx, 
"insert-statistic", txn.KV(), `INSERT INTO system.table_statistics ( "tableID", "name", diff --git a/pkg/sql/stats/stats_cache.go b/pkg/sql/stats/stats_cache.go index a031d8f747b8..4c7cd8ea6224 100644 --- a/pkg/sql/stats/stats_cache.go +++ b/pkg/sql/stats/stats_cache.go @@ -18,7 +18,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -30,7 +29,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/cache" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -70,11 +68,8 @@ type TableStatisticsCache struct { // from the system table. numInternalQueries int64 } - ClientDB *kv.DB - SQLExecutor sqlutil.InternalExecutor - Settings *cluster.Settings - - internalExecutorFactory descs.TxnManager + db descs.DB + settings *cluster.Settings // Used when decoding KV from the range feed. datumAlloc tree.DatumAlloc @@ -118,17 +113,11 @@ type cacheEntry struct { // NewTableStatisticsCache creates a new TableStatisticsCache that can hold // statistics for tables. 
func NewTableStatisticsCache( - cacheSize int, - db *kv.DB, - sqlExecutor sqlutil.InternalExecutor, - settings *cluster.Settings, - ief descs.TxnManager, + cacheSize int, settings *cluster.Settings, db descs.DB, ) *TableStatisticsCache { tableStatsCache := &TableStatisticsCache{ - ClientDB: db, - SQLExecutor: sqlExecutor, - Settings: settings, - internalExecutorFactory: ief, + db: db, + settings: settings, } tableStatsCache.mu.cache = cache.NewUnorderedCache(cache.Config{ Policy: cache.CacheLRU, @@ -179,7 +168,7 @@ func (sc *TableStatisticsCache) Start( ctx, "table-stats-cache", []roachpb.Span{statsTableSpan}, - sc.ClientDB.Clock().Now(), + sc.db.KV().Clock().Now(), handleEvent, rangefeed.WithSystemTablePriority(), ) @@ -222,10 +211,10 @@ func decodeTableStatisticsKV( func (sc *TableStatisticsCache) GetTableStats( ctx context.Context, table catalog.TableDescriptor, ) ([]*TableStatistic, error) { - if !statsUsageAllowed(table, sc.Settings) { + if !statsUsageAllowed(table, sc.settings) { return nil, nil } - forecast := forecastAllowed(table, sc.Settings) + forecast := forecastAllowed(table, sc.settings) return sc.getTableStatsFromCache(ctx, table.GetID(), &forecast) } @@ -646,10 +635,10 @@ func (sc *TableStatisticsCache) parseStats( // TypeDescriptor's with the timestamp that the stats were recorded with. // // TODO(ajwerner): We now do delete members from enum types. See #67050. 
- if err := sc.internalExecutorFactory.DescsTxn(ctx, sc.ClientDB, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + if err := sc.db.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { - resolver := descs.NewDistSQLTypeResolver(descriptors, txn) + resolver := descs.NewDistSQLTypeResolver(txn.Descriptors(), txn.KV()) var err error res.HistogramData.ColumnType, err = resolver.ResolveTypeByOID(ctx, typ.Oid()) return err @@ -751,7 +740,7 @@ func (tabStat *TableStatistic) String() string { func (sc *TableStatisticsCache) getTableStatsFromDB( ctx context.Context, tableID descpb.ID, forecast bool, ) ([]*TableStatistic, error) { - partialStatisticsColumnsVerActive := sc.Settings.Version.IsActive(ctx, clusterversion.V23_1AddPartialStatisticsColumns) + partialStatisticsColumnsVerActive := sc.settings.Version.IsActive(ctx, clusterversion.V23_1AddPartialStatisticsColumns) var partialPredicateCol string var fullStatisticIDCol string if partialStatisticsColumnsVerActive { @@ -782,7 +771,7 @@ ORDER BY "createdAt" DESC, "columnIDs" DESC, "statisticID" DESC // TODO(michae2): Add an index on system.table_statistics (tableID, createdAt, // columnIDs, statisticID). 
- it, err := sc.SQLExecutor.QueryIterator( + it, err := sc.db.Executor().QueryIterator( ctx, "get-table-statistics", nil /* txn */, getTableStatisticsStmt, tableID, ) if err != nil { diff --git a/pkg/sql/stats/stats_cache_test.go b/pkg/sql/stats/stats_cache_test.go index ddc0577b87a0..4b867de446ab 100644 --- a/pkg/sql/stats/stats_cache_test.go +++ b/pkg/sql/stats/stats_cache_test.go @@ -22,13 +22,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -41,9 +40,7 @@ import ( "github.com/stretchr/testify/require" ) -func insertTableStat( - ctx context.Context, db *kv.DB, ex sqlutil.InternalExecutor, stat *TableStatisticProto, -) error { +func insertTableStat(ctx context.Context, ex isql.Executor, stat *TableStatisticProto) error { insertStatStmt := ` INSERT INTO system.table_statistics ("tableID", "statisticID", name, "columnIDs", "createdAt", "rowCount", "distinctCount", "nullCount", "avgSize", histogram) @@ -144,7 +141,7 @@ func checkStats(actual []*TableStatistic, expected []*TableStatisticProto) bool } func initTestData( - ctx context.Context, db *kv.DB, ex sqlutil.InternalExecutor, + ctx context.Context, ex isql.Executor, ) (map[descpb.ID][]*TableStatisticProto, error) { // The expected stats must be ordered by TableID+, CreatedAt- so they can // later be compared with the returned stats using reflect.DeepEqual. 
@@ -202,7 +199,7 @@ func initTestData( for i := range expStatsList { stat := &expStatsList[i] - if err := insertTableStat(ctx, db, ex, stat); err != nil { + if err := insertTableStat(ctx, ex, stat); err != nil { return nil, err } @@ -220,11 +217,10 @@ func TestCacheBasic(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - s, _, db := serverutils.StartServer(t, base.TestServerArgs{}) + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - ex := s.InternalExecutor().(sqlutil.InternalExecutor) - - expectedStats, err := initTestData(ctx, db, ex) + db := s.InternalDB().(descs.DB) + expectedStats, err := initTestData(ctx, db.Executor()) if err != nil { t.Fatal(err) } @@ -240,13 +236,7 @@ func TestCacheBasic(t *testing.T) { // Create a cache and iteratively query the cache for each tableID. This // will result in the cache getting populated. When the stats cache size is // exceeded, entries should be evicted according to the LRU policy. - sc := NewTableStatisticsCache( - 2, /* cacheSize */ - db, - ex, - s.ClusterSettings(), - s.InternalExecutorFactory().(descs.TxnManager), - ) + sc := NewTableStatisticsCache(2 /* cacheSize */, s.ClusterSettings(), db) require.NoError(t, sc.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory))) for _, tableID := range tableIDs { checkStatsForTable(ctx, t, sc, expectedStats[tableID], tableID) @@ -284,7 +274,7 @@ func TestCacheBasic(t *testing.T) { DistinctCount: 10, NullCount: 0, } - if err := insertTableStat(ctx, db, ex, &stat); err != nil { + if err := insertTableStat(ctx, db.Executor(), &stat); err != nil { t.Fatal(err) } @@ -344,15 +334,10 @@ func TestCacheUserDefinedTypes(t *testing.T) { sqlRunner.Exec(t, `INSERT INTO tt VALUES ('hello');`) sqlRunner.Exec(t, `CREATE STATISTICS s FROM tt;`) - _ = kvDB + insqlDB := s.InternalDB().(descs.DB) + // Make a stats cache. 
- sc := NewTableStatisticsCache( - 1, - kvDB, - s.InternalExecutor().(sqlutil.InternalExecutor), - s.ClusterSettings(), - s.InternalExecutorFactory().(descs.TxnManager), - ) + sc := NewTableStatisticsCache(1, s.ClusterSettings(), insqlDB) require.NoError(t, sc.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory))) tbl := desctestutils.TestingGetPublicTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "tt") // Get stats for our table. We are ensuring here that the access to the stats @@ -388,11 +373,11 @@ func TestCacheWait(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - s, _, db := serverutils.StartServer(t, base.TestServerArgs{}) + s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - ex := s.InternalExecutor().(sqlutil.InternalExecutor) + db := s.InternalDB().(descs.DB) - expectedStats, err := initTestData(ctx, db, ex) + expectedStats, err := initTestData(ctx, db.Executor()) if err != nil { t.Fatal(err) } @@ -404,13 +389,7 @@ func TestCacheWait(t *testing.T) { tableIDs = append(tableIDs, tableID) } sort.Sort(tableIDs) - sc := NewTableStatisticsCache( - len(tableIDs), /* cacheSize */ - db, - ex, - s.ClusterSettings(), - s.InternalExecutorFactory().(descs.TxnManager), - ) + sc := NewTableStatisticsCache(len(tableIDs) /* cacheSize */, s.ClusterSettings(), db) require.NoError(t, sc.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory))) for _, tableID := range tableIDs { checkStatsForTable(ctx, t, sc, expectedStats[tableID], tableID) @@ -457,14 +436,11 @@ func TestCacheAutoRefresh(t *testing.T) { ctx := context.Background() tc := serverutils.StartNewTestCluster(t, 3 /* numNodes */, base.TestClusterArgs{}) defer tc.Stopper().Stop(ctx) - s := tc.Server(0) sc := NewTableStatisticsCache( 10, /* cacheSize */ - s.DB(), - s.InternalExecutor().(sqlutil.InternalExecutor), s.ClusterSettings(), - s.InternalExecutorFactory().(descs.TxnManager), + s.InternalDB().(descs.DB), ) 
require.NoError(t, sc.Start(ctx, keys.SystemSQLCodec, s.RangeFeedFactory().(*rangefeed.Factory))) diff --git a/pkg/sql/stmtdiagnostics/BUILD.bazel b/pkg/sql/stmtdiagnostics/BUILD.bazel index f9e22c5d21fc..c02afda348ea 100644 --- a/pkg/sql/stmtdiagnostics/BUILD.bazel +++ b/pkg/sql/stmtdiagnostics/BUILD.bazel @@ -8,13 +8,12 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/clusterversion", - "//pkg/kv", "//pkg/multitenant", "//pkg/settings", "//pkg/settings/cluster", + "//pkg/sql/isql", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/sql/types", "//pkg/util/intsets", "//pkg/util/log", diff --git a/pkg/sql/stmtdiagnostics/statement_diagnostics.go b/pkg/sql/stmtdiagnostics/statement_diagnostics.go index 61b150193d63..38eedcfdbb3c 100644 --- a/pkg/sql/stmtdiagnostics/statement_diagnostics.go +++ b/pkg/sql/stmtdiagnostics/statement_diagnostics.go @@ -17,13 +17,12 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/multitenant" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -106,8 +105,7 @@ type Registry struct { rand *rand.Rand } st *cluster.Settings - ie sqlutil.InternalExecutor - db *kv.DB + db isql.DB } // Request describes a statement diagnostics request along with some conditional @@ -136,9 +134,8 @@ func (r *Request) continueCollecting(st *cluster.Settings) bool { } // NewRegistry constructs a new Registry. 
-func NewRegistry(ie sqlutil.InternalExecutor, db *kv.DB, st *cluster.Settings) *Registry { +func NewRegistry(db isql.DB, st *cluster.Settings) *Registry { r := &Registry{ - ie: ie, db: db, st: st, } @@ -306,9 +303,9 @@ func (r *Registry) insertRequestInternal( var reqID RequestID var expiresAt time.Time - err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + err := r.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { // Check if there's already a pending request for this fingerprint. - row, err := r.ie.QueryRowEx(ctx, "stmt-diag-check-pending", txn, + row, err := txn.QueryRowEx(ctx, "stmt-diag-check-pending", txn.KV(), sessiondata.RootUserSessionDataOverride, `SELECT count(1) FROM system.statement_diagnostics_requests WHERE @@ -354,8 +351,8 @@ func (r *Registry) insertRequestInternal( } stmt := "INSERT INTO system.statement_diagnostics_requests (" + insertColumns + ") VALUES (" + valuesClause + ") RETURNING id;" - row, err = r.ie.QueryRowEx( - ctx, "stmt-diag-insert-request", txn, + row, err = txn.QueryRowEx( + ctx, "stmt-diag-insert-request", txn.KV(), sessiondata.RootUserSessionDataOverride, stmt, qargs..., ) @@ -387,7 +384,7 @@ func (r *Registry) insertRequestInternal( // CancelRequest is part of the server.StmtDiagnosticsRequester interface. func (r *Registry) CancelRequest(ctx context.Context, requestID int64) error { - row, err := r.ie.QueryRowEx(ctx, "stmt-diag-cancel-request", nil, /* txn */ + row, err := r.db.Executor().QueryRowEx(ctx, "stmt-diag-cancel-request", nil, /* txn */ sessiondata.RootUserSessionDataOverride, // Rather than deleting the row from the table, we choose to mark the // request as "expired" by setting `expires_at` into the past. 
This will @@ -523,9 +520,9 @@ func (r *Registry) InsertStatementDiagnostics( ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) // nolint:context defer cancel() } - err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + err := r.db.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { if requestID != 0 { - row, err := r.ie.QueryRowEx(ctx, "stmt-diag-check-completed", txn, + row, err := txn.QueryRowEx(ctx, "stmt-diag-check-completed", txn.KV(), sessiondata.RootUserSessionDataOverride, "SELECT count(1) FROM system.statement_diagnostics_requests WHERE id = $1 AND completed = false", requestID) @@ -560,8 +557,8 @@ func (r *Registry) InsertStatementDiagnostics( bundle = bundle[len(chunk):] // Insert the chunk into system.statement_bundle_chunks. - row, err := r.ie.QueryRowEx( - ctx, "stmt-bundle-chunks-insert", txn, + row, err := txn.QueryRowEx( + ctx, "stmt-bundle-chunks-insert", txn.KV(), sessiondata.RootUserSessionDataOverride, "INSERT INTO system.statement_bundle_chunks(description, data) VALUES ($1, $2) RETURNING id", "statement diagnostics bundle", @@ -582,8 +579,8 @@ func (r *Registry) InsertStatementDiagnostics( collectionTime := timeutil.Now() // Insert the collection metadata into system.statement_diagnostics. 
- row, err := r.ie.QueryRowEx( - ctx, "stmt-diag-insert", txn, + row, err := txn.QueryRowEx( + ctx, "stmt-diag-insert", txn.KV(), sessiondata.RootUserSessionDataOverride, "INSERT INTO system.statement_diagnostics "+ "(statement_fingerprint, statement, collected_at, bundle_chunks, error) "+ @@ -616,7 +613,7 @@ func (r *Registry) InsertStatementDiagnostics( shouldMarkCompleted = false } } - _, err := r.ie.ExecEx(ctx, "stmt-diag-mark-completed", txn, + _, err := txn.ExecEx(ctx, "stmt-diag-mark-completed", txn.KV(), sessiondata.RootUserSessionDataOverride, "UPDATE system.statement_diagnostics_requests "+ "SET completed = $1, statement_diagnostics_id = $2 WHERE id = $3", @@ -628,7 +625,7 @@ func (r *Registry) InsertStatementDiagnostics( // Insert a completed request into system.statement_diagnostics_request. // This is necessary because the UI uses this table to discover completed // diagnostics. - _, err := r.ie.ExecEx(ctx, "stmt-diag-add-completed", txn, + _, err := txn.ExecEx(ctx, "stmt-diag-add-completed", txn.KV(), sessiondata.RootUserSessionDataOverride, "INSERT INTO system.statement_diagnostics_requests"+ " (completed, statement_fingerprint, statement_diagnostics_id, requested_at)"+ @@ -662,7 +659,7 @@ func (r *Registry) pollRequests(ctx context.Context) error { if isSamplingProbabilitySupported { extraColumns = ", sampling_probability" } - it, err := r.ie.QueryIteratorEx(ctx, "stmt-diag-poll", nil, /* txn */ + it, err := r.db.Executor().QueryIteratorEx(ctx, "stmt-diag-poll", nil, /* txn */ sessiondata.RootUserSessionDataOverride, fmt.Sprintf(`SELECT id, statement_fingerprint, min_execution_latency, expires_at%s FROM system.statement_diagnostics_requests diff --git a/pkg/sql/syntheticprivilegecache/BUILD.bazel b/pkg/sql/syntheticprivilegecache/BUILD.bazel index 80b0bb5b47bf..b60dcbaab12f 100644 --- a/pkg/sql/syntheticprivilegecache/BUILD.bazel +++ b/pkg/sql/syntheticprivilegecache/BUILD.bazel @@ -19,11 +19,11 @@ go_library( "//pkg/sql/catalog/catpb", 
"//pkg/sql/catalog/descpb", "//pkg/sql/catalog/descs", + "//pkg/sql/isql", "//pkg/sql/privilege", "//pkg/sql/sem/catconstants", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/sql/syntheticprivilege", "//pkg/util/log", "//pkg/util/log/logcrash", diff --git a/pkg/sql/syntheticprivilegecache/cache.go b/pkg/sql/syntheticprivilegecache/cache.go index a7fac8e0adc9..4c131f876af7 100644 --- a/pkg/sql/syntheticprivilegecache/cache.go +++ b/pkg/sql/syntheticprivilegecache/cache.go @@ -23,11 +23,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/syntheticprivilege" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/log/logcrash" @@ -43,7 +43,7 @@ type Cache struct { db *kv.DB c *cacheutil.Cache virtualSchemas catalog.VirtualSchemas - ief descs.TxnManager + ief descs.DB warmed chan struct{} stopper *stop.Stopper } @@ -55,7 +55,7 @@ func New( db *kv.DB, account mon.BoundAccount, virtualSchemas catalog.VirtualSchemas, - ief descs.TxnManager, + ief descs.DB, ) *Cache { return &Cache{ settings: settings, @@ -69,9 +69,9 @@ func New( } func (c *Cache) Get( - ctx context.Context, txn *kv.Txn, col *descs.Collection, spo syntheticprivilege.Object, + ctx context.Context, txn isql.Txn, col *descs.Collection, spo syntheticprivilege.Object, ) (*catpb.PrivilegeDescriptor, error) { - _, desc, err := descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn).Get(), syntheticprivilege.SystemPrivilegesTableName) + _, desc, err := 
descs.PrefixAndTable(ctx, col.ByNameWithLeased(txn.KV()).Get(), syntheticprivilege.SystemPrivilegesTableName) if err != nil { return nil, err } @@ -123,7 +123,7 @@ func (c *Cache) getFromCache( // corresponding privilege object. This is only used if the we cannot // resolve the PrivilegeDescriptor from the cache. func (c *Cache) readFromStorage( - ctx context.Context, txn *kv.Txn, spo syntheticprivilege.Object, + ctx context.Context, txn isql.Txn, spo syntheticprivilege.Object, ) (_ *catpb.PrivilegeDescriptor, retErr error) { query := fmt.Sprintf( @@ -131,9 +131,8 @@ func (c *Cache) readFromStorage( catconstants.SystemPrivilegeTableName, ) // TODO(ajwerner): Use an internal executor bound to the transaction. - ie := c.ief.MakeInternalExecutorWithoutTxn() - it, err := ie.QueryIteratorEx( - ctx, `get-system-privileges`, txn, sessiondata.NodeUserSessionDataOverride, query, spo.GetPath(), + it, err := txn.QueryIteratorEx( + ctx, `get-system-privileges`, txn.KV(), sessiondata.NodeUserSessionDataOverride, query, spo.GetPath(), ) if err != nil { return nil, err @@ -210,9 +209,10 @@ func (c *Cache) start(ctx context.Context) error { `SELECT path, username, privileges, grant_options FROM system.%s WHERE path LIKE $1`, catconstants.SystemPrivilegeTableName, ) - if err := c.ief.DescsTxnWithExecutor(ctx, c.db, nil /* sessionData */, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, ie sqlutil.InternalExecutor) (retErr error) { - _, systemPrivDesc, err := descs.PrefixAndTable(ctx, descsCol.ByNameWithLeased(txn).Get(), syntheticprivilege.SystemPrivilegesTableName) + if err := c.ief.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, + ) (retErr error) { + _, systemPrivDesc, err := descs.PrefixAndTable(ctx, txn.Descriptors().ByNameWithLeased(txn.KV()).Get(), syntheticprivilege.SystemPrivilegesTableName) if err != nil { return err } @@ -230,8 +230,8 @@ func (c *Cache) start(ctx context.Context) error { } tableVersions = 
[]descpb.DescriptorVersion{systemPrivDesc.GetVersion()} - it, err := ie.QueryIteratorEx( - ctx, `get-vtable-privileges`, txn, sessiondata.NodeUserSessionDataOverride, + it, err := txn.QueryIteratorEx( + ctx, `get-vtable-privileges`, txn.KV(), sessiondata.NodeUserSessionDataOverride, query, fmt.Sprintf("/%s/%%", syntheticprivilege.VirtualTablePathPrefix), ) if err != nil { diff --git a/pkg/sql/table.go b/pkg/sql/table.go index 1c9c06f29527..0c7a9747bbcd 100644 --- a/pkg/sql/table.go +++ b/pkg/sql/table.go @@ -68,7 +68,7 @@ func (p *planner) createDropDatabaseJob( Progress: jobspb.SchemaChangeProgress{}, NonCancelable: true, } - newJob, err := p.extendedEvalCtx.QueueJob(ctx, p.Txn(), jobRecord) + newJob, err := p.extendedEvalCtx.QueueJob(ctx, p.InternalSQLTxn(), jobRecord) if err != nil { return err } @@ -93,7 +93,7 @@ func (p *planner) createNonDropDatabaseChangeJob( Progress: jobspb.SchemaChangeProgress{}, NonCancelable: true, } - newJob, err := p.extendedEvalCtx.QueueJob(ctx, p.Txn(), jobRecord) + newJob, err := p.extendedEvalCtx.QueueJob(ctx, p.InternalSQLTxn(), jobRecord) if err != nil { return err } diff --git a/pkg/sql/table_test.go b/pkg/sql/table_test.go index 33c24f725238..ae9b102e41d6 100644 --- a/pkg/sql/table_test.go +++ b/pkg/sql/table_test.go @@ -19,7 +19,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catenumpb" @@ -29,6 +28,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" 
"github.com/cockroachdb/cockroach/pkg/sql/tests" @@ -441,8 +441,8 @@ CREATE TABLE test.tt (x test.t); desc := desctestutils.TestingGetPublicTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "tt") typLookup := func(ctx context.Context, id descpb.ID) (tree.TypeName, catalog.TypeDescriptor, error) { var typeDesc catalog.TypeDescriptor - if err := TestingDescsTxn(ctx, s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) (err error) { - typeDesc, err = col.ByID(txn).Get().Type(ctx, id) + if err := TestingDescsTxn(ctx, s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) (err error) { + typeDesc, err = col.ByID(txn.KV()).Get().Type(ctx, id) return err }); err != nil { return tree.TypeName{}, nil, err diff --git a/pkg/sql/temporary_schema.go b/pkg/sql/temporary_schema.go index eae625eaa53e..d465dc8082de 100644 --- a/pkg/sql/temporary_schema.go +++ b/pkg/sql/temporary_schema.go @@ -29,10 +29,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/nstree" "github.com/cockroachdb/cockroach/pkg/sql/clusterunique" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/intsets" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -154,27 +154,22 @@ func temporarySchemaSessionID(scName string) (bool, clusterunique.ID, error) { // cleanupSessionTempObjects removes all temporary objects (tables, sequences, // views, temporary schema) created by the session. 
func cleanupSessionTempObjects( - ctx context.Context, - settings *cluster.Settings, - ief sqlutil.InternalExecutorFactory, - db *kv.DB, - codec keys.SQLCodec, - sessionID clusterunique.ID, + ctx context.Context, db descs.DB, codec keys.SQLCodec, sessionID clusterunique.ID, ) error { tempSchemaName := temporarySchemaName(sessionID) - return ief.(descs.TxnManager).DescsTxnWithExecutor( - ctx, db, nil /* sessionData */, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, - ie sqlutil.InternalExecutor, + return db.DescsTxn( + ctx, func( + ctx context.Context, txn descs.Txn, ) error { // We are going to read all database descriptor IDs, then for each database // we will drop all the objects under the temporary schema. - allDbDescs, err := descsCol.GetAllDatabaseDescriptors(ctx, txn) + descsCol := txn.Descriptors() + allDbDescs, err := descsCol.GetAllDatabaseDescriptors(ctx, txn.KV()) if err != nil { return err } for _, dbDesc := range allDbDescs { - tempSchema, err := descsCol.ByName(txn).MaybeGet().Schema(ctx, dbDesc, tempSchemaName) + tempSchema, err := descsCol.ByName(txn.KV()).MaybeGet().Schema(ctx, dbDesc, tempSchemaName) if err != nil { return err } @@ -186,7 +181,6 @@ func cleanupSessionTempObjects( txn, descsCol, codec, - ie, dbDesc, tempSchema, ); err != nil { @@ -196,14 +190,14 @@ func cleanupSessionTempObjects( // itself may still exist (eg. a temporary table was created and then // dropped). So we remove the namespace table entry of the temporary // schema. - b := txn.NewBatch() + b := txn.KV().NewBatch() const kvTrace = false if err := descsCol.DeleteTempSchemaToBatch( ctx, kvTrace, dbDesc, tempSchemaName, b, ); err != nil { return err } - if err := txn.Run(ctx, b); err != nil { + if err := txn.KV().Run(ctx, b); err != nil { return err } } @@ -220,14 +214,13 @@ func cleanupSessionTempObjects( // API or avoid it entirely. 
func cleanupTempSchemaObjects( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, descsCol *descs.Collection, codec keys.SQLCodec, - ie sqlutil.InternalExecutor, db catalog.DatabaseDescriptor, sc catalog.SchemaDescriptor, ) error { - objects, err := descsCol.GetAllObjectsInSchema(ctx, txn, db, sc) + objects, err := descsCol.GetAllObjectsInSchema(ctx, txn.KV(), db, sc) if err != nil { return err } @@ -305,15 +298,15 @@ func cleanupTempSchemaObjects( if _, ok := tblDescsByID[d.ID]; ok { return nil } - dTableDesc, err := descsCol.ByID(txn).WithoutNonPublic().Get().Table(ctx, d.ID) + dTableDesc, err := descsCol.ByID(txn.KV()).WithoutNonPublic().Get().Table(ctx, d.ID) if err != nil { return err } - db, err := descsCol.ByID(txn).WithoutNonPublic().Get().Database(ctx, dTableDesc.GetParentID()) + db, err := descsCol.ByID(txn.KV()).WithoutNonPublic().Get().Database(ctx, dTableDesc.GetParentID()) if err != nil { return err } - sc, err := descsCol.ByID(txn).WithoutNonPublic().Get().Schema(ctx, dTableDesc.GetParentSchemaID()) + sc, err := descsCol.ByID(txn.KV()).WithoutNonPublic().Get().Schema(ctx, dTableDesc.GetParentSchemaID()) if err != nil { return err } @@ -328,10 +321,10 @@ func cleanupTempSchemaObjects( tree.Name(sc.GetName()), tree.Name(dTableDesc.GetName()), ) - _, err = ie.ExecEx( + _, err = txn.ExecEx( ctx, "delete-temp-dependent-col", - txn, + txn.KV(), override, fmt.Sprintf( "ALTER TABLE %s ALTER COLUMN %s DROP DEFAULT", @@ -371,7 +364,7 @@ func cleanupTempSchemaObjects( query.WriteString(tbName.FQString()) } query.WriteString(" CASCADE") - _, err = ie.ExecEx(ctx, "delete-temp-"+toDelete.typeName, txn, override, query.String()) + _, err = txn.ExecEx(ctx, "delete-temp-"+toDelete.typeName, txn.KV(), override, query.String()) if err != nil { return err } @@ -388,15 +381,13 @@ type isMeta1LeaseholderFunc func(context.Context, hlc.ClockTimestamp) (bool, err // down cleanly. 
type TemporaryObjectCleaner struct { settings *cluster.Settings - db *kv.DB + db descs.DB codec keys.SQLCodec // statusServer gives access to the SQLStatus service. - statusServer serverpb.SQLStatusServer - isMeta1LeaseholderFunc isMeta1LeaseholderFunc - testingKnobs ExecutorTestingKnobs - metrics *temporaryObjectCleanerMetrics - collectionFactory *descs.CollectionFactory - internalExecutorFactory sqlutil.InternalExecutorFactory + statusServer serverpb.SQLStatusServer + isMeta1LeaseholderFunc isMeta1LeaseholderFunc + testingKnobs ExecutorTestingKnobs + metrics *temporaryObjectCleanerMetrics // waitForInstances is a function to ensure that the status server will know // about the set of live instances at least as of the time of startup. This @@ -422,29 +413,25 @@ func (m *temporaryObjectCleanerMetrics) MetricStruct() {} // required arguments, but does not start it. func NewTemporaryObjectCleaner( settings *cluster.Settings, - db *kv.DB, + db descs.DB, codec keys.SQLCodec, registry *metric.Registry, statusServer serverpb.SQLStatusServer, isMeta1LeaseholderFunc isMeta1LeaseholderFunc, testingKnobs ExecutorTestingKnobs, - ief sqlutil.InternalExecutorFactory, - cf *descs.CollectionFactory, waitForInstances func(ctx context.Context) error, ) *TemporaryObjectCleaner { metrics := makeTemporaryObjectCleanerMetrics() registry.AddMetricStruct(metrics) return &TemporaryObjectCleaner{ - settings: settings, - db: db, - codec: codec, - statusServer: statusServer, - isMeta1LeaseholderFunc: isMeta1LeaseholderFunc, - testingKnobs: testingKnobs, - metrics: metrics, - internalExecutorFactory: ief, - collectionFactory: cf, - waitForInstances: waitForInstances, + settings: settings, + db: db, + codec: codec, + statusServer: statusServer, + isMeta1LeaseholderFunc: isMeta1LeaseholderFunc, + testingKnobs: testingKnobs, + metrics: metrics, + waitForInstances: waitForInstances, } } @@ -490,7 +477,7 @@ func (c *TemporaryObjectCleaner) doTemporaryObjectCleanup( if c.codec.ForSystemTenant() { 
// We only want to perform the cleanup if we are holding the meta1 lease. // This ensures only one server can perform the job at a time. - isLeaseHolder, err := c.isMeta1LeaseholderFunc(ctx, c.db.Clock().NowAsClockTimestamp()) + isLeaseHolder, err := c.isMeta1LeaseholderFunc(ctx, c.db.KV().Clock().NowAsClockTimestamp()) if err != nil { return err } @@ -516,55 +503,55 @@ func (c *TemporaryObjectCleaner) doTemporaryObjectCleanup( defer c.metrics.ActiveCleaners.Dec(1) log.Infof(ctx, "running temporary object cleanup background job") - // TODO(sumeer): this is not using NewTxnWithSteppingEnabled and so won't be - // classified as FROM_SQL for purposes of admission control. Fix. - txn := kv.NewTxn(ctx, c.db, 0) - // Only see temporary schemas after some delay as safety - // mechanism. - waitTimeForCreation := TempObjectWaitInterval.Get(&c.settings.SV) - descsCol := c.collectionFactory.NewCollection(ctx) - // Build a set of all databases with temporary objects. - var dbs nstree.Catalog - if err := retryFunc(ctx, func() (err error) { - dbs, err = descsCol.GetAllDatabases(ctx, txn) - return err - }); err != nil { - return err - } - - sessionIDs := make(map[clusterunique.ID]struct{}) - if err := dbs.ForEachDescriptor(func(dbDesc catalog.Descriptor) error { - db, err := catalog.AsDatabaseDescriptor(dbDesc) - if err != nil { - return err - } - var schemas nstree.Catalog + var sessionIDs map[clusterunique.ID]struct{} + if err := c.db.DescsTxn(ctx, func(ctx context.Context, txn descs.Txn) error { + sessionIDs = make(map[clusterunique.ID]struct{}) + // Only see temporary schemas after some delay as safety + // mechanism. + waitTimeForCreation := TempObjectWaitInterval.Get(&c.settings.SV) + descsCol := txn.Descriptors() + // Build a set of all databases with temporary objects. 
+ var dbs nstree.Catalog if err := retryFunc(ctx, func() (err error) { - schemas, err = descsCol.GetAllSchemasInDatabase(ctx, txn, db) + dbs, err = descsCol.GetAllDatabases(ctx, txn.KV()) return err }); err != nil { return err } - return schemas.ForEachNamespaceEntry(func(e nstree.NamespaceEntry) error { - if e.GetParentSchemaID() != descpb.InvalidID { - return nil - } - // Skip over any temporary objects that are not old enough, - // we intentionally use a delay to avoid problems. - if !e.GetMVCCTimestamp().Less(txn.ReadTimestamp().Add(-waitTimeForCreation.Nanoseconds(), 0)) { - return nil + return dbs.ForEachDescriptor(func(dbDesc catalog.Descriptor) error { + db, err := catalog.AsDatabaseDescriptor(dbDesc) + if err != nil { + return err } - if isTempSchema, sessionID, err := temporarySchemaSessionID(e.GetName()); err != nil { - // This should not cause an error. - log.Warningf(ctx, "could not parse %q as temporary schema name", e.GetName()) - } else if isTempSchema { - sessionIDs[sessionID] = struct{}{} + var schemas nstree.Catalog + if err := retryFunc(ctx, func() (err error) { + schemas, err = descsCol.GetAllSchemasInDatabase(ctx, txn.KV(), db) + return err + }); err != nil { + return err } - return nil + return schemas.ForEachNamespaceEntry(func(e nstree.NamespaceEntry) error { + if e.GetParentSchemaID() != descpb.InvalidID { + return nil + } + // Skip over any temporary objects that are not old enough, + // we intentionally use a delay to avoid problems. + if !e.GetMVCCTimestamp().Less(txn.KV().ReadTimestamp().Add(-waitTimeForCreation.Nanoseconds(), 0)) { + return nil + } + if isTempSchema, sessionID, err := temporarySchemaSessionID(e.GetName()); err != nil { + // This should not cause an error. 
+ log.Warningf(ctx, "could not parse %q as temporary schema name", e.GetName()) + } else if isTempSchema { + sessionIDs[sessionID] = struct{}{} + } + return nil + }) }) }); err != nil { return err } + log.Infof(ctx, "found %d temporary schemas", len(sessionIDs)) if len(sessionIDs) == 0 { @@ -606,8 +593,6 @@ func (c *TemporaryObjectCleaner) doTemporaryObjectCleanup( if err := retryFunc(ctx, func() error { return cleanupSessionTempObjects( ctx, - c.settings, - c.internalExecutorFactory, c.db, c.codec, sessionID, diff --git a/pkg/sql/temporary_schema_test.go b/pkg/sql/temporary_schema_test.go index 94773b0045a0..f05d918603e4 100644 --- a/pkg/sql/temporary_schema_test.go +++ b/pkg/sql/temporary_schema_test.go @@ -19,13 +19,11 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -48,7 +46,7 @@ func TestCleanupSchemaObjects(t *testing.T) { ctx := context.Background() params, _ := tests.CreateTestServerParams() - s, db, kvDB := serverutils.StartServer(t, params) + s, db, _ := serverutils.StartServer(t, params) defer s.Stopper().Stop(ctx) conn, err := db.Conn(ctx) @@ -96,27 +94,25 @@ INSERT INTO perm_table VALUES (DEFAULT, 1); require.NoError(t, rows.Close()) } execCfg := s.ExecutorConfig().(ExecutorConfig) - ief := execCfg.InternalExecutorFactory - require.NoError(t, ief.DescsTxnWithExecutor(ctx, kvDB, nil /* sessionData */, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, - ie sqlutil.InternalExecutor, + 
ief := execCfg.InternalDB + require.NoError(t, ief.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { // Add a hack to not wait for one version on the descriptors. - defer descsCol.ReleaseAll(ctx) - defaultDB, err := descsCol.ByID(txn).WithoutNonPublic().Get().Database(ctx, namesToID["defaultdb"]) + defer txn.Descriptors().ReleaseAll(ctx) + defaultDB, err := txn.Descriptors().ByID(txn.KV()).WithoutNonPublic().Get().Database(ctx, namesToID["defaultdb"]) if err != nil { return err } - tempSchema, err := descsCol.ByName(txn).Get().Schema(ctx, defaultDB, tempSchemaName) + tempSchema, err := txn.Descriptors().ByName(txn.KV()).Get().Schema(ctx, defaultDB, tempSchemaName) if err != nil { return err } return cleanupTempSchemaObjects( ctx, txn, - descsCol, + txn.Descriptors(), execCfg.Codec, - ie, defaultDB, tempSchema, ) diff --git a/pkg/sql/tenant_accessors.go b/pkg/sql/tenant_accessors.go index 8f7666370f1a..de4bfdae051d 100644 --- a/pkg/sql/tenant_accessors.go +++ b/pkg/sql/tenant_accessors.go @@ -15,9 +15,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -51,11 +52,9 @@ func rejectIfSystemTenant(tenID uint64, op string) error { // GetAllNonDropTenantIDs returns all tenants in the system table, excluding // those in the DROP state. 
-func GetAllNonDropTenantIDs( - ctx context.Context, execCfg *ExecutorConfig, txn *kv.Txn, -) ([]roachpb.TenantID, error) { - rows, err := execCfg.InternalExecutor.QueryBuffered( - ctx, "get-tenant-ids", txn, ` +func GetAllNonDropTenantIDs(ctx context.Context, txn isql.Txn) ([]roachpb.TenantID, error) { + rows, err := txn.QueryBuffered( + ctx, "get-tenant-ids", txn.KV(), ` SELECT id FROM system.tenants WHERE crdb_internal.pb_to_json('cockroach.sql.sqlbase.TenantInfo', info, true)->>'state' != 'DROP' @@ -82,14 +81,14 @@ func GetAllNonDropTenantIDs( // GetTenantRecordByName retrieves a tenant with the provided name from // system.tenants. func GetTenantRecordByName( - ctx context.Context, execCfg *ExecutorConfig, txn *kv.Txn, tenantName roachpb.TenantName, + ctx context.Context, settings *cluster.Settings, txn isql.Txn, tenantName roachpb.TenantName, ) (*descpb.TenantInfo, error) { - if !execCfg.Settings.Version.IsActive(ctx, clusterversion.V23_1TenantNames) { + if !settings.Version.IsActive(ctx, clusterversion.V23_1TenantNames) { return nil, errors.Newf("tenant names not supported until upgrade to %s or higher is completed", clusterversion.V23_1TenantNames.String()) } - row, err := execCfg.InternalExecutor.QueryRowEx( - ctx, "get-tenant", txn, sessiondata.NodeUserSessionDataOverride, + row, err := txn.QueryRowEx( + ctx, "get-tenant", txn.KV(), sessiondata.NodeUserSessionDataOverride, `SELECT info FROM system.tenants WHERE name = $1`, tenantName, ) if err != nil { @@ -108,10 +107,10 @@ func GetTenantRecordByName( // GetTenantRecordByID retrieves a tenant in system.tenants. 
func GetTenantRecordByID( - ctx context.Context, execCfg *ExecutorConfig, txn *kv.Txn, tenID roachpb.TenantID, + ctx context.Context, txn isql.Txn, tenID roachpb.TenantID, ) (*descpb.TenantInfo, error) { - row, err := execCfg.InternalExecutor.QueryRowEx( - ctx, "get-tenant", txn, sessiondata.NodeUserSessionDataOverride, + row, err := txn.QueryRowEx( + ctx, "get-tenant", txn.KV(), sessiondata.NodeUserSessionDataOverride, `SELECT info FROM system.tenants WHERE id = $1`, tenID.ToUint64(), ) if err != nil { @@ -141,7 +140,7 @@ func (p *planner) LookupTenantID( return tid, err } - rec, err := GetTenantRecordByName(ctx, p.execCfg, p.Txn(), tenantName) + rec, err := GetTenantRecordByName(ctx, p.execCfg.Settings, p.InternalSQLTxn(), tenantName) if err != nil { return tid, err } diff --git a/pkg/sql/tenant_creation.go b/pkg/sql/tenant_creation.go index bd93659acd5b..b42e5ecd6549 100644 --- a/pkg/sql/tenant_creation.go +++ b/pkg/sql/tenant_creation.go @@ -20,14 +20,15 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/bootstrap" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" @@ -102,7 +103,15 @@ func (p *planner) createTenantInternal( // Create the record. This also auto-allocates an ID if the // tenantID was zero. 
- if _, err := CreateTenantRecord(ctx, p.ExecCfg(), p.Txn(), info, initialTenantZoneConfig); err != nil { + if _, err := CreateTenantRecord( + ctx, + p.ExecCfg().Codec, + p.ExecCfg().Settings, + p.InternalSQLTxn(), + p.ExecCfg().SpanConfigKVAccessor.WithTxn(ctx, p.Txn()), + info, + initialTenantZoneConfig, + ); err != nil { return tid, err } // Retrieve the possibly auto-generated ID. @@ -181,20 +190,22 @@ func (p *planner) createTenantInternal( // consulting the system.tenants table. func CreateTenantRecord( ctx context.Context, - execCfg *ExecutorConfig, - txn *kv.Txn, + codec keys.SQLCodec, + settings *cluster.Settings, + txn isql.Txn, + spanConfigs spanconfig.KVAccessor, info *descpb.TenantInfoWithUsage, initialTenantZoneConfig *zonepb.ZoneConfig, ) (roachpb.TenantID, error) { const op = "create" - if err := rejectIfCantCoordinateMultiTenancy(execCfg.Codec, op); err != nil { + if err := rejectIfCantCoordinateMultiTenancy(codec, op); err != nil { return roachpb.TenantID{}, err } if err := rejectIfSystemTenant(info.ID, op); err != nil { return roachpb.TenantID{}, err } if info.Name != "" { - if !execCfg.Settings.Version.IsActive(ctx, clusterversion.V23_1TenantNames) { + if !settings.Version.IsActive(ctx, clusterversion.V23_1TenantNames) { return roachpb.TenantID{}, pgerror.Newf(pgcode.FeatureNotSupported, "cannot use tenant names") } if err := info.Name.IsValid(); err != nil { @@ -204,7 +215,7 @@ func CreateTenantRecord( tenID := info.ID if tenID == 0 { - tenantID, err := getAvailableTenantID(ctx, info.Name, execCfg, txn) + tenantID, err := getAvailableTenantID(ctx, info.Name, txn) if err != nil { return roachpb.TenantID{}, err } @@ -214,7 +225,7 @@ func CreateTenantRecord( if info.Name == "" { // No name: generate one if we are at the appropriate version. 
- if execCfg.Settings.Version.IsActive(ctx, clusterversion.V23_1TenantNames) { + if settings.Version.IsActive(ctx, clusterversion.V23_1TenantNames) { info.Name = roachpb.TenantName(fmt.Sprintf("tenant-%d", info.ID)) } } @@ -227,12 +238,12 @@ func CreateTenantRecord( // Insert into the tenant table and detect collisions. if info.Name != "" { - if !execCfg.Settings.Version.IsActive(ctx, clusterversion.V23_1TenantNames) { + if !settings.Version.IsActive(ctx, clusterversion.V23_1TenantNames) { return roachpb.TenantID{}, pgerror.Newf(pgcode.FeatureNotSupported, "cannot use tenant names") } } - if num, err := execCfg.InternalExecutor.ExecEx( - ctx, "create-tenant", txn, sessiondata.NodeUserSessionDataOverride, + if num, err := txn.ExecEx( + ctx, "create-tenant", txn.KV(), sessiondata.NodeUserSessionDataOverride, `INSERT INTO system.tenants (id, active, info) VALUES ($1, $2, $3)`, tenID, active, infoBytes, ); err != nil { @@ -245,7 +256,7 @@ func CreateTenantRecord( } return roachpb.TenantID{}, errors.Wrap(err, "inserting new tenant") } else if num != 1 { - logcrash.ReportOrPanic(ctx, &execCfg.Settings.SV, "inserting tenant %+v: unexpected number of rows affected: %d", info, num) + logcrash.ReportOrPanic(ctx, &settings.SV, "inserting tenant %+v: unexpected number of rows affected: %d", info, num) } if u := info.Usage; u != nil { @@ -253,8 +264,8 @@ func CreateTenantRecord( if err != nil { return roachpb.TenantID{}, errors.Wrap(err, "marshaling tenant usage data") } - if num, err := execCfg.InternalExecutor.ExecEx( - ctx, "create-tenant-usage", txn, sessiondata.NodeUserSessionDataOverride, + if num, err := txn.ExecEx( + ctx, "create-tenant-usage", txn.KV(), sessiondata.NodeUserSessionDataOverride, `INSERT INTO system.tenant_usage ( tenant_id, instance_id, next_instance_id, last_update, ru_burst_limit, ru_refill_rate, ru_current, current_share_sum, @@ -272,7 +283,7 @@ func CreateTenantRecord( } return roachpb.TenantID{}, errors.Wrap(err, "inserting tenant usage data") } 
else if num != 1 { - logcrash.ReportOrPanic(ctx, &execCfg.Settings.SV, "inserting usage %+v for %v: unexpected number of rows affected: %d", u, tenID, num) + logcrash.ReportOrPanic(ctx, &settings.SV, "inserting usage %+v for %v: unexpected number of rows affected: %d", u, tenID, num) } } @@ -312,8 +323,7 @@ func CreateTenantRecord( return roachpb.TenantID{}, err } toUpsert := []spanconfig.Record{record} - scKVAccessor := execCfg.SpanConfigKVAccessor.WithTxn(ctx, txn) - return roachpb.MustMakeTenantID(tenID), scKVAccessor.UpdateSpanConfigRecords( + return roachpb.MustMakeTenantID(tenID), spanConfigs.UpdateSpanConfigRecords( ctx, nil, toUpsert, hlc.MinTimestamp, hlc.MaxTimestamp, ) } @@ -322,19 +332,19 @@ func CreateTenantRecord( func (p *planner) GetAvailableTenantID( ctx context.Context, tenantName roachpb.TenantName, ) (roachpb.TenantID, error) { - return getAvailableTenantID(ctx, tenantName, p.ExecCfg(), p.Txn()) + return getAvailableTenantID(ctx, tenantName, p.InternalSQLTxn()) } // getAvailableTenantID returns the first available ID that can be assigned to // the created tenant. Note, this ID could have previously belonged to another // tenant that has since been dropped and gc'ed. func getAvailableTenantID( - ctx context.Context, tenantName roachpb.TenantName, execCfg *ExecutorConfig, txn *kv.Txn, + ctx context.Context, tenantName roachpb.TenantName, txn isql.Txn, ) (roachpb.TenantID, error) { // Find the first available ID that can be assigned to the created tenant. // Note, this ID could have previously belonged to another tenant that has // since been dropped and gc'ed. 
- row, err := execCfg.InternalExecutor.QueryRowEx(ctx, "next-tenant-id", txn, + row, err := txn.QueryRowEx(ctx, "next-tenant-id", txn.KV(), sessiondata.NodeUserSessionDataOverride, ` SELECT id+1 AS newid FROM (VALUES (1) UNION ALL SELECT id FROM system.tenants) AS u(id) diff --git a/pkg/sql/tenant_deletion.go b/pkg/sql/tenant_deletion.go index 334518e3554d..74ec422b116b 100644 --- a/pkg/sql/tenant_deletion.go +++ b/pkg/sql/tenant_deletion.go @@ -16,10 +16,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" ) @@ -32,11 +33,20 @@ func (p *planner) DropTenantByID( return err } - info, err := GetTenantRecordByID(ctx, p.execCfg, p.txn, roachpb.MustMakeTenantID(tenID)) + info, err := GetTenantRecordByID(ctx, p.InternalSQLTxn(), roachpb.MustMakeTenantID(tenID)) if err != nil { return errors.Wrap(err, "destroying tenant") } - return dropTenantInternal(ctx, p.txn, p.execCfg, &p.extendedEvalCtx, p.User(), info, synchronousImmediateDrop) + return dropTenantInternal( + ctx, + p.ExecCfg().Settings, + p.InternalSQLTxn(), + p.ExecCfg().JobRegistry, + p.extendedEvalCtx.Jobs, + p.User(), + info, + synchronousImmediateDrop, + ) } func (p *planner) validateDropTenant(ctx context.Context) error { @@ -53,9 +63,10 @@ func (p *planner) validateDropTenant(ctx context.Context) error { func dropTenantInternal( ctx context.Context, - txn *kv.Txn, - execCfg *ExecutorConfig, - extendedEvalCtx *extendedEvalContext, + settings *cluster.Settings, + txn isql.Txn, + jobRegistry *jobs.Registry, + sessionJobs *jobsCollection, user username.SQLUsername, 
info *descpb.TenantInfo, synchronousImmediateDrop bool, @@ -75,7 +86,11 @@ func dropTenantInternal( // Cancel any running replication job on this tenant record. // The GCJob will wait for this job to enter a terminal state. if info.TenantReplicationJobID != 0 { - if err := execCfg.JobRegistry.CancelRequested(ctx, txn, info.TenantReplicationJobID); err != nil { + job, err := jobRegistry.LoadJobWithTxn(ctx, info.TenantReplicationJobID, txn) + if err != nil { + return errors.Wrap(err, "loading tenant replication job for cancelation") + } + if err := job.WithTxn(txn).CancelRequested(ctx); err != nil { return errors.Wrapf(err, "canceling tenant replication job %d", info.TenantReplicationJobID) } } @@ -85,16 +100,16 @@ func dropTenantInternal( info.State = descpb.TenantInfo_DROP info.DroppedName = info.Name info.Name = "" - if err := UpdateTenantRecord(ctx, execCfg, txn, info); err != nil { + if err := UpdateTenantRecord(ctx, settings, txn, info); err != nil { return errors.Wrap(err, "destroying tenant") } - jobID, err := createGCTenantJob(ctx, execCfg, txn, user, tenID, synchronousImmediateDrop) + jobID, err := createGCTenantJob(ctx, jobRegistry, txn, user, tenID, synchronousImmediateDrop) if err != nil { return errors.Wrap(err, "scheduling gc job") } if synchronousImmediateDrop { - extendedEvalCtx.Jobs.add(jobID) + sessionJobs.add(jobID) } return nil } @@ -103,8 +118,8 @@ func dropTenantInternal( // data and removes its tenant record. 
func createGCTenantJob( ctx context.Context, - execCfg *ExecutorConfig, - txn *kv.Txn, + jobRegistry *jobs.Registry, + txn isql.Txn, user username.SQLUsername, tenID uint64, dropImmediately bool, @@ -129,8 +144,8 @@ func createGCTenantJob( Progress: progress, NonCancelable: true, } - jobID := execCfg.JobRegistry.MakeJobID() - if _, err := execCfg.JobRegistry.CreateJobWithTxn( + jobID := jobRegistry.MakeJobID() + if _, err := jobRegistry.CreateJobWithTxn( ctx, gcJobRecord, jobID, txn, ); err != nil { return 0, err diff --git a/pkg/sql/tenant_gc.go b/pkg/sql/tenant_gc.go index c12e06de61a6..49aba9cb4180 100644 --- a/pkg/sql/tenant_gc.go +++ b/pkg/sql/tenant_gc.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -42,9 +43,9 @@ func GCTenantSync(ctx context.Context, execCfg *ExecutorConfig, info *descpb.Ten return errors.Wrap(err, "clear tenant") } - err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - if num, err := execCfg.InternalExecutor.ExecEx( - ctx, "delete-tenant", txn, sessiondata.NodeUserSessionDataOverride, + err := execCfg.InternalDB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + if num, err := txn.ExecEx( + ctx, "delete-tenant", txn.KV(), sessiondata.NodeUserSessionDataOverride, `DELETE FROM system.tenants WHERE id = $1`, info.ID, ); err != nil { return errors.Wrapf(err, "deleting tenant %d", info.ID) @@ -54,15 +55,15 @@ func GCTenantSync(ctx context.Context, execCfg *ExecutorConfig, info *descpb.Ten log.Warningf(ctx, "tenant GC: no record to delete for %d", info.ID) } - if _, err := execCfg.InternalExecutor.ExecEx( - ctx, "delete-tenant-usage", txn, sessiondata.NodeUserSessionDataOverride, 
+ if _, err := txn.ExecEx( + ctx, "delete-tenant-usage", txn.KV(), sessiondata.NodeUserSessionDataOverride, `DELETE FROM system.tenant_usage WHERE tenant_id = $1`, info.ID, ); err != nil { return errors.Wrapf(err, "deleting tenant %d usage", info.ID) } - if _, err := execCfg.InternalExecutor.ExecEx( - ctx, "delete-tenant-settings", txn, sessiondata.NodeUserSessionDataOverride, + if _, err := txn.ExecEx( + ctx, "delete-tenant-settings", txn.KV(), sessiondata.NodeUserSessionDataOverride, `DELETE FROM system.tenant_settings WHERE tenant_id = $1`, info.ID, ); err != nil { return errors.Wrapf(err, "deleting tenant %d settings", info.ID) @@ -80,7 +81,7 @@ func GCTenantSync(ctx context.Context, execCfg *ExecutorConfig, info *descpb.Ten if err != nil { return err } - scKVAccessor := execCfg.SpanConfigKVAccessor.WithTxn(ctx, txn) + scKVAccessor := execCfg.SpanConfigKVAccessor.WithTxn(ctx, txn.KV()) records, err := scKVAccessor.GetSpanConfigRecords( ctx, []spanconfig.Target{ spanconfig.MakeTargetFromSpan(tenantSpan), @@ -138,13 +139,9 @@ func (p *planner) GCTenant(ctx context.Context, tenID uint64) error { if err := p.RequireAdminRole(ctx, "gc tenant"); err != nil { return err } - var info *descpb.TenantInfo - if txnErr := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - var err error - info, err = GetTenantRecordByID(ctx, p.execCfg, p.txn, roachpb.MustMakeTenantID(tenID)) - return err - }); txnErr != nil { - return errors.Wrapf(txnErr, "retrieving tenant %d", tenID) + info, err := GetTenantRecordByID(ctx, p.InternalSQLTxn(), roachpb.MustMakeTenantID(tenID)) + if err != nil { + return errors.Wrapf(err, "retrieving tenant %d", tenID) } // Confirm tenant is ready to be cleared. 
@@ -152,8 +149,8 @@ func (p *planner) GCTenant(ctx context.Context, tenID uint64) error { return errors.Errorf("tenant %d is not in state DROP", info.ID) } - _, err := createGCTenantJob( - ctx, p.ExecCfg(), p.Txn(), p.User(), tenID, false, /* synchronous */ + _, err = createGCTenantJob( + ctx, p.ExecCfg().JobRegistry, p.InternalSQLTxn(), p.User(), tenID, false, /* synchronous */ ) return err } diff --git a/pkg/sql/tenant_settings.go b/pkg/sql/tenant_settings.go index 73abcd994aaf..5ca65f7b6334 100644 --- a/pkg/sql/tenant_settings.go +++ b/pkg/sql/tenant_settings.go @@ -122,7 +122,7 @@ func (n *alterTenantSetClusterSettingNode) startExec(params runParams) error { if n.value == nil { // TODO(radu,knz): DEFAULT might be confusing, we really want to say "NO OVERRIDE" reportedValue = "DEFAULT" - if _, err := params.p.execCfg.InternalExecutor.ExecEx( + if _, err := params.p.InternalSQLTxn().ExecEx( params.ctx, "reset-tenant-setting", params.p.Txn(), sessiondata.RootUserSessionDataOverride, "DELETE FROM system.tenant_settings WHERE tenant_id = $1 AND name = $2", tenantID, n.name, @@ -139,7 +139,7 @@ func (n *alterTenantSetClusterSettingNode) startExec(params runParams) error { if err != nil { return err } - if _, err := params.p.execCfg.InternalExecutor.ExecEx( + if _, err := params.p.InternalSQLTxn().ExecEx( params.ctx, "update-tenant-setting", params.p.Txn(), sessiondata.RootUserSessionDataOverride, `UPSERT INTO system.tenant_settings (tenant_id, name, value, last_updated, value_type) VALUES ($1, $2, $3, now(), $4)`, @@ -248,7 +248,7 @@ FROM LEFT JOIN system.tenant_settings AS overrideall ON setting.variable = overrideall.name AND overrideall.tenant_id = 0` - datums, err := p.ExecCfg().InternalExecutor.QueryRowEx( + datums, err := p.InternalSQLTxn().QueryRowEx( ctx, "get-tenant-setting-value", p.txn, sessiondata.RootUserSessionDataOverride, lookupEncodedTenantSetting, diff --git a/pkg/sql/tenant_spec.go b/pkg/sql/tenant_spec.go index 8c5700fce195..c5fbdf93707a 100644 
--- a/pkg/sql/tenant_spec.go +++ b/pkg/sql/tenant_spec.go @@ -133,7 +133,7 @@ func (ts *tenantSpecName) getTenantInfo( if err != nil { return nil, err } - return GetTenantRecordByName(ctx, p.ExecCfg(), p.Txn(), tenantName) + return GetTenantRecordByName(ctx, p.ExecCfg().Settings, p.InternalSQLTxn(), tenantName) } func (ts *tenantSpecId) getTenantInfo( @@ -143,7 +143,7 @@ func (ts *tenantSpecId) getTenantInfo( if err != nil { return nil, err } - return GetTenantRecordByID(ctx, p.ExecCfg(), p.Txn(), tid) + return GetTenantRecordByID(ctx, p.InternalSQLTxn(), tid) } // LookupTenantInfo implements PlanHookState for the benefits of CCL statements. diff --git a/pkg/sql/tenant_test.go b/pkg/sql/tenant_test.go index 11a2f2f4b3d5..4012b560203c 100644 --- a/pkg/sql/tenant_test.go +++ b/pkg/sql/tenant_test.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -68,15 +69,18 @@ func TestGetTenantIds(t *testing.T) { ctx := context.Background() s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - execCfg := s.ExecutorConfig().(sql.ExecutorConfig) + idb := s.ExecutorConfig().(sql.ExecutorConfig).InternalDB tdb := sqlutils.MakeSQLRunner(sqlDB) // Create 2 tenants in addition to the system tenant. 
tdb.Exec(t, "CREATE TENANT t1") tdb.Exec(t, "CREATE TENANT t2") - ids, err := sql.GetAllNonDropTenantIDs(ctx, &execCfg, nil) - require.NoError(t, err) + var ids []roachpb.TenantID + require.NoError(t, idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { + ids, err = sql.GetAllNonDropTenantIDs(ctx, txn) + return err + })) expectedIds := []roachpb.TenantID{ roachpb.MustMakeTenantID(1), roachpb.MustMakeTenantID(2), @@ -87,8 +91,10 @@ func TestGetTenantIds(t *testing.T) { // Drop tenant 2. tdb.Exec(t, "DROP TENANT t1") - ids, err = sql.GetAllNonDropTenantIDs(ctx, &execCfg, nil) - require.NoError(t, err) + require.NoError(t, idb.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { + ids, err = sql.GetAllNonDropTenantIDs(ctx, txn) + return err + })) expectedIds = []roachpb.TenantID{ roachpb.MustMakeTenantID(1), roachpb.MustMakeTenantID(3), diff --git a/pkg/sql/tenant_update.go b/pkg/sql/tenant_update.go index 8a5b6b0821a8..e37bcaa5175c 100644 --- a/pkg/sql/tenant_update.go +++ b/pkg/sql/tenant_update.go @@ -15,13 +15,14 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/kv" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/log/logcrash" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/errors" @@ -31,7 +32,7 @@ import ( // // Caller is expected to check the user's permission. 
func UpdateTenantRecord( - ctx context.Context, execCfg *ExecutorConfig, txn *kv.Txn, info *descpb.TenantInfo, + ctx context.Context, settings *cluster.Settings, txn isql.Txn, info *descpb.TenantInfo, ) error { if err := validateTenantInfo(info); err != nil { return err @@ -44,14 +45,14 @@ func UpdateTenantRecord( return err } - if num, err := execCfg.InternalExecutor.ExecEx( - ctx, "activate-tenant", txn, sessiondata.NodeUserSessionDataOverride, + if num, err := txn.ExecEx( + ctx, "activate-tenant", txn.KV(), sessiondata.NodeUserSessionDataOverride, `UPDATE system.tenants SET active = $2, info = $3 WHERE id = $1`, tenID, active, infoBytes, ); err != nil { return errors.Wrap(err, "activating tenant") } else if num != 1 { - logcrash.ReportOrPanic(ctx, &execCfg.Settings.SV, "unexpected number of rows affected: %d", num) + logcrash.ReportOrPanic(ctx, &settings.SV, "unexpected number of rows affected: %d", num) } return nil } @@ -69,9 +70,9 @@ func validateTenantInfo(info *descpb.TenantInfo) error { // TestingUpdateTenantRecord is a public wrapper around updateTenantRecord // intended for testing purposes. func TestingUpdateTenantRecord( - ctx context.Context, execCfg *ExecutorConfig, txn *kv.Txn, info *descpb.TenantInfo, + ctx context.Context, settings *cluster.Settings, txn isql.Txn, info *descpb.TenantInfo, ) error { - return UpdateTenantRecord(ctx, execCfg, txn, info) + return UpdateTenantRecord(ctx, settings, txn, info) } // UpdateTenantResourceLimits implements the tree.TenantOperator interface. 
@@ -95,23 +96,22 @@ func (p *planner) UpdateTenantResourceLimits( if err := rejectIfSystemTenant(tenantID, op); err != nil { return err } - return p.WithInternalExecutor(ctx, func( - ctx context.Context, txn *kv.Txn, ie sqlutil.InternalExecutor, - ) error { - return p.ExecCfg().TenantUsageServer.ReconfigureTokenBucket( - ctx, p.Txn(), ie, roachpb.MustMakeTenantID(tenantID), availableRU, refillRate, - maxBurstRU, asOf, asOfConsumedRequestUnits, - ) - }) + + return p.ExecCfg().TenantUsageServer.ReconfigureTokenBucket( + ctx, p.InternalSQLTxn(), roachpb.MustMakeTenantID(tenantID), availableRU, refillRate, + maxBurstRU, asOf, asOfConsumedRequestUnits, + ) } // ActivateTenant marks a tenant active. // // The caller is responsible for checking that the user is authorized // to take this action. -func ActivateTenant(ctx context.Context, execCfg *ExecutorConfig, txn *kv.Txn, tenID uint64) error { +func ActivateTenant( + ctx context.Context, settings *cluster.Settings, codec keys.SQLCodec, txn isql.Txn, tenID uint64, +) error { const op = "activate" - if err := rejectIfCantCoordinateMultiTenancy(execCfg.Codec, op); err != nil { + if err := rejectIfCantCoordinateMultiTenancy(codec, op); err != nil { return err } if err := rejectIfSystemTenant(tenID, op); err != nil { @@ -119,14 +119,14 @@ func ActivateTenant(ctx context.Context, execCfg *ExecutorConfig, txn *kv.Txn, t } // Retrieve the tenant's info. - info, err := GetTenantRecordByID(ctx, execCfg, txn, roachpb.MustMakeTenantID(tenID)) + info, err := GetTenantRecordByID(ctx, txn, roachpb.MustMakeTenantID(tenID)) if err != nil { return errors.Wrap(err, "activating tenant") } // Mark the tenant as active. 
info.State = descpb.TenantInfo_ACTIVE - if err := UpdateTenantRecord(ctx, execCfg, txn, info); err != nil { + if err := UpdateTenantRecord(ctx, settings, txn, info); err != nil { return errors.Wrap(err, "activating tenant") } @@ -160,7 +160,7 @@ func (p *planner) renameTenant( } } - if num, err := p.ExecCfg().InternalExecutor.ExecEx( + if num, err := p.InternalSQLTxn().ExecEx( ctx, "rename-tenant", p.txn, sessiondata.NodeUserSessionDataOverride, `UPDATE system.public.tenants SET info = diff --git a/pkg/sql/truncate.go b/pkg/sql/truncate.go index 90804ecf4d16..3eccbf0ea1bb 100644 --- a/pkg/sql/truncate.go +++ b/pkg/sql/truncate.go @@ -261,7 +261,8 @@ func (p *planner) truncateTable(ctx context.Context, id descpb.ID, jobDesc strin ), ) if _, err := p.ExecCfg().JobRegistry.CreateAdoptableJobWithTxn( - ctx, record, p.ExecCfg().JobRegistry.MakeJobID(), p.txn); err != nil { + ctx, record, p.ExecCfg().JobRegistry.MakeJobID(), p.InternalSQLTxn(), + ); err != nil { return err } @@ -291,7 +292,7 @@ func (p *planner) truncateTable(ctx context.Context, id descpb.ID, jobDesc strin NewIndexes: newIndexIDs[1:], } if err := maybeUpdateZoneConfigsForPKChange( - ctx, p.txn, p.ExecCfg(), p.ExtendedEvalContext().Tracing.KVTracingEnabled(), p.Descriptors(), tableDesc, swapInfo, + ctx, p.InternalSQLTxn(), p.ExecCfg(), p.ExtendedEvalContext().Tracing.KVTracingEnabled(), p.Descriptors(), tableDesc, swapInfo, ); err != nil { return err } diff --git a/pkg/sql/ttl/ttljob/BUILD.bazel b/pkg/sql/ttl/ttljob/BUILD.bazel index 00c54488c28d..d72816dd3477 100644 --- a/pkg/sql/ttl/ttljob/BUILD.bazel +++ b/pkg/sql/ttl/ttljob/BUILD.bazel @@ -31,6 +31,7 @@ go_library( "//pkg/sql/catalog/descs", "//pkg/sql/execinfra", "//pkg/sql/execinfrapb", + "//pkg/sql/isql", "//pkg/sql/physicalplan", "//pkg/sql/rowenc", "//pkg/sql/rowexec", @@ -38,9 +39,9 @@ go_library( "//pkg/sql/sessiondata", "//pkg/sql/sessiondatapb", "//pkg/sql/sqltelemetry", - "//pkg/sql/sqlutil", "//pkg/sql/ttl/ttlbase", "//pkg/sql/types", + 
"//pkg/util/admission/admissionpb", "//pkg/util/ctxgroup", "//pkg/util/log", "//pkg/util/metric", diff --git a/pkg/sql/ttl/ttljob/ttljob.go b/pkg/sql/ttl/ttljob/ttljob.go index dd0f01af1bde..5f499b998853 100644 --- a/pkg/sql/ttl/ttljob/ttljob.go +++ b/pkg/sql/ttl/ttljob/ttljob.go @@ -28,6 +28,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/physicalplan" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" @@ -254,7 +255,7 @@ func (t rowLevelTTLResumer) Resume(ctx context.Context, execCtx interface{}) err jobID, nil, /* txn */ true, /* useReadLock */ - func(_ *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + func(_ isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { progress := md.Progress rowLevelTTL := progress.Details.(*jobspb.Progress_RowLevelTTL).RowLevelTTL rowLevelTTL.JobSpanCount = int64(jobSpanCount) diff --git a/pkg/sql/ttl/ttljob/ttljob_metrics.go b/pkg/sql/ttl/ttljob/ttljob_metrics.go index 547239a65e59..d9b450a8d481 100644 --- a/pkg/sql/ttl/ttljob/ttljob_metrics.go +++ b/pkg/sql/ttl/ttljob/ttljob_metrics.go @@ -214,7 +214,7 @@ func (m *rowLevelTTLMetrics) fetchStatistics( // really care if statistics gets left behind and prefer the TTL job to // have priority. 
qosLevel := sessiondatapb.SystemLow - datums, err := execCfg.InternalExecutor.QueryRowEx( + datums, err := execCfg.InternalDB.Executor().QueryRowEx( ctx, c.opName, nil, diff --git a/pkg/sql/ttl/ttljob/ttljob_processor.go b/pkg/sql/ttl/ttljob/ttljob_processor.go index a9739cfd0cbb..e2f101c13991 100644 --- a/pkg/sql/ttl/ttljob/ttljob_processor.go +++ b/pkg/sql/ttl/ttljob/ttljob_processor.go @@ -18,16 +18,16 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/rowenc" "github.com/cockroachdb/cockroach/pkg/sql/rowexec" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" "github.com/cockroachdb/cockroach/pkg/sql/types" + "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/quotapool" @@ -68,8 +68,8 @@ func (t *ttlProcessor) work(ctx context.Context) error { var pkColumns []string var pkTypes []*types.T var labelMetrics bool - if err := serverCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - desc, err := descsCol.ByIDWithLeased(txn).WithoutNonPublic().Get().Table(ctx, details.TableID) + if err := serverCfg.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { + desc, err := descsCol.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Table(ctx, details.TableID) if err != nil { return err } @@ -91,7 +91,7 @@ func (t *ttlProcessor) work(ctx context.Context) error { rowLevelTTL := desc.GetRowLevelTTL() labelMetrics = rowLevelTTL.LabelMetrics 
- tn, err := descs.GetObjectName(ctx, txn, descsCol, desc) + tn, err := descs.GetObjectName(ctx, txn.KV(), descsCol, desc) if err != nil { return errors.Wrapf(err, "error fetching table relation name for TTL") } @@ -177,7 +177,7 @@ func (t *ttlProcessor) work(ctx context.Context) error { jobID, nil, /* txn */ true, /* useReadLock */ - func(_ *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { + func(_ isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error { progress := md.Progress rowLevelTTL := progress.Details.(*jobspb.Progress_RowLevelTTL).RowLevelTTL rowLevelTTL.JobRowCount += processorRowCount @@ -222,7 +222,7 @@ func (t *ttlProcessor) runTTLOnSpan( ttlExpr := ttlSpec.TTLExpr flowCtx := t.FlowCtx serverCfg := flowCtx.Cfg - ie := serverCfg.Executor + ie := serverCfg.DB.Executor() selectBatchSize := ttlSpec.SelectBatchSize @@ -294,10 +294,10 @@ func (t *ttlProcessor) runTTLOnSpan( until = numExpiredRows } deleteBatch := expiredRowsPKs[startRowIdx:until] - if err := serverCfg.DB.TxnWithSteppingEnabled(ctx, sessiondatapb.TTLLow, func(ctx context.Context, txn *kv.Txn) error { + do := func(ctx context.Context, txn isql.Txn) error { // If we detected a schema change here, the DELETE will not succeed // (the SELECT still will because of the AOST). Early exit here. 
- desc, err := flowCtx.Descriptors.ByIDWithLeased(txn).WithoutNonPublic().Get().Table(ctx, details.TableID) + desc, err := flowCtx.Descriptors.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Table(ctx, details.TableID) if err != nil { return err } @@ -314,7 +314,7 @@ func (t *ttlProcessor) runTTLOnSpan( defer tokens.Consume() start := timeutil.Now() - batchRowCount, err := deleteBuilder.run(ctx, ie, txn, deleteBatch) + batchRowCount, err := deleteBuilder.run(ctx, txn, deleteBatch) if err != nil { return err } @@ -323,7 +323,10 @@ func (t *ttlProcessor) runTTLOnSpan( metrics.RowDeletions.Inc(batchRowCount) spanRowCount += batchRowCount return nil - }); err != nil { + } + if err := serverCfg.DB.Txn( + ctx, do, isql.SteppingEnabled(), isql.WithPriority(admissionpb.UserLowPri), + ); err != nil { return spanRowCount, errors.Wrapf(err, "error during row deletion") } } diff --git a/pkg/sql/ttl/ttljob/ttljob_query_builder.go b/pkg/sql/ttl/ttljob/ttljob_query_builder.go index a475e16d8036..e09943d689e1 100644 --- a/pkg/sql/ttl/ttljob/ttljob_query_builder.go +++ b/pkg/sql/ttl/ttljob/ttljob_query_builder.go @@ -15,14 +15,13 @@ import ( "fmt" "time" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/ttl/ttlbase" "github.com/cockroachdb/errors" ) @@ -166,9 +165,7 @@ func (b *selectQueryBuilder) nextQuery() (string, []interface{}) { return b.cachedQuery, b.cachedArgs } -func (b *selectQueryBuilder) run( - ctx context.Context, ie sqlutil.InternalExecutor, -) ([]tree.Datums, error) { +func (b *selectQueryBuilder) 
run(ctx context.Context, ie isql.Executor) ([]tree.Datums, error) { q, args := b.nextQuery() // Use a nil txn so that the AOST clause is handled correctly. Currently, @@ -294,14 +291,14 @@ func (b *deleteQueryBuilder) buildQueryAndArgs(rows []tree.Datums) (string, []in } func (b *deleteQueryBuilder) run( - ctx context.Context, ie sqlutil.InternalExecutor, txn *kv.Txn, rows []tree.Datums, + ctx context.Context, txn isql.Txn, rows []tree.Datums, ) (int64, error) { q, deleteArgs := b.buildQueryAndArgs(rows) qosLevel := sessiondatapb.TTLLow - rowCount, err := ie.ExecEx( + rowCount, err := txn.ExecEx( ctx, b.deleteOpName, - txn, + txn.KV(), sessiondata.InternalExecutorOverride{ User: username.RootUserName(), QualityOfService: &qosLevel, diff --git a/pkg/sql/ttl/ttlschedule/BUILD.bazel b/pkg/sql/ttl/ttlschedule/BUILD.bazel index 6ec47978d9ee..db32c73a6aa1 100644 --- a/pkg/sql/ttl/ttlschedule/BUILD.bazel +++ b/pkg/sql/ttl/ttlschedule/BUILD.bazel @@ -16,11 +16,11 @@ go_library( "//pkg/sql/catalog", "//pkg/sql/catalog/catpb", "//pkg/sql/catalog/descs", + "//pkg/sql/isql", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/sem/tree", "//pkg/sql/sqlerrors", - "//pkg/sql/sqlutil", "//pkg/sql/ttl/ttlbase", "//pkg/util/metric", "//pkg/util/timeutil", diff --git a/pkg/sql/ttl/ttlschedule/ttlschedule.go b/pkg/sql/ttl/ttlschedule/ttlschedule.go index 6ef8e4853f3d..3d4517a1cdbf 100644 --- a/pkg/sql/ttl/ttlschedule/ttlschedule.go +++ b/pkg/sql/ttl/ttlschedule/ttlschedule.go @@ -23,11 +23,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlerrors" - 
"github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/ttl/ttlbase" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/cockroachdb/cockroach/pkg/util/timeutil" @@ -56,7 +56,7 @@ func (s rowLevelTTLExecutor) OnDrop( scheduleControllerEnv scheduledjobs.ScheduleControllerEnv, env scheduledjobs.JobSchedulerEnv, schedule *jobs.ScheduledJob, - txn *kv.Txn, + txn isql.Txn, descsCol *descs.Collection, ) (int, error) { @@ -65,17 +65,17 @@ func (s rowLevelTTLExecutor) OnDrop( return 0, err } - canDrop, err := canDropTTLSchedule(ctx, txn, descsCol, schedule, args) + canDrop, err := canDropTTLSchedule(ctx, txn.KV(), descsCol, schedule, args) if err != nil { return 0, err } if !canDrop { - tbl, err := descsCol.ByIDWithLeased(txn).WithoutNonPublic().Get().Table(ctx, args.TableID) + tbl, err := descsCol.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Table(ctx, args.TableID) if err != nil { return 0, err } - tn, err := descs.GetObjectName(ctx, txn, descsCol, tbl) + tn, err := descs.GetObjectName(ctx, txn.KV(), descsCol, tbl) if err != nil { return 0, err } @@ -126,10 +126,10 @@ func canDropTTLSchedule( // ExecuteJob implements the jobs.ScheduledJobController interface. 
func (s rowLevelTTLExecutor) ExecuteJob( ctx context.Context, + txn isql.Txn, cfg *scheduledjobs.JobExecutionConfig, env scheduledjobs.JobSchedulerEnv, sj *jobs.ScheduledJob, - txn *kv.Txn, ) error { args := &catpb.ScheduledRowLevelTTLArgs{} if err := pbtypes.UnmarshalAny(sj.ExecutionArgs().Args, args); err != nil { @@ -138,7 +138,7 @@ func (s rowLevelTTLExecutor) ExecuteJob( p, cleanup := cfg.PlanHookMaker( fmt.Sprintf("invoke-row-level-ttl-%d", args.TableID), - txn, + txn.KV(), username.NodeUserName(), ) defer cleanup() @@ -150,7 +150,6 @@ func (s rowLevelTTLExecutor) ExecuteJob( Name: jobs.CreatedByScheduledJobs, }, txn, - p.(sql.PlanHookState).ExtendedEvalContext().Descs, p.(sql.PlanHookState).ExecCfg().JobRegistry, *args, ); err != nil { @@ -164,13 +163,12 @@ func (s rowLevelTTLExecutor) ExecuteJob( // NotifyJobTermination implements the jobs.ScheduledJobController interface. func (s rowLevelTTLExecutor) NotifyJobTermination( ctx context.Context, + txn isql.Txn, jobID jobspb.JobID, jobStatus jobs.Status, details jobspb.Details, env scheduledjobs.JobSchedulerEnv, sj *jobs.ScheduledJob, - ex sqlutil.InternalExecutor, - txn *kv.Txn, ) error { if jobStatus == jobs.StatusFailed { jobs.DefaultHandleFailedRun( @@ -197,22 +195,18 @@ func (s rowLevelTTLExecutor) Metrics() metric.Struct { // GetCreateScheduleStatement implements the jobs.ScheduledJobController interface. 
func (s rowLevelTTLExecutor) GetCreateScheduleStatement( - ctx context.Context, - env scheduledjobs.JobSchedulerEnv, - txn *kv.Txn, - descsCol *descs.Collection, - sj *jobs.ScheduledJob, - ex sqlutil.InternalExecutor, + ctx context.Context, txn isql.Txn, env scheduledjobs.JobSchedulerEnv, sj *jobs.ScheduledJob, ) (string, error) { + descsCol := descs.FromTxn(txn) args := &catpb.ScheduledRowLevelTTLArgs{} if err := pbtypes.UnmarshalAny(sj.ExecutionArgs().Args, args); err != nil { return "", err } - tbl, err := descsCol.ByIDWithLeased(txn).WithoutNonPublic().Get().Table(ctx, args.TableID) + tbl, err := descsCol.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Table(ctx, args.TableID) if err != nil { return "", err } - tn, err := descs.GetObjectName(ctx, txn, descsCol, tbl) + tn, err := descs.GetObjectName(ctx, txn.KV(), descsCol, tbl) if err != nil { return "", err } @@ -249,16 +243,16 @@ func makeTTLJobDescription(tableDesc catalog.TableDescriptor, tn tree.ObjectName func createRowLevelTTLJob( ctx context.Context, createdByInfo *jobs.CreatedByInfo, - txn *kv.Txn, - descsCol *descs.Collection, + txn isql.Txn, jobRegistry *jobs.Registry, ttlArgs catpb.ScheduledRowLevelTTLArgs, ) (jobspb.JobID, error) { - tableDesc, err := descsCol.ByIDWithLeased(txn).WithoutNonPublic().Get().Table(ctx, ttlArgs.TableID) + descsCol := descs.FromTxn(txn) + tableDesc, err := descsCol.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Table(ctx, ttlArgs.TableID) if err != nil { return 0, err } - tn, err := descs.GetObjectName(ctx, txn, descsCol, tableDesc) + tn, err := descs.GetObjectName(ctx, txn.KV(), descsCol, tableDesc) if err != nil { return 0, err } diff --git a/pkg/sql/type_change.go b/pkg/sql/type_change.go index 584aeeeca495..29dbd80fe450 100644 --- a/pkg/sql/type_change.go +++ b/pkg/sql/type_change.go @@ -21,7 +21,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" - 
"github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog" @@ -31,6 +30,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/typedesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -196,10 +196,10 @@ func (t *typeSchemaChanger) getTypeDescFromStore( ctx context.Context, ) (catalog.TypeDescriptor, error) { var typeDesc catalog.TypeDescriptor - if err := DescsTxn(ctx, t.execCfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { + if err := DescsTxn(ctx, t.execCfg, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { // Avoid GetImmutableTypeByID, downstream logic relies on // catalog.ErrDescriptorNotFound. - desc, err := col.ByID(txn).Get().Desc(ctx, t.typeID) + desc, err := col.ByID(txn.KV()).Get().Desc(ctx, t.typeID) if err != nil { return err } @@ -299,10 +299,10 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error { // (it just cleans up non-public states). 
var multiRegionPreDropIsNecessary bool withDatabaseRegionChangeFinalizer := func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn descs.Txn, f func(finalizer *databaseRegionChangeFinalizer) error, ) error { - typeDesc, err := descsCol.MutableByID(txn).Type(ctx, t.typeID) + typeDesc, err := txn.Descriptors().MutableByID(txn.KV()).Type(ctx, t.typeID) if err != nil { return err } @@ -310,7 +310,6 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error { ctx, txn, t.execCfg, - descsCol, typeDesc.GetParentID(), typeDesc.GetID(), ) @@ -321,20 +320,20 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error { return f(regionChangeFinalizer) } prepareRepartitionedRegionalByRowTables := func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn descs.Txn, ) (repartitioned []*tabledesc.Mutable, err error) { - err = withDatabaseRegionChangeFinalizer(ctx, txn, descsCol, func( + err = withDatabaseRegionChangeFinalizer(ctx, txn, func( finalizer *databaseRegionChangeFinalizer, ) (err error) { - repartitioned, _, err = finalizer.repartitionRegionalByRowTables(ctx, txn) + repartitioned, _, err = finalizer.repartitionRegionalByRowTables(ctx, txn.KV()) return err }) return repartitioned, err } repartitionRegionalByRowTables := func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + ctx context.Context, txn descs.Txn, ) error { - return withDatabaseRegionChangeFinalizer(ctx, txn, descsCol, func( + return withDatabaseRegionChangeFinalizer(ctx, txn, func( finalizer *databaseRegionChangeFinalizer, ) error { return finalizer.preDrop(ctx, txn) @@ -344,8 +343,8 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error { // First, we check if any of the enum values that are being removed are in // use and fail. This is done in a separate txn to the one that mutates the // descriptor, as this validation can take arbitrarily long. 
- validateDrops := func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection) error { - typeDesc, err := descsCol.MutableByID(txn).Type(ctx, t.typeID) + validateDrops := func(ctx context.Context, txn descs.Txn) error { + typeDesc, err := txn.Descriptors().MutableByID(txn.KV()).Type(ctx, t.typeID) if err != nil { return err } @@ -372,7 +371,7 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error { // a lot of data to refresh. We instead defer the repartitioning until // after this checking confirms the safety of the change. if multiRegionPreDropIsNecessary { - repartitioned, err := prepareRepartitionedRegionalByRowTables(ctx, txn, descsCol) + repartitioned, err := prepareRepartitionedRegionalByRowTables(ctx, txn) if err != nil { return err } @@ -380,20 +379,20 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error { for i, d := range repartitioned { synthetic[i] = d } - descsCol.SetSyntheticDescriptors(synthetic) + txn.Descriptors().SetSyntheticDescriptors(synthetic) } for _, member := range toDrop { - if err := t.canRemoveEnumValue(ctx, typeDesc, txn, &member, descsCol); err != nil { + if err := t.canRemoveEnumValue(ctx, typeDesc, txn, &member, txn.Descriptors()); err != nil { return err } } return nil } - if err := DescsTxn(ctx, t.execCfg, validateDrops); err != nil { + if err := t.execCfg.InternalDB.DescsTxn(ctx, validateDrops); err != nil { return err } if multiRegionPreDropIsNecessary { - if err := DescsTxn(ctx, t.execCfg, repartitionRegionalByRowTables); err != nil { + if err := t.execCfg.InternalDB.DescsTxn(ctx, repartitionRegionalByRowTables); err != nil { return err } } @@ -401,8 +400,8 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error { // Now that we've ascertained that the enum values can be removed, and // have performed any necessary pre-drop work, we can actually go about // modifying the type descriptor. 
- run := func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection) error { - typeDesc, err := descsCol.MutableByID(txn).Type(ctx, t.typeID) + run := func(ctx context.Context, txn descs.Txn) error { + typeDesc, err := txn.Descriptors().MutableByID(txn.KV()).Type(ctx, t.typeID) if err != nil { return err } @@ -429,7 +428,6 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error { ctx, txn, t.execCfg, - descsCol, typeDesc.GetParentID(), typeDesc.GetID(), ) @@ -438,8 +436,8 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error { } defer regionChangeFinalizer.cleanup() - b := txn.NewBatch() - if err := descsCol.WriteDescToBatch( + b := txn.KV().NewBatch() + if err := txn.Descriptors().WriteDescToBatch( ctx, kvTrace, typeDesc, b, ); err != nil { return err @@ -448,17 +446,17 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error { // The version of the array type needs to get bumped as well so that // changes to the underlying type are picked up. Simply reading the // mutable descriptor and writing it back should do the trick. - arrayTypeDesc, err := descsCol.MutableByID(txn).Type(ctx, typeDesc.ArrayTypeID) + arrayTypeDesc, err := txn.Descriptors().MutableByID(txn.KV()).Type(ctx, typeDesc.ArrayTypeID) if err != nil { return err } - if err := descsCol.WriteDescToBatch( + if err := txn.Descriptors().WriteDescToBatch( ctx, kvTrace, arrayTypeDesc, b, ); err != nil { return err } - if err := txn.Run(ctx, b); err != nil { + if err := txn.KV().Run(ctx, b); err != nil { return err } @@ -479,7 +477,7 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error { return nil } - if err := DescsTxn(ctx, t.execCfg, run); err != nil { + if err := t.execCfg.InternalDB.DescsTxn(ctx, run); err != nil { return err } @@ -492,12 +490,12 @@ func (t *typeSchemaChanger) exec(ctx context.Context) error { // If the type is being dropped, remove the descriptor here only // if the declarative schema changer is not in use. 
if typeDesc.Dropped() && typeDesc.GetDeclarativeSchemaChangerState() == nil { - if err := DescsTxn(ctx, t.execCfg, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { - b := txn.NewBatch() + if err := DescsTxn(ctx, t.execCfg, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { + b := txn.KV().NewBatch() if err := col.DeleteDescToBatch(ctx, kvTrace, typeDesc.GetID(), b); err != nil { return err } - return txn.Run(ctx, b) + return txn.KV().Run(ctx, b) }); err != nil { return err } @@ -545,8 +543,8 @@ func applyFilterOnEnumMembers( func (t *typeSchemaChanger) cleanupEnumValues(ctx context.Context) error { var regionChangeFinalizer *databaseRegionChangeFinalizer // Cleanup: - cleanup := func(ctx context.Context, txn *kv.Txn, descsCol *descs.Collection) error { - typeDesc, err := descsCol.MutableByID(txn).Type(ctx, t.typeID) + cleanup := func(ctx context.Context, txn descs.Txn) error { + typeDesc, err := txn.Descriptors().MutableByID(txn.KV()).Type(ctx, t.typeID) if err != nil { return err } @@ -560,7 +558,6 @@ func (t *typeSchemaChanger) cleanupEnumValues(ctx context.Context) error { ctx, txn, t.execCfg, - descsCol, typeDesc.GetParentID(), typeDesc.GetID(), ) @@ -585,7 +582,7 @@ func (t *typeSchemaChanger) cleanupEnumValues(ctx context.Context) error { return t.isTransitioningInCurrentJob(member) && enumMemberIsAdding(member) }) - if err := descsCol.WriteDesc(ctx, true /* kvTrace */, typeDesc, txn); err != nil { + if err := txn.Descriptors().WriteDesc(ctx, true /* kvTrace */, typeDesc, txn.KV()); err != nil { return err } @@ -597,7 +594,7 @@ func (t *typeSchemaChanger) cleanupEnumValues(ctx context.Context) error { return nil } - return DescsTxn(ctx, t.execCfg, cleanup) + return t.execCfg.InternalDB.DescsTxn(ctx, cleanup) } // convertToSQLStringRepresentation takes an array of bytes (the physical @@ -752,12 +749,12 @@ func findUsagesOfEnumValueInViewQuery( func (t *typeSchemaChanger) canRemoveEnumValue( ctx context.Context, typeDesc 
*typedesc.Mutable, - txn *kv.Txn, + txn isql.Txn, member *descpb.TypeDescriptor_EnumMember, descsCol *descs.Collection, ) error { for _, ID := range typeDesc.ReferencingDescriptorIDs { - desc, err := descsCol.ByID(txn).WithoutNonPublic().Get().Table(ctx, ID) + desc, err := descsCol.ByID(txn.KV()).WithoutNonPublic().Get().Table(ctx, ID) if err != nil { return errors.Wrapf(err, "could not validate enum value removal for %q", member.LogicalRepresentation) @@ -912,7 +909,7 @@ func (t *typeSchemaChanger) canRemoveEnumValue( // be unset by default) when executing the query constructed above. This is // because the enum value may be used in a view expression, which is // name resolved in the context of the type's database. - dbDesc, err := descsCol.ByID(txn).WithoutNonPublic().Get().Database(ctx, typeDesc.ParentID) + dbDesc, err := descsCol.ByID(txn.KV()).WithoutNonPublic().Get().Database(ctx, typeDesc.ParentID) const validationErr = "could not validate removal of enum value %q" if err != nil { return errors.Wrapf(err, validationErr, member.LogicalRepresentation) @@ -921,7 +918,7 @@ func (t *typeSchemaChanger) canRemoveEnumValue( User: username.RootUserName(), Database: dbDesc.GetName(), } - rows, err := t.execCfg.InternalExecutor.QueryRowEx(ctx, "count-value-usage", txn, override, query.String()) + rows, err := txn.QueryRowEx(ctx, "count-value-usage", txn.KV(), override, query.String()) if err != nil { return errors.Wrapf(err, validationErr, member.LogicalRepresentation) } @@ -950,7 +947,7 @@ func (t *typeSchemaChanger) canRemoveEnumValue( } // Do validation for the array type now. 
- arrayTypeDesc, err := descsCol.ByIDWithLeased(txn).WithoutNonPublic().Get().Type(ctx, typeDesc.ArrayTypeID) + arrayTypeDesc, err := descsCol.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Type(ctx, typeDesc.ArrayTypeID) if err != nil { return err } @@ -1078,13 +1075,13 @@ func (t *typeSchemaChanger) canRemoveEnumValueFromArrayUsages( ctx context.Context, arrayTypeDesc catalog.TypeDescriptor, member *descpb.TypeDescriptor_EnumMember, - txn *kv.Txn, + txn isql.Txn, descsCol *descs.Collection, ) error { const validationErr = "could not validate removal of enum value %q" for i := 0; i < arrayTypeDesc.NumReferencingDescriptors(); i++ { id := arrayTypeDesc.GetReferencingDescriptorID(i) - desc, err := descsCol.ByIDWithLeased(txn).WithoutNonPublic().Get().Table(ctx, id) + desc, err := descsCol.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Table(ctx, id) if err != nil { return errors.Wrapf(err, validationErr, member.LogicalRepresentation) } @@ -1133,7 +1130,7 @@ func (t *typeSchemaChanger) canRemoveEnumValueFromArrayUsages( } query.WriteString(fmt.Sprintf(") WHERE unnest = %s", sqlPhysRep)) - dbDesc, err := descsCol.ByID(txn).WithoutNonPublic().Get().Database(ctx, arrayTypeDesc.GetParentID()) + dbDesc, err := descsCol.ByID(txn.KV()).WithoutNonPublic().Get().Database(ctx, arrayTypeDesc.GetParentID()) if err != nil { return errors.Wrapf(err, validationErr, member.LogicalRepresentation) } @@ -1141,10 +1138,10 @@ func (t *typeSchemaChanger) canRemoveEnumValueFromArrayUsages( User: username.RootUserName(), Database: dbDesc.GetName(), } - rows, err := t.execCfg.InternalExecutor.QueryRowEx( + rows, err := txn.QueryRowEx( ctx, "count-array-type-value-usage", - txn, + txn.KV(), override, query.String(), ) @@ -1153,7 +1150,7 @@ func (t *typeSchemaChanger) canRemoveEnumValueFromArrayUsages( } if len(rows) > 0 { // Use an FQN in the error message. 
- parentSchema, err := descsCol.ByIDWithLeased(txn).WithoutNonPublic().Get().Schema(ctx, desc.GetParentSchemaID()) + parentSchema, err := descsCol.ByIDWithLeased(txn.KV()).WithoutNonPublic().Get().Schema(ctx, desc.GetParentSchemaID()) if err != nil { return err } diff --git a/pkg/sql/unsplit.go b/pkg/sql/unsplit.go index ecba7803a4a5..9cbf90c3009d 100644 --- a/pkg/sql/unsplit.go +++ b/pkg/sql/unsplit.go @@ -96,7 +96,7 @@ WHERE s.descriptor_id = $1 AND s.end_key > r.start_key AND r.start_key >= s.start_key -- only consider split points inside the table keyspace. AND split_enforced_until IS NOT NULL` - ie := params.p.ExecCfg().InternalExecutorFactory.NewInternalExecutor(params.SessionData()) + ie := params.p.ExecCfg().InternalDB.NewInternalExecutor(params.SessionData()) it, err := ie.QueryIteratorEx( params.ctx, "split points query", params.p.txn, sessiondata.NoSessionDataOverride, statement, diff --git a/pkg/sql/user.go b/pkg/sql/user.go index 79082b846eea..c07972aa11e0 100644 --- a/pkg/sql/user.go +++ b/pkg/sql/user.go @@ -15,7 +15,6 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/security/password" "github.com/cockroachdb/cockroach/pkg/security/username" @@ -23,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" @@ -30,7 +30,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" "github.com/cockroachdb/cockroach/pkg/sql/sessioninit" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" 
"github.com/cockroachdb/cockroach/pkg/sql/syntheticprivilege" "github.com/cockroachdb/cockroach/pkg/util/contextutil" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -77,11 +76,7 @@ import ( // lookup succeeded before and there haven't been any CREATE/ALTER/DROP ROLE // commands since, then the cache is used without a KV lookup. func GetUserSessionInitInfo( - ctx context.Context, - execCfg *ExecutorConfig, - ie *InternalExecutor, - user username.SQLUsername, - databaseName string, + ctx context.Context, execCfg *ExecutorConfig, user username.SQLUsername, databaseName string, ) ( exists bool, canLoginSQL bool, @@ -132,17 +127,10 @@ func GetUserSessionInitInfo( } // Find whether the user is an admin. - return execCfg.InternalExecutorFactory.DescsTxn(ctx, execCfg.DB, func( - ctx context.Context, txn *kv.Txn, descsCol *descs.Collection, + return execCfg.InternalDB.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { - memberships, err := MemberOfWithAdminOption( - ctx, - execCfg, - ie, - descsCol, - txn, - user, - ) + memberships, err := MemberOfWithAdminOption(ctx, execCfg, txn, user) if err != nil { return err } @@ -218,8 +206,7 @@ func retrieveSessionInitInfoWithCache( aInfo, retErr = execCfg.SessionInitCache.GetAuthInfo( ctx, execCfg.Settings, - execCfg.DB, - execCfg.InternalExecutorFactory, + execCfg.InternalDB, userName, retrieveAuthInfo, makePlanner, @@ -234,8 +221,7 @@ func retrieveSessionInitInfoWithCache( settingsEntries, retErr = execCfg.SessionInitCache.GetDefaultSettings( ctx, execCfg.Settings, - execCfg.DB, - execCfg.InternalExecutorFactory, + execCfg.InternalDB, userName, databaseName, retrieveDefaultSettings, @@ -251,7 +237,7 @@ func retrieveSessionInitInfoWithCache( func retrieveAuthInfo( ctx context.Context, - f descs.TxnManager, + f descs.DB, user username.SQLUsername, makePlanner func(opName string) (interface{}, func()), settings *cluster.Settings, @@ -261,7 +247,7 @@ func retrieveAuthInfo( // we should always look up the latest 
data. const getHashedPassword = `SELECT "hashedPassword" FROM system.public.users ` + `WHERE username=$1` - ie := f.MakeInternalExecutorWithoutTxn() + ie := f.Executor() values, err := ie.QueryRowEx( ctx, "get-hashed-pwd", nil, /* txn */ sessiondata.RootUserSessionDataOverride, @@ -360,7 +346,7 @@ func retrieveAuthInfo( } func retrieveDefaultSettings( - ctx context.Context, f descs.TxnManager, user username.SQLUsername, databaseID descpb.ID, + ctx context.Context, f descs.DB, user username.SQLUsername, databaseID descpb.ID, ) (settingsEntries []sessioninit.SettingsCacheEntry, retErr error) { // Add an empty slice for all the keys so that something gets cached and // prevents a lookup for the same key from happening later. @@ -392,7 +378,7 @@ WHERE ` // We use a nil txn as role settings are not tied to any transaction state, // and we should always look up the latest data. - ie := f.MakeInternalExecutorWithoutTxn() + ie := f.Executor() defaultSettingsIt, err := ie.QueryIteratorEx( ctx, "get-default-settings", nil, /* txn */ sessiondata.RootUserSessionDataOverride, @@ -446,7 +432,7 @@ var userLoginTimeout = settings.RegisterDurationSetting( // GetAllRoles returns a "set" (map) of Roles -> true. func (p *planner) GetAllRoles(ctx context.Context) (map[username.SQLUsername]bool, error) { query := `SELECT username FROM system.users` - it, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryIteratorEx( + it, err := p.InternalSQLTxn().QueryIteratorEx( ctx, "read-users", p.txn, sessiondata.RootUserSessionDataOverride, query) @@ -469,16 +455,14 @@ func (p *planner) GetAllRoles(ctx context.Context) (map[username.SQLUsername]boo // RoleExists returns true if the role exists. func (p *planner) RoleExists(ctx context.Context, role username.SQLUsername) (bool, error) { - return RoleExists(ctx, p.execCfg.InternalExecutor, p.Txn(), role) + return RoleExists(ctx, p.InternalSQLTxn(), role) } // RoleExists returns true if the role exists. 
-func RoleExists( - ctx context.Context, ie sqlutil.InternalExecutor, txn *kv.Txn, role username.SQLUsername, -) (bool, error) { +func RoleExists(ctx context.Context, txn isql.Txn, role username.SQLUsername) (bool, error) { query := `SELECT username FROM system.users WHERE username = $1` - row, err := ie.QueryRowEx( - ctx, "read-users", txn, + row, err := txn.QueryRowEx( + ctx, "read-users", txn.KV(), sessiondata.RootUserSessionDataOverride, query, role, ) @@ -732,7 +716,7 @@ func updateUserPasswordHash( runFn := getUserInfoRunFn(execCfg, userName, "set-hash-timeout") return runFn(ctx, func(ctx context.Context) error { - return DescsTxn(ctx, execCfg, func(ctx context.Context, txn *kv.Txn, d *descs.Collection) error { + return DescsTxn(ctx, execCfg, func(ctx context.Context, txn isql.Txn, d *descs.Collection) error { // NB: we cannot use ALTER USER ... WITH PASSWORD here, // because it is not guaranteed to recognize the hash in the // WITH PASSWORD clause. @@ -756,10 +740,10 @@ func updateUserPasswordHash( // we'd be writing to system.users for all of them and queue // potentially many schema updates, creating a bottleneck. // - rowsAffected, err := execCfg.InternalExecutor.Exec( + rowsAffected, err := txn.Exec( ctx, "set-password-hash", - txn, + txn.KV(), `UPDATE system.users SET "hashedPassword" = $3 WHERE username = $1 AND "hashedPassword" = $2`, userName.Normalized(), prevHash, @@ -769,12 +753,12 @@ func updateUserPasswordHash( // Error, or no update took place. return err } - usersTable, err := d.MutableByID(txn).Table(ctx, keys.UsersTableID) + usersTable, err := d.MutableByID(txn.KV()).Table(ctx, keys.UsersTableID) if err != nil { return err } // WriteDesc will internally bump the version. 
- return d.WriteDesc(ctx, false /* kvTrace */, usersTable, txn) + return d.WriteDesc(ctx, false /* kvTrace */, usersTable, txn.KV()) }) }) } diff --git a/pkg/sql/zone_config.go b/pkg/sql/zone_config.go index dfdb2c649b4c..e5eb62577864 100644 --- a/pkg/sql/zone_config.go +++ b/pkg/sql/zone_config.go @@ -25,6 +25,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/catalog/zone" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -506,7 +507,7 @@ func prepareRemovedPartitionZoneConfigs( func deleteRemovedPartitionZoneConfigs( ctx context.Context, - txn *kv.Txn, + txn isql.Txn, tableDesc catalog.TableDescriptor, descriptors *descs.Collection, indexID descpb.IndexID, @@ -516,11 +517,11 @@ func deleteRemovedPartitionZoneConfigs( kvTrace bool, ) error { update, err := prepareRemovedPartitionZoneConfigs( - ctx, txn, tableDesc, indexID, oldPart, newPart, execCfg, descriptors, + ctx, txn.KV(), tableDesc, indexID, oldPart, newPart, execCfg, descriptors, ) if update == nil || err != nil { return err } - _, err = writeZoneConfigUpdate(ctx, txn, kvTrace, descriptors, update) + _, err = writeZoneConfigUpdate(ctx, txn.KV(), kvTrace, descriptors, update) return err } diff --git a/pkg/sql/zone_config_test.go b/pkg/sql/zone_config_test.go index f87010eee8a1..521c68106684 100644 --- a/pkg/sql/zone_config_test.go +++ b/pkg/sql/zone_config_test.go @@ -27,6 +27,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils" 
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -138,9 +139,9 @@ func TestGetZoneConfig(t *testing.T) { // Verify sql.GetZoneConfigInTxn. dummyIndex := systemschema.CommentsTable.GetPrimaryIndex() - if err := sql.TestingDescsTxn(context.Background(), s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { + if err := sql.TestingDescsTxn(context.Background(), s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { _, zoneCfg, subzone, err := sql.GetZoneConfigInTxn( - ctx, txn, col, descpb.ID(tc.objectID), dummyIndex, tc.partitionName, false, + ctx, txn.KV(), col, descpb.ID(tc.objectID), dummyIndex, tc.partitionName, false, ) if err != nil { return err @@ -374,9 +375,9 @@ func TestCascadingZoneConfig(t *testing.T) { // Verify sql.GetZoneConfigInTxn. dummyIndex := systemschema.CommentsTable.GetPrimaryIndex() - if err := sql.TestingDescsTxn(context.Background(), s, func(ctx context.Context, txn *kv.Txn, col *descs.Collection) error { + if err := sql.TestingDescsTxn(context.Background(), s, func(ctx context.Context, txn isql.Txn, col *descs.Collection) error { _, zoneCfg, subzone, err := sql.GetZoneConfigInTxn( - ctx, txn, col, descpb.ID(tc.objectID), dummyIndex, tc.partitionName, false, + ctx, txn.KV(), col, descpb.ID(tc.objectID), dummyIndex, tc.partitionName, false, ) if err != nil { return err diff --git a/pkg/testutils/serverutils/test_server_shim.go b/pkg/testutils/serverutils/test_server_shim.go index 8480ec3f1053..2734ac59047b 100644 --- a/pkg/testutils/serverutils/test_server_shim.go +++ b/pkg/testutils/serverutils/test_server_shim.go @@ -131,12 +131,12 @@ type TestServerInterface interface { LeaseManager() interface{} // InternalExecutor returns a *sql.InternalExecutor as an interface{} (which - // also implements sqlutil.InternalExecutor if the test cannot depend on sql). + // also implements isql.Executor if the test cannot depend on sql). 
InternalExecutor() interface{} // InternalExecutorInternalExecutorFactory returns a - // sqlutil.InternalExecutorFactory as an interface{}. - InternalExecutorFactory() interface{} + // isql.InternalDB as an interface{}. + InternalDB() interface{} // TracerI returns a *tracing.Tracer as an interface{}. TracerI() interface{} diff --git a/pkg/upgrade/BUILD.bazel b/pkg/upgrade/BUILD.bazel index ea7a83c6cdc6..8fdca3e88d35 100644 --- a/pkg/upgrade/BUILD.bazel +++ b/pkg/upgrade/BUILD.bazel @@ -25,8 +25,8 @@ go_library( "//pkg/sql/catalog/descs", "//pkg/sql/catalog/lease", "//pkg/sql/catalog/resolver", + "//pkg/sql/isql", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/upgrade/upgradebase", "//pkg/util/log", "//pkg/util/stop", diff --git a/pkg/upgrade/migrationstable/BUILD.bazel b/pkg/upgrade/migrationstable/BUILD.bazel index 65a6d289b4c4..7412b28a76c0 100644 --- a/pkg/upgrade/migrationstable/BUILD.bazel +++ b/pkg/upgrade/migrationstable/BUILD.bazel @@ -9,9 +9,9 @@ go_library( deps = [ "//pkg/kv", "//pkg/roachpb", + "//pkg/sql/isql", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/util/timeutil", "@com_github_cockroachdb_errors//:errors", ], diff --git a/pkg/upgrade/migrationstable/migrations_table.go b/pkg/upgrade/migrationstable/migrations_table.go index 094a5117d2e3..97e4b50909d4 100644 --- a/pkg/upgrade/migrationstable/migrations_table.go +++ b/pkg/upgrade/migrationstable/migrations_table.go @@ -16,17 +16,15 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/errors" ) // MarkMigrationCompleted inserts a row in system.migrations. 
-func MarkMigrationCompleted( - ctx context.Context, ie sqlutil.InternalExecutor, v roachpb.Version, -) error { +func MarkMigrationCompleted(ctx context.Context, ie isql.Executor, v roachpb.Version) error { return markMigrationCompletedInner(ctx, ie, v, false /* ignoreExisting */) } @@ -35,13 +33,13 @@ func MarkMigrationCompleted( // failing if the respective row already exists. If the row already exists, is // not changed. func MarkMigrationCompletedIdempotent( - ctx context.Context, ie sqlutil.InternalExecutor, v roachpb.Version, + ctx context.Context, ie isql.Executor, v roachpb.Version, ) error { return markMigrationCompletedInner(ctx, ie, v, true /* ignoreExisting */) } func markMigrationCompletedInner( - ctx context.Context, ie sqlutil.InternalExecutor, v roachpb.Version, ignoreExisting bool, + ctx context.Context, ie isql.Executor, v roachpb.Version, ignoreExisting bool, ) error { query := ` INSERT @@ -93,7 +91,7 @@ func CheckIfMigrationCompleted( ctx context.Context, v roachpb.Version, txn *kv.Txn, - ie sqlutil.InternalExecutor, + ex isql.Executor, enterpriseEnabled bool, staleOpt StaleReadOpt, ) (alreadyCompleted bool, _ error) { @@ -117,7 +115,7 @@ SELECT count(*) query = fmt.Sprintf(queryFormat, "") } - row, err := ie.QueryRow( + row, err := ex.QueryRow( ctx, "migration-job-find-already-completed", txn, diff --git a/pkg/upgrade/system_upgrade.go b/pkg/upgrade/system_upgrade.go index e7386f2ee7a1..13c5a0e57168 100644 --- a/pkg/upgrade/system_upgrade.go +++ b/pkg/upgrade/system_upgrade.go @@ -15,11 +15,10 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/serverpb" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/util/stop" 
"github.com/cockroachdb/logtags" ) @@ -106,11 +105,10 @@ type Cluster interface { // SystemDeps are the dependencies of upgrades which perform actions at the // KV layer on behalf of the system tenant. type SystemDeps struct { - Cluster Cluster - DB *kv.DB - InternalExecutor sqlutil.InternalExecutor - DistSender *kvcoord.DistSender - Stopper *stop.Stopper + Cluster Cluster + DB descs.DB + DistSender *kvcoord.DistSender + Stopper *stop.Stopper } // SystemUpgrade is an implementation of Upgrade for system-level diff --git a/pkg/upgrade/tenant_upgrade.go b/pkg/upgrade/tenant_upgrade.go index 19d7f9d9251d..f805cd8c4629 100644 --- a/pkg/upgrade/tenant_upgrade.go +++ b/pkg/upgrade/tenant_upgrade.go @@ -24,8 +24,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/upgrade/upgradebase" "github.com/cockroachdb/logtags" ) @@ -33,14 +33,16 @@ import ( // TenantDeps are the dependencies of upgrades which perform actions at the // SQL layer. type TenantDeps struct { - DB *kv.DB - Codec keys.SQLCodec - Settings *cluster.Settings - InternalExecutorFactory descs.TxnManager - LeaseManager *lease.Manager - JobRegistry *jobs.Registry - InternalExecutor sqlutil.InternalExecutor - SessionData *sessiondata.SessionData + KVDB *kv.DB + Codec keys.SQLCodec + Settings *cluster.Settings + DB descs.DB + LeaseManager *lease.Manager + JobRegistry *jobs.Registry + SessionData *sessiondata.SessionData + + // TODO(ajwerner): Remove this in favor of the descs.DB above. 
+ InternalExecutor isql.Executor SpanConfig struct { // deps for span config upgrades; can be removed accordingly spanconfig.KVAccessor diff --git a/pkg/upgrade/upgradejob/upgrade_job.go b/pkg/upgrade/upgradejob/upgrade_job.go index 80cc6a72e105..b1b199a6db52 100644 --- a/pkg/upgrade/upgradejob/upgrade_job.go +++ b/pkg/upgrade/upgradejob/upgrade_job.go @@ -64,16 +64,17 @@ func (r resumer) Resume(ctx context.Context, execCtxI interface{}) error { execCtx := execCtxI.(sql.JobExecContext) pl := r.j.Payload() v := pl.GetMigration().ClusterVersion.Version - ie := execCtx.ExecCfg().InternalExecutor - + db := execCtx.ExecCfg().InternalDB + ex := db.Executor() enterpriseEnabled := base.CCLDistributionAndEnterpriseEnabled( execCtx.ExecCfg().Settings, execCtx.ExecCfg().NodeInfo.LogicalClusterID()) alreadyCompleted, err := migrationstable.CheckIfMigrationCompleted( - ctx, v, nil /* txn */, ie, enterpriseEnabled, migrationstable.ConsistentRead, + ctx, v, nil /* txn */, ex, + enterpriseEnabled, migrationstable.ConsistentRead, ) if alreadyCompleted || err != nil { return errors.Wrapf(err, "checking migration completion for %v", v) } mc := execCtx.MigrationJobDeps() m, ok := mc.GetUpgrade(v) if !ok { @@ -84,15 +85,14 @@ func (r resumer) Resume(ctx context.Context, execCtxI interface{}) error { err = m.Run(ctx, v, mc.SystemDeps()) case *upgrade.TenantUpgrade: tenantDeps := upgrade.TenantDeps{ - DB: execCtx.ExecCfg().DB, - Codec: execCtx.ExecCfg().Codec, - Settings: execCtx.ExecCfg().Settings, - InternalExecutorFactory: execCtx.ExecCfg().InternalExecutorFactory, - LeaseManager: execCtx.ExecCfg().LeaseManager, - InternalExecutor: execCtx.ExecCfg().InternalExecutor, - JobRegistry: execCtx.ExecCfg().JobRegistry, - TestingKnobs: execCtx.ExecCfg().UpgradeTestingKnobs, - SessionData: execCtx.SessionData(), + Codec: execCtx.ExecCfg().Codec, + Settings: execCtx.ExecCfg().Settings, + DB: execCtx.ExecCfg().InternalDB, + LeaseManager:
execCtx.ExecCfg().LeaseManager, + InternalExecutor: ex, + JobRegistry: execCtx.ExecCfg().JobRegistry, + TestingKnobs: execCtx.ExecCfg().UpgradeTestingKnobs, + SessionData: execCtx.SessionData(), } tenantDeps.SpanConfig.KVAccessor = execCtx.ExecCfg().SpanConfigKVAccessor tenantDeps.SpanConfig.Splitter = execCtx.ExecCfg().SpanConfigSplitter @@ -127,7 +127,7 @@ func (r resumer) Resume(ctx context.Context, execCtxI interface{}) error { // Mark the upgrade as having been completed so that subsequent iterations // no-op and new jobs are not created. - if err := migrationstable.MarkMigrationCompleted(ctx, ie, v); err != nil { + if err := migrationstable.MarkMigrationCompleted(ctx, ex, v); err != nil { return errors.Wrapf(err, "marking migration complete for %v", v) } return nil diff --git a/pkg/upgrade/upgrademanager/BUILD.bazel b/pkg/upgrade/upgrademanager/BUILD.bazel index 07e199d5ba31..0ccbf6cd9d1c 100644 --- a/pkg/upgrade/upgrademanager/BUILD.bazel +++ b/pkg/upgrade/upgrademanager/BUILD.bazel @@ -19,11 +19,10 @@ go_library( "//pkg/server/settingswatcher", "//pkg/settings/cluster", "//pkg/sql", - "//pkg/sql/catalog/descs", "//pkg/sql/catalog/lease", + "//pkg/sql/isql", "//pkg/sql/protoreflect", "//pkg/sql/sem/tree", - "//pkg/sql/sqlutil", "//pkg/upgrade", "//pkg/upgrade/migrationstable", "//pkg/upgrade/upgradebase", @@ -58,7 +57,7 @@ go_test( "//pkg/server", "//pkg/settings/cluster", "//pkg/sql/execinfra", - "//pkg/sql/sqlutil", + "//pkg/sql/isql", "//pkg/testutils", "//pkg/testutils/serverutils", "//pkg/testutils/sqlutils", diff --git a/pkg/upgrade/upgrademanager/manager.go b/pkg/upgrade/upgrademanager/manager.go index d79e1b849bea..74456eecae1e 100644 --- a/pkg/upgrade/upgrademanager/manager.go +++ b/pkg/upgrade/upgrademanager/manager.go @@ -28,11 +28,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/server/settingswatcher" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" -
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/protoreflect" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/upgrade" "github.com/cockroachdb/cockroach/pkg/upgrade/migrationstable" "github.com/cockroachdb/cockroach/pkg/upgrade/upgradebase" @@ -50,8 +49,7 @@ import ( type Manager struct { deps upgrade.SystemDeps lm *lease.Manager - ie sqlutil.InternalExecutor - ief descs.TxnManager + ie isql.Executor jr *jobs.Registry codec keys.SQLCodec settings *cluster.Settings @@ -77,11 +75,12 @@ func (m *Manager) SystemDeps() upgrade.SystemDeps { // NewManager constructs a new Manager. The SystemDeps parameter may be zero in // secondary tenants. The testingKnobs parameter may be nil. +// +// TODO(ajwerner): Remove the ie argument given the isql.DB in deps. func NewManager( deps upgrade.SystemDeps, lm *lease.Manager, - ie sqlutil.InternalExecutor, - ief descs.TxnManager, + ie isql.Executor, jr *jobs.Registry, codec keys.SQLCodec, settings *cluster.Settings, @@ -96,7 +95,6 @@ func NewManager( deps: deps, lm: lm, ie: ie, - ief: ief, jr: jr, codec: codec, settings: settings, @@ -246,7 +244,7 @@ func (m *Manager) RunPermanentUpgrades(ctx context.Context, upToVersion roachpb. // // TODO(andrei): Get rid of this once compatibility with 22.2 is not necessary. startupMigrationAlreadyRan, err := checkOldStartupMigrationRan( - ctx, u.V22_2StartupMigrationName(), m.deps.DB, m.codec) + ctx, u.V22_2StartupMigrationName(), m.deps.DB.KV(), m.codec) if err != nil { return err } @@ -548,13 +546,12 @@ func (m *Manager) runMigration( // The TenantDeps used here are incomplete, but enough for the "permanent // upgrades" that run under this testing knob. 
if err := upg.Run(ctx, mig.Version(), upgrade.TenantDeps{ - DB: m.deps.DB, - Codec: m.codec, - Settings: m.settings, - LeaseManager: m.lm, - InternalExecutor: m.ie, - InternalExecutorFactory: m.ief, - JobRegistry: m.jr, + DB: m.deps.DB, + Codec: m.codec, + Settings: m.settings, + LeaseManager: m.lm, + InternalExecutor: m.ie, + JobRegistry: m.jr, }); err != nil { return err } @@ -577,10 +574,10 @@ func (m *Manager) runMigration( } if alreadyExisting { log.Infof(ctx, "waiting for %s", mig.Name()) - return m.jr.WaitForJobs(ctx, m.ie, []jobspb.JobID{id}) + return m.jr.WaitForJobs(ctx, []jobspb.JobID{id}) } else { log.Infof(ctx, "running %s", mig.Name()) - return m.jr.Run(ctx, m.ie, []jobspb.JobID{id}) + return m.jr.Run(ctx, []jobspb.JobID{id}) } } } @@ -589,10 +586,10 @@ func (m *Manager) getOrCreateMigrationJob( ctx context.Context, user username.SQLUsername, version roachpb.Version, name string, ) (alreadyCompleted, alreadyExisting bool, jobID jobspb.JobID, _ error) { newJobID := m.jr.MakeJobID() - if err := m.deps.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { + if err := m.deps.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) (err error) { enterpriseEnabled := base.CCLDistributionAndEnterpriseEnabled(m.settings, m.clusterID) alreadyCompleted, err = migrationstable.CheckIfMigrationCompleted( - ctx, version, txn, m.ie, enterpriseEnabled, migrationstable.ConsistentRead, + ctx, version, txn.KV(), txn, enterpriseEnabled, migrationstable.ConsistentRead, ) if err != nil && ctx.Err() == nil { log.Warningf(ctx, "failed to check if migration already completed: %v", err) @@ -615,7 +612,7 @@ func (m *Manager) getOrCreateMigrationJob( } func (m *Manager) getRunningMigrationJob( - ctx context.Context, txn *kv.Txn, version roachpb.Version, + ctx context.Context, txn isql.Txn, version roachpb.Version, ) (found bool, jobID jobspb.JobID, _ error) { // Wrap the version into a ClusterVersion so that the JSON looks like what the // Payload proto has inside. 
@@ -638,7 +635,7 @@ SELECT id, status if err != nil { return false, 0, errors.Wrap(err, "failed to marshal version to JSON") } - rows, err := m.ie.QueryBuffered(ctx, "migration-manager-find-jobs", txn, query, jsonMsg.String()) + rows, err := txn.QueryBuffered(ctx, "migration-manager-find-jobs", txn.KV(), query, jsonMsg.String()) if err != nil { return false, 0, err } @@ -688,13 +685,12 @@ func (m *Manager) checkPreconditions(ctx context.Context, versions []roachpb.Ver continue } if err := tm.Precondition(ctx, clusterversion.ClusterVersion{Version: v}, upgrade.TenantDeps{ - DB: m.deps.DB, - Codec: m.codec, - Settings: m.settings, - LeaseManager: m.lm, - InternalExecutor: m.ie, - InternalExecutorFactory: m.ief, - JobRegistry: m.jr, + DB: m.deps.DB, + Codec: m.codec, + Settings: m.settings, + LeaseManager: m.lm, + InternalExecutor: m.ie, + JobRegistry: m.jr, }); err != nil { return errors.Wrapf( err, diff --git a/pkg/upgrade/upgrademanager/manager_external_test.go b/pkg/upgrade/upgrademanager/manager_external_test.go index ca291175cde4..587d9050d5f1 100644 --- a/pkg/upgrade/upgrademanager/manager_external_test.go +++ b/pkg/upgrade/upgrademanager/manager_external_test.go @@ -29,7 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" @@ -145,11 +145,7 @@ RETURNING id;`).Scan(&secondID)) runErr := make(chan error) go func() { runErr <- tc.Server(0).JobRegistry().(*jobs.Registry). 
- Run( - ctx, - tc.Server(0).InternalExecutor().(sqlutil.InternalExecutor), - []jobspb.JobID{secondID}, - ) + Run(ctx, []jobspb.JobID{secondID}) }() fakeJobBlockChan := <-ch @@ -174,7 +170,7 @@ RETURNING id;`).Scan(&secondID)) upgrade2Err := make(chan error, 1) go func() { // Use an internal executor to get access to the trace as it happens. - _, err := tc.Server(0).InternalExecutor().(sqlutil.InternalExecutor).Exec( + _, err := tc.Server(0).InternalExecutor().(isql.Executor).Exec( recCtx, "test", nil /* txn */, `SET CLUSTER SETTING version = $1`, endCV.String()) upgrade2Err <- err }() @@ -234,7 +230,7 @@ func TestMigrateUpdatesReplicaVersion(t *testing.T) { return upgrade.NewSystemUpgrade("test", cv, func( ctx context.Context, version clusterversion.ClusterVersion, d upgrade.SystemDeps, ) error { - return d.DB.Migrate(ctx, desc.StartKey, desc.EndKey, cv) + return d.DB.KV().Migrate(ctx, desc.StartKey, desc.EndKey, cv) }), true }, }, diff --git a/pkg/upgrade/upgrades/BUILD.bazel b/pkg/upgrade/upgrades/BUILD.bazel index fbc62e6ed8c0..0a2cbb247ade 100644 --- a/pkg/upgrade/upgrades/BUILD.bazel +++ b/pkg/upgrade/upgrades/BUILD.bazel @@ -54,13 +54,13 @@ go_library( "//pkg/sql/catalog/seqexpr", "//pkg/sql/catalog/systemschema", "//pkg/sql/catalog/tabledesc", + "//pkg/sql/isql", "//pkg/sql/parser", "//pkg/sql/pgwire/pgcode", "//pkg/sql/pgwire/pgerror", "//pkg/sql/sem/catconstants", "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", - "//pkg/sql/sqlutil", "//pkg/sql/types", "//pkg/storage", "//pkg/upgrade", @@ -133,6 +133,7 @@ go_test( "//pkg/sql/catalog/schematelemetry/schematelemetrycontroller", "//pkg/sql/catalog/systemschema", "//pkg/sql/catalog/tabledesc", + "//pkg/sql/isql", "//pkg/sql/privilege", "//pkg/sql/roleoption", "//pkg/sql/sem/builtins/builtinconstants", diff --git a/pkg/upgrade/upgrades/alter_jobs_add_job_type.go b/pkg/upgrade/upgrades/alter_jobs_add_job_type.go index e36026b0e84a..bc0f81badf89 100644 --- a/pkg/upgrade/upgrades/alter_jobs_add_job_type.go +++ 
b/pkg/upgrade/upgrades/alter_jobs_add_job_type.go @@ -67,7 +67,7 @@ func alterSystemJobsAddJobType( func backfillJobTypeColumn( ctx context.Context, cs clusterversion.ClusterVersion, d upgrade.TenantDeps, ) error { - ie := d.InternalExecutorFactory.MakeInternalExecutorWithoutTxn() + ie := d.InternalExecutor _, err := ie.Exec(ctx, "backfill-jobs-type-column", nil /* txn */, backfillTypeColumnStmt, username.RootUser) if err != nil { return err diff --git a/pkg/upgrade/upgrades/desc_id_sequence_for_system_tenant.go b/pkg/upgrade/upgrades/desc_id_sequence_for_system_tenant.go index bf5f56e86c38..58248c5ede82 100644 --- a/pkg/upgrade/upgrades/desc_id_sequence_for_system_tenant.go +++ b/pkg/upgrade/upgrades/desc_id_sequence_for_system_tenant.go @@ -26,7 +26,7 @@ func descIDSequenceForSystemTenant( if !d.Codec.ForSystemTenant() { return nil } - return d.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return d.DB.KV().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { oldEntry, err := txn.GetForUpdate(ctx, keys.LegacyDescIDGenerator) if err != nil { return err diff --git a/pkg/upgrade/upgrades/ensure_sql_schema_telemetry_schedule.go b/pkg/upgrade/upgrades/ensure_sql_schema_telemetry_schedule.go index 429b0c129e12..e0ce9fa41518 100644 --- a/pkg/upgrade/upgrades/ensure_sql_schema_telemetry_schedule.go +++ b/pkg/upgrade/upgrades/ensure_sql_schema_telemetry_schedule.go @@ -14,8 +14,8 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schematelemetry/schematelemetrycontroller" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/upgrade" "github.com/cockroachdb/errors" ) @@ -23,9 +23,9 @@ import ( func ensureSQLSchemaTelemetrySchedule( ctx context.Context, cs clusterversion.ClusterVersion, d upgrade.TenantDeps, ) error { - return d.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + return 
d.DB.Txn(ctx, func(ctx context.Context, txn isql.Txn) error { _, err := schematelemetrycontroller.CreateSchemaTelemetrySchedule( - ctx, d.InternalExecutor, txn, d.Settings, + ctx, txn, d.Settings, ) // If the schedule already exists, we have nothing more to do. This // logic makes the upgrade idempotent. diff --git a/pkg/upgrade/upgrades/fix_userfile_descriptor_corruption.go b/pkg/upgrade/upgrades/fix_userfile_descriptor_corruption.go index 827bda23f3c5..e50f7802306e 100644 --- a/pkg/upgrade/upgrades/fix_userfile_descriptor_corruption.go +++ b/pkg/upgrade/upgrades/fix_userfile_descriptor_corruption.go @@ -20,7 +20,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/types" "github.com/cockroachdb/cockroach/pkg/upgrade" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -41,41 +40,42 @@ import ( func fixInvalidObjectsThatLookLikeBadUserfileConstraint( ctx context.Context, _ clusterversion.ClusterVersion, d upgrade.TenantDeps, ) error { - return d.InternalExecutorFactory.DescsTxnWithExecutor(ctx, d.DB, nil, - func(ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor) error { - query := `SELECT * FROM crdb_internal.invalid_objects` - rows, err := ie.QueryIterator(ctx, "find-invalid-descriptors", txn, query) - if err != nil { - return err - } - defer func() { _ = rows.Close() }() + return d.DB.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, + ) error { + query := `SELECT * FROM crdb_internal.invalid_objects` + rows, err := txn.QueryIterator(ctx, "find-invalid-descriptors", txn.KV(), query) + if err != nil { + return err + } + defer func() { _ = rows.Close() }() - var hasNext bool - for hasNext, err = rows.Next(ctx); hasNext && err == nil; hasNext, err = rows.Next(ctx) { - // 
crdb_internal.invalid_objects has five columns: id, database name, schema name, table name, error. - row := rows.Cur() - tableID := descpb.ID(tree.MustBeDInt(row[0])) - errString := string(tree.MustBeDString(row[4])) - if veryLikelyKnownUserfileBreakage(ctx, txn, descriptors, tableID, errString) { - log.Infof(ctx, "attempting to fix invalid table descriptor %d assuming it is a userfile-related table", tableID) - mutTableDesc, err := descriptors.MutableByID(txn).Table(ctx, tableID) - if err != nil { - return err - } - mutTableDesc.Mutations = nil - mutTableDesc.MutationJobs = nil - if err := descriptors.WriteDesc(ctx, false, mutTableDesc, txn); err != nil { - return err - } + var hasNext bool + for hasNext, err = rows.Next(ctx); hasNext && err == nil; hasNext, err = rows.Next(ctx) { + // crdb_internal.invalid_objects has five columns: id, database name, schema name, table name, error. + row := rows.Cur() + tableID := descpb.ID(tree.MustBeDInt(row[0])) + errString := string(tree.MustBeDString(row[4])) + if veryLikelyKnownUserfileBreakage(ctx, txn.KV(), txn.Descriptors(), tableID, errString) { + log.Infof(ctx, "attempting to fix invalid table descriptor %d assuming it is a userfile-related table", tableID) + mutTableDesc, err := txn.Descriptors().MutableByID(txn.KV()).Table(ctx, tableID) + if err != nil { + return err + } + mutTableDesc.Mutations = nil + mutTableDesc.MutationJobs = nil + if err := txn.Descriptors().WriteDesc(ctx, false, mutTableDesc, txn.KV()); err != nil { + return err } } - if err != nil { - // TODO(ssd): We always return a nil error here because I'm not sure that this - // would be worth failing an upgrade for. - log.Warningf(ctx, "could not fix broken userfile: %v", err) - } - return nil - }) + } + if err != nil { + // TODO(ssd): We always return a nil error here because I'm not sure that this + // would be worth failing an upgrade for. 
+ log.Warningf(ctx, "could not fix broken userfile: %v", err) + } + return nil + }) } // veryLikelyKnownUserfileBreakage returns true if the given descriptor id and diff --git a/pkg/upgrade/upgrades/helpers_test.go b/pkg/upgrade/upgrades/helpers_test.go index 4e9aecee3164..1a3e86d67346 100644 --- a/pkg/upgrade/upgrades/helpers_test.go +++ b/pkg/upgrade/upgrades/helpers_test.go @@ -16,7 +16,6 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -74,11 +73,11 @@ func InjectLegacyTable( table catalog.TableDescriptor, getDeprecatedDescriptor func() *descpb.TableDescriptor, ) { - err := s.InternalExecutorFactory().(descs.TxnManager).DescsTxn(ctx, s.DB(), func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + err := s.InternalDB().(descs.DB).DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { id := table.GetID() - tab, err := descriptors.MutableByID(txn).Table(ctx, id) + tab, err := txn.Descriptors().MutableByID(txn.KV()).Table(ctx, id) if err != nil { return err } @@ -88,7 +87,7 @@ func InjectLegacyTable( } tab.TableDescriptor = builder.BuildCreatedMutableTable().TableDescriptor tab.Version = tab.ClusterVersion().Version + 1 - return descriptors.WriteDesc(ctx, false /* kvTrace */, tab, txn) + return txn.Descriptors().WriteDesc(ctx, false /* kvTrace */, tab, txn.KV()) }) require.NoError(t, err) } @@ -139,10 +138,10 @@ func GetTable( ) catalog.TableDescriptor { var table catalog.TableDescriptor // Retrieve the table. 
- err := s.InternalExecutorFactory().(descs.TxnManager).DescsTxn(ctx, s.DB(), func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + err := s.InternalDB().(descs.DB).DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) (err error) { - table, err = descriptors.ByID(txn).WithoutNonPublic().Get().Table(ctx, tableID) + table, err = txn.Descriptors().ByID(txn.KV()).WithoutNonPublic().Get().Table(ctx, tableID) return err }) require.NoError(t, err) diff --git a/pkg/upgrade/upgrades/permanent_upgrades.go b/pkg/upgrade/upgrades/permanent_upgrades.go index 24824527cd11..e4cbee053df1 100644 --- a/pkg/upgrade/upgrades/permanent_upgrades.go +++ b/pkg/upgrade/upgrades/permanent_upgrades.go @@ -80,7 +80,7 @@ func populateVersionSetting( ctx context.Context, _ clusterversion.ClusterVersion, deps upgrade.SystemDeps, ) error { var v roachpb.Version - if err := deps.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + if err := deps.DB.KV().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return txn.GetProto(ctx, keys.BootstrapVersionKey, &v) }); err != nil { return err @@ -101,7 +101,8 @@ func populateVersionSetting( // (overwriting also seems reasonable, but what for). // We don't allow users to perform version changes until we have run // the insert below. - _, err = deps.InternalExecutor.Exec( + ie := deps.DB.Executor() + _, err = ie.Exec( ctx, "insert-setting", nil, /* txn */ fmt.Sprintf(`INSERT INTO system.settings (name, value, "lastUpdated", "valueType") VALUES ('version', x'%x', now(), 'm') ON CONFLICT(name) DO NOTHING`, b), ) @@ -112,7 +113,7 @@ func populateVersionSetting( // Tenant ID 0 indicates that we're overriding the value for all // tenants. 
tenantID := tree.NewDInt(0) - _, err = deps.InternalExecutor.Exec( + _, err = ie.Exec( ctx, "insert-setting", nil, /* txn */ fmt.Sprintf(`INSERT INTO system.tenant_settings (tenant_id, name, value, "last_updated", "value_type") VALUES (%d, 'version', x'%x', now(), 'm') ON CONFLICT(tenant_id, name) DO NOTHING`, tenantID, b), diff --git a/pkg/upgrade/upgrades/precondition_before_starting_an_upgrade.go b/pkg/upgrade/upgrades/precondition_before_starting_an_upgrade.go index 0cdbcd9a6241..e30b88053b54 100644 --- a/pkg/upgrade/upgrades/precondition_before_starting_an_upgrade.go +++ b/pkg/upgrade/upgrades/precondition_before_starting_an_upgrade.go @@ -16,13 +16,11 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/upgrade" "github.com/cockroachdb/cockroach/pkg/util/log" ) @@ -49,36 +47,35 @@ func preconditionNoInvalidDescriptorsBeforeUpgrading( ctx context.Context, cs clusterversion.ClusterVersion, d upgrade.TenantDeps, ) error { var errMsg strings.Builder - err := d.InternalExecutorFactory.DescsTxnWithExecutor(ctx, d.DB, d.SessionData, - func(ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor) error { - query := `SELECT * FROM crdb_internal.invalid_objects` - rows, err := ie.QueryIterator( - ctx, "check-if-there-are-any-invalid-descriptors", txn /* txn */, query) - if err != nil { - return err - } - var hasNext bool - for hasNext, err = rows.Next(ctx); hasNext && err == nil; hasNext, err = rows.Next(ctx) { - // There exists invalid objects; Accumulate their information into `errMsg`. 
- // `crdb_internal.invalid_objects` has five columns: id, database name, schema name, table name, error. - row := rows.Cur() - descName := tree.MakeTableNameWithSchema( - tree.Name(tree.MustBeDString(row[1])), - tree.Name(tree.MustBeDString(row[2])), - tree.Name(tree.MustBeDString(row[3])), - ) - tableID := descpb.ID(tree.MustBeDInt(row[0])) - errString := string(tree.MustBeDString(row[4])) - // TODO(ssd): Remove in 23.1 once we are sure that the migration which fixes this corruption has run. - if veryLikelyKnownUserfileBreakage(ctx, txn, descriptors, tableID, errString) { - log.Infof(ctx, "ignoring invalid descriptor %v (%v) with error %q because it looks like known userfile-related corruption", - descName.String(), tableID, errString) - } else { - errMsg.WriteString(fmt.Sprintf("invalid descriptor: %v (%v) because %v\n", descName.String(), row[0], row[4])) - } - } + err := d.DB.DescsTxn(ctx, func(ctx context.Context, txn descs.Txn) error { + query := `SELECT * FROM crdb_internal.invalid_objects` + rows, err := txn.QueryIterator( + ctx, "check-if-there-are-any-invalid-descriptors", txn.KV(), query) + if err != nil { return err - }) + } + var hasNext bool + for hasNext, err = rows.Next(ctx); hasNext && err == nil; hasNext, err = rows.Next(ctx) { + // There exists invalid objects; Accumulate their information into `errMsg`. + // `crdb_internal.invalid_objects` has five columns: id, database name, schema name, table name, error. + row := rows.Cur() + descName := tree.MakeTableNameWithSchema( + tree.Name(tree.MustBeDString(row[1])), + tree.Name(tree.MustBeDString(row[2])), + tree.Name(tree.MustBeDString(row[3])), + ) + tableID := descpb.ID(tree.MustBeDInt(row[0])) + errString := string(tree.MustBeDString(row[4])) + // TODO(ssd): Remove in 23.1 once we are sure that the migration which fixes this corruption has run. 
+ if veryLikelyKnownUserfileBreakage(ctx, txn.KV(), txn.Descriptors(), tableID, errString) { + log.Infof(ctx, "ignoring invalid descriptor %v (%v) with error %q because it looks like known userfile-related corruption", + descName.String(), tableID, errString) + } else { + errMsg.WriteString(fmt.Sprintf("invalid descriptor: %v (%v) because %v\n", descName.String(), row[0], row[4])) + } + } + return err + }) if err != nil { return err } diff --git a/pkg/upgrade/upgrades/role_id_sequence_migration.go b/pkg/upgrade/upgrades/role_id_sequence_migration.go index 554f36f7f9f2..e806e1cc95aa 100644 --- a/pkg/upgrade/upgrades/role_id_sequence_migration.go +++ b/pkg/upgrade/upgrades/role_id_sequence_migration.go @@ -23,6 +23,6 @@ func roleIDSequenceMigration( ctx context.Context, _ clusterversion.ClusterVersion, d upgrade.TenantDeps, ) error { return createSystemTable( - ctx, d.DB, d.Settings, d.Codec, systemschema.RoleIDSequence, + ctx, d.DB.KV(), d.Settings, d.Codec, systemschema.RoleIDSequence, ) } diff --git a/pkg/upgrade/upgrades/role_members_ids_migration.go b/pkg/upgrade/upgrades/role_members_ids_migration.go index 1e03af74ab81..238531df33cf 100644 --- a/pkg/upgrade/upgrades/role_members_ids_migration.go +++ b/pkg/upgrade/upgrades/role_members_ids_migration.go @@ -100,7 +100,7 @@ ALTER COLUMN member_id SET NOT NULL func backfillSystemRoleMembersIDColumns( ctx context.Context, cs clusterversion.ClusterVersion, d upgrade.TenantDeps, ) error { - ie := d.InternalExecutorFactory.MakeInternalExecutorWithoutTxn() + ie := d.DB.Executor() for _, backfillStmt := range []string{backfillRoleIDColumnRoleMemberStmt, backfillMemberIDColumnRoleMembersStmt} { for { rowsAffected, err := ie.ExecEx(ctx, "backfill-id-columns-system-role-members", nil, /* txn */ diff --git a/pkg/upgrade/upgrades/schema_changes.go b/pkg/upgrade/upgrades/schema_changes.go index 7d5dcd70913f..25b69170a32c 100644 --- a/pkg/upgrade/upgrades/schema_changes.go +++ b/pkg/upgrade/upgrades/schema_changes.go @@ -17,7 
+17,6 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" @@ -141,10 +140,10 @@ func readTableDescriptor( ) (catalog.TableDescriptor, error) { var t catalog.TableDescriptor - if err := d.InternalExecutorFactory.DescsTxn(ctx, d.DB, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + if err := d.DB.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) (err error) { - t, err = descriptors.ByID(txn).WithoutNonPublic().Get().Table(ctx, tableID) + t, err = txn.Descriptors().ByID(txn.KV()).WithoutNonPublic().Get().Table(ctx, tableID) return err }); err != nil { return nil, err diff --git a/pkg/upgrade/upgrades/schema_changes_external_test.go b/pkg/upgrade/upgrades/schema_changes_external_test.go index 8fe0e5db9634..898a600da07a 100644 --- a/pkg/upgrade/upgrades/schema_changes_external_test.go +++ b/pkg/upgrade/upgrades/schema_changes_external_test.go @@ -21,7 +21,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -29,6 +28,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -381,11 +381,11 @@ func testMigrationWithFailures( tdb.Exec(t, "CREATE DATABASE test") 
tdb.Exec(t, createTableAfter) var desc catalog.TableDescriptor - require.NoError(t, s.InternalExecutorFactory().(descs.TxnManager).DescsTxn(ctx, s.DB(), func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + require.NoError(t, s.InternalDB().(descs.DB).DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) (err error) { tn := tree.MakeTableNameWithSchema("test", "public", "test_table") - _, desc, err = descs.PrefixAndTable(ctx, descriptors.ByName(txn).Get(), &tn) + _, desc, err = descs.PrefixAndTable(ctx, txn.Descriptors().ByName(txn.KV()).Get(), &tn) return err })) tdb.Exec(t, "DROP TABLE test.test_table") @@ -523,11 +523,12 @@ func testMigrationWithFailures( func cancelJob( t *testing.T, ctx context.Context, s serverutils.TestServerInterface, jobID jobspb.JobID, ) { - err := s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + err := s.InternalDB().(isql.DB).Txn(ctx, func(ctx context.Context, txn isql.Txn) error { // Using this way of canceling because the migration job us non-cancelable. // Canceling in this way skips the check. 
return s.JobRegistry().(*jobs.Registry).UpdateJobWithTxn( - ctx, jobID, txn, false /* useReadLock */, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, + ctx, jobID, txn, false /* useReadLock */, func( + txn isql.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater, ) error { ju.UpdateStatus(jobs.StatusCancelRequested) return nil diff --git a/pkg/upgrade/upgrades/system_external_connections.go b/pkg/upgrade/upgrades/system_external_connections.go index d57882c56e26..ad051739c4b8 100644 --- a/pkg/upgrade/upgrades/system_external_connections.go +++ b/pkg/upgrade/upgrades/system_external_connections.go @@ -24,6 +24,6 @@ func systemExternalConnectionsTableMigration( ctx context.Context, _ clusterversion.ClusterVersion, d upgrade.TenantDeps, ) error { return createSystemTable( - ctx, d.DB, d.Settings, d.Codec, systemschema.SystemExternalConnectionsTable, + ctx, d.DB.KV(), d.Settings, d.Codec, systemschema.SystemExternalConnectionsTable, ) } diff --git a/pkg/upgrade/upgrades/system_job_info.go b/pkg/upgrade/upgrades/system_job_info.go index f84ad4d073c3..42886d0ec4d8 100644 --- a/pkg/upgrade/upgrades/system_job_info.go +++ b/pkg/upgrade/upgrades/system_job_info.go @@ -23,6 +23,6 @@ func systemJobInfoTableMigration( ctx context.Context, _ clusterversion.ClusterVersion, d upgrade.TenantDeps, ) error { return createSystemTable( - ctx, d.DB, d.Settings, d.Codec, systemschema.SystemJobInfoTable, + ctx, d.DB.KV(), d.Settings, d.Codec, systemschema.SystemJobInfoTable, ) } diff --git a/pkg/upgrade/upgrades/system_users_role_id_migration.go b/pkg/upgrade/upgrades/system_users_role_id_migration.go index 35b66f8e25d4..c51326bed497 100644 --- a/pkg/upgrade/upgrades/system_users_role_id_migration.go +++ b/pkg/upgrade/upgrades/system_users_role_id_migration.go @@ -110,7 +110,7 @@ func backfillSystemUsersIDColumn( if numUsersToUpdate-i < batchSize { numIDs = numUsersToUpdate - i } - startID, err := descidgen.IncrementUniqueRoleID(ctx, d.DB, d.Codec, int64(numIDs)) + startID, err := 
descidgen.IncrementUniqueRoleID(ctx, d.DB.KV(), d.Codec, int64(numIDs)) if err != nil { return err } diff --git a/pkg/upgrade/upgrades/update_invalid_column_ids_in_sequence_back_references.go b/pkg/upgrade/upgrades/update_invalid_column_ids_in_sequence_back_references.go index dcbdb8098988..d17cf8d25d54 100644 --- a/pkg/upgrade/upgrades/update_invalid_column_ids_in_sequence_back_references.go +++ b/pkg/upgrade/upgrades/update_invalid_column_ids_in_sequence_back_references.go @@ -19,7 +19,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/upgrade" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" @@ -35,12 +34,12 @@ func updateInvalidColumnIDsInSequenceBackReferences( for { var currSeqID descpb.ID var done bool - if err := d.InternalExecutorFactory.DescsTxnWithExecutor(ctx, d.DB, d.SessionData, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor, + if err := d.DB.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) (err error) { currSeqID = lastSeqID for { - done, currSeqID, err = findNextTableToUpgrade(ctx, ie, txn, currSeqID, + done, currSeqID, err = findNextTableToUpgrade(ctx, txn, txn.KV(), currSeqID, func(table *descpb.TableDescriptor) bool { return table.IsSequence() }) @@ -50,7 +49,9 @@ func updateInvalidColumnIDsInSequenceBackReferences( // Sequence `nextIdToUpgrade` might contain back reference with invalid column IDs. If so, we need to // update them with valid column IDs. 
- hasUpgrade, err := maybeUpdateInvalidColumnIdsInSequenceBackReferences(ctx, txn, currSeqID, descriptors) + hasUpgrade, err := maybeUpdateInvalidColumnIdsInSequenceBackReferences( + ctx, txn.KV(), currSeqID, txn.Descriptors(), + ) if err != nil { return err } diff --git a/pkg/upgrade/upgrades/upgrade_sequence_to_be_referenced_by_ID.go b/pkg/upgrade/upgrades/upgrade_sequence_to_be_referenced_by_ID.go index 0e42c5a79295..2976f7309cf1 100644 --- a/pkg/upgrade/upgrades/upgrade_sequence_to_be_referenced_by_ID.go +++ b/pkg/upgrade/upgrades/upgrade_sequence_to_be_referenced_by_ID.go @@ -23,9 +23,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" "github.com/cockroachdb/cockroach/pkg/sql/catalog/seqexpr" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/upgrade" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/errors" @@ -34,29 +34,26 @@ import ( func upgradeSequenceToBeReferencedByID( ctx context.Context, _ clusterversion.ClusterVersion, d upgrade.TenantDeps, ) error { - return d.InternalExecutorFactory.DescsTxnWithExecutor(ctx, d.DB, d.SessionData, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, ie sqlutil.InternalExecutor, - ) (err error) { - var lastUpgradedID descpb.ID - // Upgrade each table/view, one at a time, until we exhaust all of them. - for { - done, idToUpgrade, err := findNextTableToUpgrade(ctx, d.InternalExecutor, txn, lastUpgradedID, - func(table *descpb.TableDescriptor) bool { - return table.IsTable() || table.IsView() - }) - if err != nil || done { - return err - } + var lastUpgradedID descpb.ID + // Upgrade each table/view, one at a time, until we exhaust all of them. 
+ for { + done, idToUpgrade, err := findNextTableToUpgrade( + ctx, d.DB.Executor(), nil, /* kvTxn */ + lastUpgradedID, func(table *descpb.TableDescriptor) bool { + return table.IsTable() || table.IsView() + }) + if err != nil || done { + return err + } - // Table/View `idToUpgrade` might contain reference to sequences by name. If so, we need to upgrade - // those references to be by ID. - err = maybeUpgradeSeqReferencesInTableOrView(ctx, idToUpgrade, d) - if err != nil { - return err - } - lastUpgradedID = idToUpgrade + // Table/View `idToUpgrade` might contain reference to sequences by name. If so, we need to upgrade + // those references to be by ID. + err = maybeUpgradeSeqReferencesInTableOrView(ctx, idToUpgrade, d) + if err != nil { + return err } - }) + lastUpgradedID = idToUpgrade + } } // Find the next table descriptor ID that is > `lastUpgradedID` @@ -64,13 +61,13 @@ func upgradeSequenceToBeReferencedByID( // If no such ID exists, `done` will be true. func findNextTableToUpgrade( ctx context.Context, - ie sqlutil.InternalExecutor, - txn *kv.Txn, + ex isql.Executor, + kvTxn *kv.Txn, lastUpgradedID descpb.ID, tableSelector func(table *descpb.TableDescriptor) bool, ) (done bool, idToUpgrade descpb.ID, err error) { - var rows sqlutil.InternalRows - rows, err = ie.QueryIterator(ctx, "upgrade-seq-find-desc", txn, + var rows isql.Rows + rows, err = ex.QueryIterator(ctx, "upgrade-seq-find-desc", kvTxn, `SELECT id, descriptor, crdb_internal_mvcc_timestamp FROM system.descriptor WHERE id > $1 ORDER BY ID ASC`, lastUpgradedID) if err != nil { return false, 0, err @@ -111,11 +108,12 @@ func findNextTableToUpgrade( func maybeUpgradeSeqReferencesInTableOrView( ctx context.Context, idToUpgrade descpb.ID, d upgrade.TenantDeps, ) error { - return d.InternalExecutorFactory.DescsTxn(ctx, d.DB, func( - ctx context.Context, txn *kv.Txn, descriptors *descs.Collection, + return d.DB.DescsTxn(ctx, func( + ctx context.Context, txn descs.Txn, ) error { // Set up: retrieve table desc 
for `idToUpgrade` and a schema resolver - tableDesc, sc, cleanup, err := upgradeSetUpForTableOrView(ctx, d, txn, descriptors, idToUpgrade) + kvTxn, descriptors := txn.KV(), txn.Descriptors() + tableDesc, sc, cleanup, err := upgradeSetUpForTableOrView(ctx, d, kvTxn, descriptors, idToUpgrade) if err != nil { return err } @@ -123,11 +121,11 @@ func maybeUpgradeSeqReferencesInTableOrView( // Act: upgrade the table's (or view's) sequence references accordingly. if tableDesc.IsTable() { - if err = upgradeSequenceReferenceInTable(ctx, txn, tableDesc, sc, descriptors); err != nil { + if err = upgradeSequenceReferenceInTable(ctx, kvTxn, tableDesc, sc, descriptors); err != nil { return err } } else if tableDesc.IsView() { - if err = upgradeSequenceReferenceInView(ctx, txn, tableDesc, sc, descriptors); err != nil { + if err = upgradeSequenceReferenceInView(ctx, kvTxn, tableDesc, sc, descriptors); err != nil { return err } } else { diff --git a/pkg/upgrade/upgrades/wait_for_del_range_in_gc_job.go b/pkg/upgrade/upgrades/wait_for_del_range_in_gc_job.go index 24957b10656f..ce0e7bc9241a 100644 --- a/pkg/upgrade/upgrades/wait_for_del_range_in_gc_job.go +++ b/pkg/upgrade/upgrades/wait_for_del_range_in_gc_job.go @@ -16,11 +16,11 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/upgrade" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -120,7 +120,7 @@ SELECT job_id // collectJobIDsFromQuery is a helper to execute a query which returns rows // where the first column is a 
jobID and returns the job IDs from those rows. func collectJobIDsFromQuery( - ctx context.Context, ie sqlutil.InternalExecutor, opName string, query string, + ctx context.Context, ie isql.Executor, opName string, query string, ) (jobIDs []jobspb.JobID, retErr error) { it, err := ie.QueryIteratorEx(ctx, opName, nil, /* txn */ sessiondata.NodeUserSessionDataOverride, query) diff --git a/pkg/upgrade/upgrades/wait_for_schema_changes.go b/pkg/upgrade/upgrades/wait_for_schema_changes.go index 88ef87951639..04611c67142f 100644 --- a/pkg/upgrade/upgrades/wait_for_schema_changes.go +++ b/pkg/upgrade/upgrades/wait_for_schema_changes.go @@ -74,5 +74,5 @@ WHERE } jobList[i] = jobspb.JobID(*id) } - return d.JobRegistry.WaitForJobsIgnoringJobErrors(ctx, d.InternalExecutor, jobList) + return d.JobRegistry.WaitForJobsIgnoringJobErrors(ctx, jobList) } diff --git a/pkg/util/stop/stopper.go b/pkg/util/stop/stopper.go index 21b081e7106b..870219716a35 100644 --- a/pkg/util/stop/stopper.go +++ b/pkg/util/stop/stopper.go @@ -283,7 +283,7 @@ func (s *Stopper) WithCancelOnQuiesce(ctx context.Context) (context.Context, fun // RunTask adds one to the count of tasks left to quiesce in the system. // Any worker which is a "first mover" when starting tasks must call this method -// before starting work on a new task. First movers include goroutines launched -// to do periodic work and the kv/db.go gateway which accepts external client +// to do periodic work and the kv/db.go gateway // requests.
// // taskName is used as the "operation" field of the span opened for this task diff --git a/pkg/util/tracing/zipper/BUILD.bazel b/pkg/util/tracing/zipper/BUILD.bazel index a3e69fd0cfac..aa389cc8b49b 100644 --- a/pkg/util/tracing/zipper/BUILD.bazel +++ b/pkg/util/tracing/zipper/BUILD.bazel @@ -7,8 +7,8 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/util/tracing/zipper", visibility = ["//visibility:public"], deps = [ + "//pkg/sql/isql", "//pkg/sql/sem/tree", - "//pkg/sql/sqlutil", "//pkg/util/log", "//pkg/util/memzipper", "//pkg/util/tracing/tracingpb", diff --git a/pkg/util/tracing/zipper/zipper.go b/pkg/util/tracing/zipper/zipper.go index 1be2d098adfd..5a16f4910e3e 100644 --- a/pkg/util/tracing/zipper/zipper.go +++ b/pkg/util/tracing/zipper/zipper.go @@ -18,8 +18,8 @@ import ( "fmt" "io" + "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" - "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/memzipper" "github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb" @@ -53,7 +53,7 @@ type InflightTraceZipper interface { type InternalInflightTraceZipper struct { traceStrBuf *bytes.Buffer nodeTraceCollection *tracingpb.TraceCollection - ie sqlutil.InternalExecutor + ie isql.Executor z *memzipper.Zipper } @@ -186,9 +186,7 @@ func (i *InternalInflightTraceZipper) populateInflightTraceRow( // MakeInternalExecutorInflightTraceZipper returns an instance of // InternalInflightTraceZipper. -func MakeInternalExecutorInflightTraceZipper( - ie sqlutil.InternalExecutor, -) *InternalInflightTraceZipper { +func MakeInternalExecutorInflightTraceZipper(ie isql.Executor) *InternalInflightTraceZipper { t := &InternalInflightTraceZipper{ traceStrBuf: &bytes.Buffer{}, nodeTraceCollection: nil,