diff --git a/pkg/sql/catalog/descs/collection_test.go b/pkg/sql/catalog/descs/collection_test.go
index ba2bc5f675ff..d3ca71e751c3 100644
--- a/pkg/sql/catalog/descs/collection_test.go
+++ b/pkg/sql/catalog/descs/collection_test.go
@@ -236,7 +236,7 @@ func TestAddUncommittedDescriptorAndMutableResolution(t *testing.T) {
 			require.Same(t, immByName, immByID)
 
 			// Don't write the descriptor, just write the namespace entry.
-			b := &kv.Batch{}
+			b := txn.KV().NewBatch()
 			err = descriptors.InsertNamespaceEntryToBatch(ctx, false /* kvTrace */, mut, b)
 			require.NoError(t, err)
 			err = txn.KV().Run(ctx, b)
diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go
index 0d88accd2a55..5529d102afa2 100644
--- a/pkg/sql/crdb_internal.go
+++ b/pkg/sql/crdb_internal.go
@@ -31,7 +31,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/jobs/jobsauth"
 	"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
 	"github.com/cockroachdb/cockroach/pkg/keys"
-	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock"
@@ -6123,12 +6122,12 @@ CREATE TABLE crdb_internal.lost_descriptors_with_data (
 		hasData := func(startID, endID descpb.ID) (found bool, _ error) {
 			startPrefix := p.extendedEvalCtx.Codec.TablePrefix(uint32(startID))
 			endPrefix := p.extendedEvalCtx.Codec.TablePrefix(uint32(endID - 1)).PrefixEnd()
-			var b kv.Batch
+			b := p.Txn().NewBatch()
 			b.Header.MaxSpanRequestKeys = 1
 			scanRequest := kvpb.NewScan(startPrefix, endPrefix, false).(*kvpb.ScanRequest)
 			scanRequest.ScanFormat = kvpb.BATCH_RESPONSE
 			b.AddRawRequest(scanRequest)
-			err = p.execCfg.DB.Run(ctx, &b)
+			err = p.execCfg.DB.Run(ctx, b)
 			if err != nil {
 				return false, err
 			}
@@ -7529,7 +7528,7 @@ func genClusterLocksGenerator(
 		var resumeSpan *roachpb.Span
 
 		fetchLocks := func(key, endKey roachpb.Key) error {
-			b := kv.Batch{}
+			b := p.Txn().NewBatch()
 			queryLocksRequest := &kvpb.QueryLocksRequest{
 				RequestHeader: kvpb.RequestHeader{
 					Key: key,
@@ -7546,7 +7545,7 @@ func genClusterLocksGenerator(
 			b.Header.MaxSpanRequestKeys = int64(rowinfra.ProductionKVBatchSize)
 			b.Header.TargetBytes = int64(rowinfra.GetDefaultBatchBytesLimit(p.extendedEvalCtx.TestingKnobs.ForceProductionValues))
 
-			err := p.txn.Run(ctx, &b)
+			err := p.txn.Run(ctx, b)
 			if err != nil {
 				return err
 			}
diff --git a/pkg/sql/create_sequence.go b/pkg/sql/create_sequence.go
index 403beb033fb4..e07b31388774 100644
--- a/pkg/sql/create_sequence.go
+++ b/pkg/sql/create_sequence.go
@@ -15,7 +15,6 @@ import (
 	"fmt"
 
 	"github.com/cockroachdb/cockroach/pkg/keys"
-	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/server/telemetry"
 	clustersettings "github.com/cockroachdb/cockroach/pkg/settings/cluster"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
@@ -155,7 +154,7 @@ func doCreateSequence(
 
 	// Initialize the sequence value.
 	seqValueKey := p.ExecCfg().Codec.SequenceKey(uint32(id))
-	b := &kv.Batch{}
+	b := p.Txn().NewBatch()
 
 	startVal := desc.SequenceOpts.Start
 	for _, option := range opts {
diff --git a/pkg/sql/descriptor.go b/pkg/sql/descriptor.go
index c8074f1e4303..c48a84024073 100644
--- a/pkg/sql/descriptor.go
+++ b/pkg/sql/descriptor.go
@@ -16,7 +16,6 @@ import (
 	"strings"
 
 	"github.com/cockroachdb/cockroach/pkg/keys"
-	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/security/username"
 	"github.com/cockroachdb/cockroach/pkg/server/telemetry"
 	"github.com/cockroachdb/cockroach/pkg/settings"
@@ -227,7 +226,7 @@ func (p *planner) createDescriptor(
 			"expected new descriptor, not a modification of version %d",
 			descriptor.OriginalVersion())
 	}
-	b := &kv.Batch{}
+	b := p.Txn().NewBatch()
 	kvTrace := p.ExtendedEvalContext().Tracing.KVTracingEnabled()
 	if err := p.Descriptors().WriteDescToBatch(ctx, kvTrace, descriptor, b); err != nil {
 		return err
diff --git a/pkg/sql/drop_database.go b/pkg/sql/drop_database.go
index 7c5e69519aa8..ef25cffd057c 100644
--- a/pkg/sql/drop_database.go
+++ b/pkg/sql/drop_database.go
@@ -13,7 +13,6 @@ package sql
 import (
 	"context"
 
-	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/server/telemetry"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
@@ -141,7 +140,7 @@ func (n *dropDatabaseNode) startExec(params runParams) error {
 		schemaToDelete := schemaWithDbDesc.schema
 		switch schemaToDelete.SchemaKind() {
 		case catalog.SchemaPublic:
-			b := &kv.Batch{}
+			b := p.Txn().NewBatch()
 			if err := p.Descriptors().DeleteDescriptorlessPublicSchemaToBatch(
 				ctx, p.ExtendedEvalContext().Tracing.KVTracingEnabled(), n.dbDesc, b,
 			); err != nil {
@@ -151,7 +150,7 @@ func (n *dropDatabaseNode) startExec(params runParams) error {
 				return err
 			}
 		case catalog.SchemaTemporary:
-			b := &kv.Batch{}
+			b := p.Txn().NewBatch()
 			if err := p.Descriptors().DeleteTempSchemaToBatch(
 				ctx, p.ExtendedEvalContext().Tracing.KVTracingEnabled(), n.dbDesc, schemaToDelete.GetName(), b,
 			); err != nil {
diff --git a/pkg/sql/gcjob/descriptor_utils.go b/pkg/sql/gcjob/descriptor_utils.go
index 935830df88f7..fc4fc7f4416d 100644
--- a/pkg/sql/gcjob/descriptor_utils.go
+++ b/pkg/sql/gcjob/descriptor_utils.go
@@ -32,7 +32,7 @@ func deleteDatabaseZoneConfig(
 		return nil
 	}
 	return db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
-		b := &kv.Batch{}
+		b := txn.NewBatch()
 
 		// Delete the zone config entry for the dropped database associated with the
 		// job, if it exists.
diff --git a/pkg/sql/importer/import_job.go b/pkg/sql/importer/import_job.go
index d14d60c005f9..aeea28bd7099 100644
--- a/pkg/sql/importer/import_job.go
+++ b/pkg/sql/importer/import_job.go
@@ -686,7 +686,7 @@ func (r *importResumer) prepareSchemasForIngestion(
 
 	// Finally create the schemas on disk.
 	for i, mutDesc := range mutableSchemaDescs {
-		b := &kv.Batch{}
+		b := txn.KV().NewBatch()
 		kvTrace := p.ExtendedEvalContext().Tracing.KVTracingEnabled()
 		if err := descsCol.WriteDescToBatch(ctx, kvTrace, mutDesc, b); err != nil {
 			return nil, err
diff --git a/pkg/sql/repair.go b/pkg/sql/repair.go
index 4c53a7244279..4b35cc778d81 100644
--- a/pkg/sql/repair.go
+++ b/pkg/sql/repair.go
@@ -20,7 +20,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/cloud"
 	"github.com/cockroachdb/cockroach/pkg/config/zonepb"
 	"github.com/cockroachdb/cockroach/pkg/keys"
-	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
 	"github.com/cockroachdb/cockroach/pkg/roachpb"
 	"github.com/cockroachdb/cockroach/pkg/sql/catalog"
@@ -755,7 +754,7 @@ func (p *planner) ForceDeleteTableData(ctx context.Context, descID int64) error
 	requestHeader := kvpb.RequestHeader{
 		Key: tableSpan.Key, EndKey: tableSpan.EndKey,
 	}
-	b := &kv.Batch{}
+	b := p.Txn().NewBatch()
 	if storage.CanUseMVCCRangeTombstones(ctx, p.execCfg.Settings) {
 		b.AddRawRequest(&kvpb.DeleteRangeRequest{
 			RequestHeader: requestHeader,
diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go
index 676ea0206fcc..83978bdfd177 100644
--- a/pkg/sql/sem/builtins/builtins.go
+++ b/pkg/sql/sem/builtins/builtins.go
@@ -40,7 +40,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/config/zonepb"
 	"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
 	"github.com/cockroachdb/cockroach/pkg/keys"
-	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
 	"github.com/cockroachdb/cockroach/pkg/roachpb"
@@ -5537,17 +5536,17 @@ SELECT
 			Types:      tree.ParamTypes{{Name: "key", Typ: types.Bytes}},
 			ReturnType: tree.FixedReturnType(types.Int),
 			Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) {
+				if evalCtx.Txn == nil { // can occur during backfills
+					return nil, pgerror.Newf(pgcode.FeatureNotSupported,
+						"cannot use crdb_internal.lease_holder in this context")
+				}
 				key := []byte(tree.MustBeDBytes(args[0]))
-				b := &kv.Batch{}
+				b := evalCtx.Txn.NewBatch()
 				b.AddRawRequest(&kvpb.LeaseInfoRequest{
 					RequestHeader: kvpb.RequestHeader{
 						Key: key,
 					},
 				})
-				if evalCtx.Txn == nil { // can occur during backfills
-					return nil, pgerror.Newf(pgcode.FeatureNotSupported,
-						"cannot use crdb_internal.lease_holder in this context")
-				}
 				if err := evalCtx.Txn.Run(ctx, b); err != nil {
 					return nil, pgerror.Wrap(err, pgcode.InvalidParameterValue, "error fetching leaseholder")
 				}
diff --git a/pkg/sql/temporary_schema.go b/pkg/sql/temporary_schema.go
index d465dc8082de..f609453eec59 100644
--- a/pkg/sql/temporary_schema.go
+++ b/pkg/sql/temporary_schema.go
@@ -18,7 +18,6 @@ import (
 	"time"
 
 	"github.com/cockroachdb/cockroach/pkg/keys"
-	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/security/username"
 	"github.com/cockroachdb/cockroach/pkg/server/serverpb"
 	"github.com/cockroachdb/cockroach/pkg/server/telemetry"
@@ -108,7 +107,7 @@ func (p *planner) getOrCreateTemporarySchema(
 	if err != nil {
 		return nil, err
 	}
-	b := &kv.Batch{}
+	b := p.Txn().NewBatch()
 	if err := p.Descriptors().InsertTempSchemaToBatch(
 		ctx, p.ExtendedEvalContext().Tracing.KVTracingEnabled(), db, tempSchemaName, id, b,
 	); err != nil {
diff --git a/pkg/sql/truncate.go b/pkg/sql/truncate.go
index c7eedafeb661..08075349996d 100644
--- a/pkg/sql/truncate.go
+++ b/pkg/sql/truncate.go
@@ -15,7 +15,6 @@ import (
 	"math/rand"
 
 	"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
-	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
 	"github.com/cockroachdb/cockroach/pkg/roachpb"
@@ -417,7 +416,7 @@ func (p *planner) copySplitPointsToNewIndexes(
 
 	// Re-split the new set of indexes along the same split points as the old
 	// indexes.
-	var b kv.Batch
+	b := p.Txn().NewBatch()
 	tablePrefix := execCfg.Codec.TablePrefix(uint32(tableID))
 
 	// Fetch all of the range descriptors for this index.
@@ -512,12 +511,12 @@
 		})
 	}
 
-	if err = p.txn.DB().Run(ctx, &b); err != nil {
+	if err = p.txn.DB().Run(ctx, b); err != nil {
 		return err
 	}
 
 	// Now scatter the ranges, after we've finished splitting them.
-	b = kv.Batch{}
+	b = p.Txn().NewBatch()
 	b.AddRawRequest(&kvpb.AdminScatterRequest{
 		// Scatter all of the data between the start key of the first new index, and
 		// the PrefixEnd of the last new index.
@@ -528,7 +527,7 @@
 		RandomizeLeases: true,
 	})
 
-	return p.txn.DB().Run(ctx, &b)
+	return p.txn.DB().Run(ctx, b)
 }
 
 func (p *planner) reassignIndexComments(