sql: use txn.NewBatch instead of &kv.Batch{}
This will make these requests properly pass along the admission control
headers.

There are more usages beyond these, but they are not trivial to address.

Release note: None
rafiss committed Jul 21, 2023
1 parent b4dfdc0 commit 3bd2698
Showing 11 changed files with 22 additions and 30 deletions.
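For orientation, here is a minimal sketch of the pattern these diffs apply, assuming the usual kv.Txn plumbing; scanFirstKey is a hypothetical helper written for this note, not code from the commit:

package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// scanFirstKey issues a bounded scan inside txn. Building the batch with
// txn.NewBatch() instead of a zero-value &kv.Batch{} lets the batch carry the
// transaction's admission control header, which is the point of this commit.
func scanFirstKey(ctx context.Context, txn *kv.Txn, start, end roachpb.Key) error {
	// Before: b := &kv.Batch{} // zero value, no admission control information
	b := txn.NewBatch()
	b.Header.MaxSpanRequestKeys = 1 // we only care whether any key exists
	b.Scan(start, end)
	return txn.Run(ctx, b)
}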
2 changes: 1 addition & 1 deletion pkg/sql/catalog/descs/collection_test.go
@@ -236,7 +236,7 @@ func TestAddUncommittedDescriptorAndMutableResolution(t *testing.T) {
require.Same(t, immByName, immByID)

// Don't write the descriptor, just write the namespace entry.
- b := &kv.Batch{}
+ b := txn.KV().NewBatch()
err = descriptors.InsertNamespaceEntryToBatch(ctx, false /* kvTrace */, mut, b)
require.NoError(t, err)
err = txn.KV().Run(ctx, b)
9 changes: 4 additions & 5 deletions pkg/sql/crdb_internal.go
@@ -31,7 +31,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/jobs/jobsauth"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord"
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock"
@@ -6123,12 +6122,12 @@ CREATE TABLE crdb_internal.lost_descriptors_with_data (
hasData := func(startID, endID descpb.ID) (found bool, _ error) {
startPrefix := p.extendedEvalCtx.Codec.TablePrefix(uint32(startID))
endPrefix := p.extendedEvalCtx.Codec.TablePrefix(uint32(endID - 1)).PrefixEnd()
- var b kv.Batch
+ b := p.Txn().NewBatch()
b.Header.MaxSpanRequestKeys = 1
scanRequest := kvpb.NewScan(startPrefix, endPrefix, false).(*kvpb.ScanRequest)
scanRequest.ScanFormat = kvpb.BATCH_RESPONSE
b.AddRawRequest(scanRequest)
- err = p.execCfg.DB.Run(ctx, &b)
+ err = p.execCfg.DB.Run(ctx, b)
if err != nil {
return false, err
}
@@ -7529,7 +7528,7 @@ func genClusterLocksGenerator(
var resumeSpan *roachpb.Span

fetchLocks := func(key, endKey roachpb.Key) error {
- b := kv.Batch{}
+ b := p.Txn().NewBatch()
queryLocksRequest := &kvpb.QueryLocksRequest{
RequestHeader: kvpb.RequestHeader{
Key: key,
@@ -7546,7 +7545,7 @@ func genClusterLocksGenerator(
b.Header.MaxSpanRequestKeys = int64(rowinfra.ProductionKVBatchSize)
b.Header.TargetBytes = int64(rowinfra.GetDefaultBatchBytesLimit(p.extendedEvalCtx.TestingKnobs.ForceProductionValues))

- err := p.txn.Run(ctx, &b)
+ err := p.txn.Run(ctx, b)
if err != nil {
return err
}
3 changes: 1 addition & 2 deletions pkg/sql/create_sequence.go
@@ -15,7 +15,6 @@ import (
"fmt"

"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
clustersettings "github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
@@ -155,7 +154,7 @@ func doCreateSequence(

// Initialize the sequence value.
seqValueKey := p.ExecCfg().Codec.SequenceKey(uint32(id))
- b := &kv.Batch{}
+ b := p.Txn().NewBatch()

startVal := desc.SequenceOpts.Start
for _, option := range opts {
3 changes: 1 addition & 2 deletions pkg/sql/descriptor.go
@@ -16,7 +16,6 @@ import (
"strings"

"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/settings"
@@ -227,7 +226,7 @@ func (p *planner) createDescriptor(
"expected new descriptor, not a modification of version %d",
descriptor.OriginalVersion())
}
- b := &kv.Batch{}
+ b := p.Txn().NewBatch()
kvTrace := p.ExtendedEvalContext().Tracing.KVTracingEnabled()
if err := p.Descriptors().WriteDescToBatch(ctx, kvTrace, descriptor, b); err != nil {
return err
5 changes: 2 additions & 3 deletions pkg/sql/drop_database.go
@@ -13,7 +13,6 @@ package sql
import (
"context"

"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys"
@@ -141,7 +140,7 @@ func (n *dropDatabaseNode) startExec(params runParams) error {
schemaToDelete := schemaWithDbDesc.schema
switch schemaToDelete.SchemaKind() {
case catalog.SchemaPublic:
- b := &kv.Batch{}
+ b := p.Txn().NewBatch()
if err := p.Descriptors().DeleteDescriptorlessPublicSchemaToBatch(
ctx, p.ExtendedEvalContext().Tracing.KVTracingEnabled(), n.dbDesc, b,
); err != nil {
@@ -151,7 +150,7 @@ func (n *dropDatabaseNode) startExec(params runParams) error {
return err
}
case catalog.SchemaTemporary:
- b := &kv.Batch{}
+ b := p.Txn().NewBatch()
if err := p.Descriptors().DeleteTempSchemaToBatch(
ctx, p.ExtendedEvalContext().Tracing.KVTracingEnabled(), n.dbDesc, schemaToDelete.GetName(), b,
); err != nil {
2 changes: 1 addition & 1 deletion pkg/sql/gcjob/descriptor_utils.go
@@ -32,7 +32,7 @@ func deleteDatabaseZoneConfig(
return nil
}
return db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
- b := &kv.Batch{}
+ b := txn.NewBatch()

// Delete the zone config entry for the dropped database associated with the
// job, if it exists.
2 changes: 1 addition & 1 deletion pkg/sql/importer/import_job.go
@@ -686,7 +686,7 @@ func (r *importResumer) prepareSchemasForIngestion(

// Finally create the schemas on disk.
for i, mutDesc := range mutableSchemaDescs {
- b := &kv.Batch{}
+ b := txn.KV().NewBatch()
kvTrace := p.ExtendedEvalContext().Tracing.KVTracingEnabled()
if err := descsCol.WriteDescToBatch(ctx, kvTrace, mutDesc, b); err != nil {
return nil, err
3 changes: 1 addition & 2 deletions pkg/sql/repair.go
@@ -20,7 +20,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/cloud"
"github.com/cockroachdb/cockroach/pkg/config/zonepb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
@@ -755,7 +754,7 @@ func (p *planner) ForceDeleteTableData(ctx context.Context, descID int64) error
requestHeader := kvpb.RequestHeader{
Key: tableSpan.Key, EndKey: tableSpan.EndKey,
}
- b := &kv.Batch{}
+ b := p.Txn().NewBatch()
if storage.CanUseMVCCRangeTombstones(ctx, p.execCfg.Settings) {
b.AddRawRequest(&kvpb.DeleteRangeRequest{
RequestHeader: requestHeader,
11 changes: 5 additions & 6 deletions pkg/sql/sem/builtins/builtins.go
@@ -40,7 +40,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/config/zonepb"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/roachpb"
@@ -5537,17 +5536,17 @@ SELECT
Types: tree.ParamTypes{{Name: "key", Typ: types.Bytes}},
ReturnType: tree.FixedReturnType(types.Int),
Fn: func(ctx context.Context, evalCtx *eval.Context, args tree.Datums) (tree.Datum, error) {
+ if evalCtx.Txn == nil { // can occur during backfills
+ return nil, pgerror.Newf(pgcode.FeatureNotSupported,
+ "cannot use crdb_internal.lease_holder in this context")
+ }
key := []byte(tree.MustBeDBytes(args[0]))
- b := &kv.Batch{}
+ b := evalCtx.Txn.NewBatch()
b.AddRawRequest(&kvpb.LeaseInfoRequest{
RequestHeader: kvpb.RequestHeader{
Key: key,
},
})
- if evalCtx.Txn == nil { // can occur during backfills
- return nil, pgerror.Newf(pgcode.FeatureNotSupported,
- "cannot use crdb_internal.lease_holder in this context")
- }
if err := evalCtx.Txn.Run(ctx, b); err != nil {
return nil, pgerror.Wrap(err, pgcode.InvalidParameterValue, "error fetching leaseholder")
}
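The builtins.go hunk above also moves the nil-Txn check ahead of the batch construction, presumably because the batch is now derived from evalCtx.Txn and the transaction must be checked before it is used. A sketch of the resulting order, with the surrounding builtin machinery elided and only calls that appear in the diff:

package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
)

// leaseInfoBatch mirrors the reordered logic: check evalCtx.Txn for nil first,
// then build the batch from it and issue the LeaseInfo request.
func leaseInfoBatch(ctx context.Context, evalCtx *eval.Context, key []byte) error {
	if evalCtx.Txn == nil { // can occur during backfills
		return pgerror.Newf(pgcode.FeatureNotSupported,
			"cannot use crdb_internal.lease_holder in this context")
	}
	b := evalCtx.Txn.NewBatch()
	b.AddRawRequest(&kvpb.LeaseInfoRequest{
		RequestHeader: kvpb.RequestHeader{Key: key},
	})
	return evalCtx.Txn.Run(ctx, b)
}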
3 changes: 1 addition & 2 deletions pkg/sql/temporary_schema.go
@@ -18,7 +18,6 @@ import (
"time"

"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
@@ -108,7 +107,7 @@ func (p *planner) getOrCreateTemporarySchema(
if err != nil {
return nil, err
}
- b := &kv.Batch{}
+ b := p.Txn().NewBatch()
if err := p.Descriptors().InsertTempSchemaToBatch(
ctx, p.ExtendedEvalContext().Tracing.KVTracingEnabled(), db, tempSchemaName, id, b,
); err != nil {
9 changes: 4 additions & 5 deletions pkg/sql/truncate.go
@@ -15,7 +15,6 @@ import (
"math/rand"

"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvpb"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase"
"github.com/cockroachdb/cockroach/pkg/roachpb"
@@ -417,7 +416,7 @@ func (p *planner) copySplitPointsToNewIndexes(

// Re-split the new set of indexes along the same split points as the old
// indexes.
- var b kv.Batch
+ b := p.Txn().NewBatch()
tablePrefix := execCfg.Codec.TablePrefix(uint32(tableID))

// Fetch all of the range descriptors for this index.
@@ -512,12 +511,12 @@ func (p *planner) copySplitPointsToNewIndexes(
})
}

- if err = p.txn.DB().Run(ctx, &b); err != nil {
+ if err = p.txn.DB().Run(ctx, b); err != nil {
return err
}

// Now scatter the ranges, after we've finished splitting them.
- b = kv.Batch{}
+ b = p.Txn().NewBatch()
b.AddRawRequest(&kvpb.AdminScatterRequest{
// Scatter all of the data between the start key of the first new index, and
// the PrefixEnd of the last new index.
@@ -528,7 +527,7 @@
RandomizeLeases: true,
})

- return p.txn.DB().Run(ctx, &b)
+ return p.txn.DB().Run(ctx, b)
}

func (p *planner) reassignIndexComments(
