
kvserver: v21.1.7: cannot handle WriteIntentError × for request without lockTableGuard; were lock spans declared for this request? #79630

Closed
cockroach-teamcity opened this issue Apr 8, 2022 · 1 comment
Labels
C-bug: Code not up to spec/doc, specs & docs deemed correct. Solution expected to change code/behavior.
O-sentry: Originated from an in-the-wild panic report.
T-kv: KV Team

Comments


cockroach-teamcity commented Apr 8, 2022

This issue was autofiled by Sentry. It represents a crash or reported error on a live cluster with telemetry enabled.

Sentry link: https://sentry.io/organizations/cockroach-labs/issues/3172012307/?referrer=webhooks_plugin

Panic message:

concurrency_manager.go:257: log.Fatal: cannot handle WriteIntentError × for request without lockTableGuard; were lock spans declared for this request?
--
*errutil.leafError: log.Fatal: cannot handle WriteIntentError × for request without lockTableGuard; were lock spans declared for this request? (1)
*secondary.withSecondaryError
concurrency_manager.go:257: *withstack.withStack (top exception)
(check the extra data payloads)
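
For context, the fatal fires when a request reaches write-intent handling without a lock-table guard, i.e. without lock spans declared for it. Below is a minimal, illustrative Go sketch of that invariant check; the type and function names are simplified placeholders, not the actual CockroachDB API (the real check appears in the first stack frame further down).

```go
package main

import (
	"errors"
	"log"
)

// Illustrative placeholder only; not the real CockroachDB type.
type lockTableGuard struct{}

// guard stands in for the concurrency guard attached to a request; ltg is
// nil when no lock spans were declared for the request.
type guard struct {
	ltg *lockTableGuard
}

// handleWriteIntentError mirrors the shape of the invariant check behind this
// report: in v21.1.7 the condition is treated as fatal.
func handleWriteIntentError(g *guard, intentErr error) {
	if g.ltg == nil {
		log.Fatalf("cannot handle WriteIntentError %v for request without "+
			"lockTableGuard; were lock spans declared for this request?", intentErr)
	}
	// Otherwise the request would drop its latches but keep its spot in the
	// lock wait-queues and retry.
}

func main() {
	// A request that never declared lock spans trips the fatal.
	handleWriteIntentError(&guard{}, errors.New("conflicting intents on a key"))
}
```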

Stacktrace (with inline code snippets):

if g.ltg == nil {
log.Fatalf(ctx, "cannot handle WriteIntentError %v for request without "+
"lockTableGuard; were lock spans declared for this request?", t)
in pkg/kv/kvserver/concurrency.(*managerImpl).HandleWriterIntentError
// g's latches will be dropped, but it retains its spot in lock wait-queues.
return r.concMgr.HandleWriterIntentError(ctx, g, t.LeaseSequence, t)
}
in pkg/kv/kvserver.(*Replica).handleWriteIntentError
// Drop latches, but retain lock wait-queues.
if g, pErr = r.handleWriteIntentError(ctx, ba, g, pErr, t); pErr != nil {
return nil, pErr
in pkg/kv/kvserver.(*Replica).executeBatchWithConcurrencyRetries
fn := (*Replica).executeWriteBatch
br, pErr = r.executeBatchWithConcurrencyRetries(ctx, ba, fn)
} else if ba.IsAdmin() {
in pkg/kv/kvserver.(*Replica).sendWithRangeID
) (*roachpb.BatchResponse, *roachpb.Error) {
return r.sendWithRangeID(ctx, r.RangeID, &ba)
}
in pkg/kv/kvserver.(*Replica).Send
br, pErr = repl.Send(ctx, ba)
if pErr == nil {
in pkg/kv/kvserver.(*Store).Send
br, pErr := store.Send(ctx, ba)
if br != nil && br.Error != nil {
in pkg/kv/kvserver.(*Stores).Send
var pErr *roachpb.Error
br, pErr = n.stores.Send(ctx, *args)
if pErr != nil {
in pkg/server.(*Node).batchInternal.func1
return f(ctx)
}
in pkg/util/stop.(*Stopper).RunTaskWithErr
var br *roachpb.BatchResponse
if err := n.stopper.RunTaskWithErr(ctx, "node.Node: batch", func(ctx context.Context) error {
var finishSpan func(*roachpb.BatchResponse)
in pkg/server.(*Node).batchInternal
br, err := n.batchInternal(ctx, args)
in pkg/server.(*Node).Batch
) (*roachpb.BatchResponse, error) {
return a.InternalServer.Batch(ctx, ba)
}
in pkg/rpc.internalClientAdapter.Batch
}
reply, err := iface.Batch(ctx, &ba)
// If we queried a remote node, perform extra validation and
in pkg/kv/kvclient/kvcoord.(*grpcTransport).sendBatch
ba.Replica = r
return gt.sendBatch(ctx, r.NodeID, iface, ba)
}
in pkg/kv/kvclient/kvcoord.(*grpcTransport).SendNext
}
br, err = transport.SendNext(ctx, ba)
ds.maybeIncrementErrCounters(br, err)
in pkg/kv/kvclient/kvcoord.(*DistSender).sendToReplicas
prevTok = routingTok
reply, err = ds.sendToReplicas(ctx, ba, routingTok, withCommit)
in pkg/kv/kvclient/kvcoord.(*DistSender).sendPartialBatch
if !ri.NeedAnother(rs) {
resp := ds.sendPartialBatch(
ctx, ba, rs, ri.Token(), withCommit, batchIdx, false, /* needsTruncate */
in pkg/kv/kvclient/kvcoord.(*DistSender).divideAndSendBatchToRanges
} else {
rpl, pErr = ds.divideAndSendBatchToRanges(ctx, ba, rs, withCommit, 0 /* batchIdx */)
}
in pkg/kv/kvclient/kvcoord.(*DistSender).Send

cockroach/pkg/kv/db.go, lines 218 to 220 in 1fac61a:
br, pErr := s.wrapped.Send(ctx, ba)
if _, ok := pErr.GetDetail().(*roachpb.OpRequiresTxnError); !ok {
in pkg/kv.(*CrossRangeTxnWrapperSender).Send

cockroach/pkg/kv/db.go, lines 808 to 810 in 1fac61a:
br, pErr := sender.Send(ctx, ba)
if pErr != nil {
in pkg/kv.(*DB).sendUsingSender

cockroach/pkg/kv/db.go, lines 791 to 793 in 1fac61a:
) (*roachpb.BatchResponse, *roachpb.Error) {
return db.sendUsingSender(ctx, ba, db.NonTransactionalSender())
}
in pkg/kv.(*DB).send

cockroach/pkg/kv/db.go, lines 719 to 721 in 1fac61a:
ba.Header = b.Header
b.response, b.pErr = send(ctx, ba)
b.fillResults(ctx)
in pkg/kv.sendAndFill

cockroach/pkg/kv/db.go, lines 742 to 744 in 1fac61a:
}
return sendAndFill(ctx, db.send, b)
}
in pkg/kv.(*DB).Run

cockroach/pkg/kv/db.go, lines 694 to 696 in 1fac61a:
b.addSSTable(begin, end, data, disallowShadowing, stats, ingestAsWrites)
return getOneErr(db.Run(ctx, b), b)
}
in pkg/kv.(*DB).AddSSTable
// This will fail if the range has split but we'll check for that below.
err = db.AddSSTable(ctx, item.start, item.end, item.sstBytes, item.disallowShadowing, &item.stats, ingestAsWriteBatch, batchTs)
if err == nil {
in pkg/kv/bulk.AddSSTable.func1
return errors.Wrapf(err, "addsstable [%s,%s)", item.start, item.end)
}(); err != nil {
return files, err
in pkg/kv/bulk.AddSSTable
beforeSend := timeutil.Now()
files, err := AddSSTable(ctx, b.db, start, end, b.sstFile.Data(), b.disallowShadowing, b.ms, b.settings, b.batchTS)
if err != nil {
in pkg/kv/bulk.(*SSTBatcher).doFlush
func (b *SSTBatcher) Flush(ctx context.Context) error {
return b.doFlush(ctx, manualFlush, nil)
}
in pkg/kv/bulk.(*SSTBatcher).Flush
}
if err := b.sink.Flush(ctx); err != nil {
return err
in pkg/kv/bulk.(*BufferingAdder).Flush
if err := indexAdder.Flush(ctx); err != nil {
if errors.HasType(err, (*kvserverbase.DuplicateKeyError)(nil)) {
in pkg/ccl/importccl.ingestKvs
group.GoCtx(func(ctx context.Context) error {
summary, err = ingestKvs(ctx, flowCtx, spec, progCh, kvCh)
if err != nil {
in pkg/ccl/importccl.runImport.func3
g.wrapped.Go(func() error {
return f(g.ctx)
})
in pkg/util/ctxgroup.Group.GoCtx.func1

pkg/kv/kvserver/concurrency/concurrency_manager.go in pkg/kv/kvserver/concurrency.(*managerImpl).HandleWriterIntentError at line 257
pkg/kv/kvserver/replica_send.go in pkg/kv/kvserver.(*Replica).handleWriteIntentError at line 491
pkg/kv/kvserver/replica_send.go in pkg/kv/kvserver.(*Replica).executeBatchWithConcurrencyRetries at line 385
pkg/kv/kvserver/replica_send.go in pkg/kv/kvserver.(*Replica).sendWithRangeID at line 98
pkg/kv/kvserver/replica_send.go in pkg/kv/kvserver.(*Replica).Send at line 34
pkg/kv/kvserver/store_send.go in pkg/kv/kvserver.(*Store).Send at line 180
pkg/kv/kvserver/stores.go in pkg/kv/kvserver.(*Stores).Send at line 191
pkg/server/node.go in pkg/server.(*Node).batchInternal.func1 at line 875
pkg/util/stop/stopper.go in pkg/util/stop.(*Stopper).RunTaskWithErr at line 330
pkg/server/node.go in pkg/server.(*Node).batchInternal at line 863
pkg/server/node.go in pkg/server.(*Node).Batch at line 901
pkg/rpc/context.go in pkg/rpc.internalClientAdapter.Batch at line 463
pkg/kv/kvclient/kvcoord/transport.go in pkg/kv/kvclient/kvcoord.(*grpcTransport).sendBatch at line 195
pkg/kv/kvclient/kvcoord/transport.go in pkg/kv/kvclient/kvcoord.(*grpcTransport).SendNext at line 177
pkg/kv/kvclient/kvcoord/dist_sender.go in pkg/kv/kvclient/kvcoord.(*DistSender).sendToReplicas at line 1889
pkg/kv/kvclient/kvcoord/dist_sender.go in pkg/kv/kvclient/kvcoord.(*DistSender).sendPartialBatch at line 1507
pkg/kv/kvclient/kvcoord/dist_sender.go in pkg/kv/kvclient/kvcoord.(*DistSender).divideAndSendBatchToRanges at line 1145
pkg/kv/kvclient/kvcoord/dist_sender.go in pkg/kv/kvclient/kvcoord.(*DistSender).Send at line 784
pkg/kv/db.go in pkg/kv.(*CrossRangeTxnWrapperSender).Send at line 219
pkg/kv/db.go in pkg/kv.(*DB).sendUsingSender at line 809
pkg/kv/db.go in pkg/kv.(*DB).send at line 792
pkg/kv/db.go in pkg/kv.sendAndFill at line 720
pkg/kv/db.go in pkg/kv.(*DB).Run at line 743
pkg/kv/db.go in pkg/kv.(*DB).AddSSTable at line 695
pkg/kv/bulk/sst_batcher.go in pkg/kv/bulk.AddSSTable.func1 at line 449
pkg/kv/bulk/sst_batcher.go in pkg/kv/bulk.AddSSTable at line 485
pkg/kv/bulk/sst_batcher.go in pkg/kv/bulk.(*SSTBatcher).doFlush at line 317
pkg/kv/bulk/sst_batcher.go in pkg/kv/bulk.(*SSTBatcher).Flush at line 258
pkg/kv/bulk/buffering_adder.go in pkg/kv/bulk.(*BufferingAdder).Flush at line 237
pkg/ccl/importccl/import_processor.go in pkg/ccl/importccl.ingestKvs at line 456
pkg/ccl/importccl/read_import_base.go in pkg/ccl/importccl.runImport.func3 at line 115
pkg/util/ctxgroup/ctxgroup.go in pkg/util/ctxgroup.Group.GoCtx.func1 at line 166
Cockroach Release: v21.1.7
Cockroach SHA: 1fac61a
Platform: linux amd64
Distribution: CCL
Environment: development
Command: server
Go Version: (not reported)
# of CPUs: (not reported)
# of Goroutines: (not reported)

Jira issue: CRDB-14951

cockroach-teamcity added the C-bug and O-sentry labels on Apr 8, 2022
yuzefovich changed the title from "sentry: concurrency_manager.go:257: log.Fatal: cannot handle WriteIntentError × for request without lockTableGuard; were lock spans declared for this request? -- *errutil.leafError: log.Fatal: cannot handle W..." to "kvserver: v21.1.7: cannot handle WriteIntentError × for request without lockTableGuard; were lock spans declared for this request?" on Apr 9, 2022
blathers-crl bot added the T-kv (KV Team) label on Apr 9, 2022
@nvanbenschoten (Member) commented:

This is an instance of #71676. We backported the fix in #71982, which first shipped in v21.1.12, so it's not surprising that we see this on v21.1.7. Closing.
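
For anyone triaging similar reports on builds older than v21.1.12: the general shape of the mitigation is to surface this condition as an ordinary, returnable error rather than a process-killing log.Fatal. The sketch below is written under that assumption and reuses the simplified placeholder guard type from the earlier sketch; it is not the actual patch merged in #71982.

```go
import "fmt"

// handleWriteIntentErrorSafely is an assumption-labeled sketch: it returns an
// error instead of calling log.Fatalf, so a single misrouted request cannot
// bring down the node. Placeholder types; not the code merged in #71982.
func handleWriteIntentErrorSafely(g *guard, intentErr error) (*guard, error) {
	if g.ltg == nil {
		return nil, fmt.Errorf(
			"cannot handle WriteIntentError %v for request without lockTableGuard; "+
				"were lock spans declared for this request?", intentErr)
	}
	// On the happy path, drop latches but keep the spot in the lock
	// wait-queues, then let the caller retry the batch.
	return g, nil
}
```

In this shape, the caller treats the condition like any other batch-evaluation failure and returns it to the client instead of crashing the server.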
