sentry: txn_coord_sender.go:825: log.Fatal: transaction unexpectedly committed: ×. ba: ×. txn: meta={id=bd5dddc2 pri=0.00807350 epo=0 ts=1624560919.629122902,0 min=1624560919.629122902,0 seq=7} lock=true stat=COMMITTED rts=0,0 wto=false max=0,0 int=6. -- *errutil.leafError: log.Fatal: transaction unexpectedly committed: ×. ba: ×. txn: meta={id=bd5dddc2 pri=0.00807350 epo=0 ts=1624560919.629122902,0 min=1624560919.629122902,0 seq=7} lock=true stat=COMMITTED rts=0,0 wto=false max=0,0 int=6. (1) txn_coord_sender.go:825: *withstack.withStack (top exception) (check the extra data payloads) #66860

Closed
cockroach-teamcity opened this issue Jun 24, 2021 · 1 comment
Labels
C-bug: Code not up to spec/doc, specs & docs deemed correct. Solution expected to change code/behavior.
O-sentry: Originated from an in-the-wild panic report.
T-kv: KV Team

Comments

@cockroach-teamcity (Member)

This issue was autofiled by Sentry. It represents a crash or reported error on a live cluster with telemetry enabled.

Sentry link: https://sentry.io/organizations/cockroach-labs/issues/2474931431/?referrer=webhooks_plugin

Panic message:

txn_coord_sender.go:825: log.Fatal: transaction unexpectedly committed: ×. ba: ×. txn: meta={id=bd5dddc2 pri=0.00807350 epo=0 ts=1624560919.629122902,0 min=1624560919.629122902,0 seq=7} lock=true stat=COMMITTED rts=0,0 wto=false max=0,0 int=6.
--
*errutil.leafError: log.Fatal: transaction unexpectedly committed: ×. ba: ×. txn: meta={id=bd5dddc2 pri=0.00807350 epo=0 ts=1624560919.629122902,0 min=1624560919.629122902,0 seq=7} lock=true stat=COMMITTED rts=0,0 wto=false max=0,0 int=6. (1)
txn_coord_sender.go:825: *withstack.withStack (top exception)
(check the extra data payloads)

Stacktrace (with inline code snippets):

// committed.
log.Fatalf(ctx, "transaction unexpectedly committed: %s. ba: %s. txn: %s.", pErr, ba, errTxn)
}
in pkg/kv/kvclient/kvcoord.sanityCheckCommittedErr
if errTxn.Status == roachpb.COMMITTED {
sanityCheckCommittedErr(ctx, pErr, ba)
}
in pkg/kv/kvclient/kvcoord.(*TxnCoordSender).updateStateLocked
pErr = tc.updateStateLocked(ctx, ba, br, pErr)
in pkg/kv/kvclient/kvcoord.(*TxnCoordSender).Send

cockroach/pkg/kv/db.go

Lines 744 to 746 in 68a0fb9

tracing.AnnotateTrace()
br, pErr := sender.Send(ctx, ba)
if pErr != nil {
in pkg/kv.(*DB).sendUsingSender

cockroach/pkg/kv/txn.go

Lines 933 to 935 in 68a0fb9

txn.mu.Unlock()
br, pErr := txn.db.sendUsingSender(ctx, ba, sender)
if pErr == nil {
in pkg/kv.(*Txn).Send

cockroach/pkg/kv/db.go

Lines 654 to 656 in 68a0fb9

ba.Header = b.Header
b.response, b.pErr = send(ctx, ba)
b.fillResults(ctx)
in pkg/kv.sendAndFill

cockroach/pkg/kv/txn.go

Lines 602 to 604 in 68a0fb9

}
return sendAndFill(ctx, txn.Send, b)
}
in pkg/kv.(*Txn).Run

cockroach/pkg/kv/txn.go

Lines 664 to 666 in 68a0fb9

b.initResult(1 /* calls */, 0, b.raw, nil)
return txn.Run(ctx, b)
}
in pkg/kv.(*Txn).CommitInBatch
// coordinator.
err = tb.txn.CommitInBatch(ctx, tb.b)
} else {
in pkg/sql.(*tableWriterBase).finalize

cockroach/pkg/sql/update.go

Lines 194 to 196 in 68a0fb9

if lastBatch {
if err := u.run.tu.finalize(params.ctx); err != nil {
return false, err
in pkg/sql.(*updateNode).BatchedNext
for {
if next, err := r.source.BatchedNext(params); !next {
return err
in pkg/sql.(*rowCountNode).startExec

cockroach/pkg/sql/plan.go

Lines 505 to 507 in 68a0fb9

}
return n.startExec(params)
},
in pkg/sql.startExec.func2

cockroach/pkg/sql/walk.go

Lines 118 to 120 in 68a0fb9

}
v.err = v.observer.leaveNode(name, plan)
}()
in pkg/sql.(*planVisitor).visitInternal.func1

cockroach/pkg/sql/walk.go

Lines 297 to 299 in 68a0fb9

}
}
in pkg/sql.(*planVisitor).visitInternal
}
v.visitInternal(plan, name)
return plan
in pkg/sql.(*planVisitor).visit
v := makePlanVisitor(ctx, observer)
v.visit(plan)
return v.err
in pkg/sql.walkPlan

cockroach/pkg/sql/plan.go

Lines 508 to 510 in 68a0fb9

}
return walkPlan(params.ctx, plan, o)
}
in pkg/sql.startExec
// This starts all of the nodes below this node.
if err := startExec(p.params, p.node); err != nil {
p.MoveToDraining(err)
in pkg/sql.(*planNodeToRowSource).Start
}
ctx = pb.self.Start(ctx)
Run(ctx, pb.self, pb.Out.output)
in pkg/sql/execinfra.(*ProcessorBase).Run
log.VEventf(ctx, 1, "running %T in the flow's goroutine", headProc)
headProc.Run(ctx)
return nil
in pkg/sql/flowinfra.(*FlowBase).Run
// TODO(radu): this should go through the flow scheduler.
if err := flow.Run(ctx, func() {}); err != nil {
log.Fatalf(ctx, "unexpected error from syncFlow.Start(): %v\n"+
in pkg/sql.(*DistSQLPlanner).Run
recv.expectedRowsRead = int64(physPlan.TotalEstimatedScannedRows)
return dsp.Run(planCtx, txn, physPlan, recv, evalCtx, nil /* finishedSetupFn */)
}
in pkg/sql.(*DistSQLPlanner).PlanAndRun
// the planner whether or not to plan remote table readers.
cleanup := ex.server.cfg.DistSQLPlanner.PlanAndRun(
ctx, evalCtx, planCtx, planner.txn, planner.curPlan.main, recv,
in pkg/sql.(*connExecutor).execWithDistSQLEngine
ex.sessionTracing.TraceExecStart(ctx, "distributed")
stats, err := ex.execWithDistSQLEngine(
ctx, planner, stmt.AST.StatementType(), res, distributePlan.WillDistribute(), progAtomic,
in pkg/sql.(*connExecutor).dispatchToExecutionEngine
p.autoCommit = os.ImplicitTxn.Get() && !ex.server.cfg.TestingKnobs.DisableAutoCommit
if err := ex.dispatchToExecutionEngine(ctx, p, res); err != nil {
return nil, nil, err
in pkg/sql.(*connExecutor).execStmtInOpenState
} else {
ev, payload, err = ex.execStmtInOpenState(ctx, stmt, res, pinfo)
}
in pkg/sql.(*connExecutor).execStmt
if !portal.exhausted {
ev, payload, err = ex.execStmt(stmtCtx, curStmt, stmtRes, pinfo)
// Portal suspension is supported via a "side" state machine
in pkg/sql.(*connExecutor).execPortal
res = stmtRes
ev, payload, err = ex.execPortal(ctx, portal, portalName, stmtRes, pinfo)
return err
in pkg/sql.(*connExecutor).execCmd.func2
return err
}()
// Note: we write to ex.statsCollector.phaseTimes, instead of ex.phaseTimes,
in pkg/sql.(*connExecutor).execCmd
var err error
if err = ex.execCmd(ex.Ctx()); err != nil {
if errors.IsAny(err, io.EOF, errDrainingComplete) {
in pkg/sql.(*connExecutor).run
}()
return h.ex.run(ctx, s.pool, reserved, cancel)
}
in pkg/sql.(*Server).ServeConn
reservedOwned = false // We're about to pass ownership away.
retErr = sqlServer.ServeConn(ctx, connHandler, reserved, cancelConn)
}()
in pkg/sql/pgwire.(*conn).processCommandsAsync.func1

pkg/kv/kvclient/kvcoord/txn_coord_sender.go in pkg/kv/kvclient/kvcoord.sanityCheckCommittedErr at line 825
pkg/kv/kvclient/kvcoord/txn_coord_sender.go in pkg/kv/kvclient/kvcoord.(*TxnCoordSender).updateStateLocked at line 798
pkg/kv/kvclient/kvcoord/txn_coord_sender.go in pkg/kv/kvclient/kvcoord.(*TxnCoordSender).Send at line 504
pkg/kv/db.go in pkg/kv.(*DB).sendUsingSender at line 745
pkg/kv/txn.go in pkg/kv.(*Txn).Send at line 934
pkg/kv/db.go in pkg/kv.sendAndFill at line 655
pkg/kv/txn.go in pkg/kv.(*Txn).Run at line 603
pkg/kv/txn.go in pkg/kv.(*Txn).CommitInBatch at line 665
pkg/sql/tablewriter.go in pkg/sql.(*tableWriterBase).finalize at line 149
pkg/sql/update.go in pkg/sql.(*updateNode).BatchedNext at line 195
pkg/sql/plan_batch.go in pkg/sql.(*rowCountNode).startExec at line 159
pkg/sql/plan.go in pkg/sql.startExec.func2 at line 506
pkg/sql/walk.go in pkg/sql.(*planVisitor).visitInternal.func1 at line 119
pkg/sql/walk.go in pkg/sql.(*planVisitor).visitInternal at line 298
pkg/sql/walk.go in pkg/sql.(*planVisitor).visit at line 86
pkg/sql/walk.go in pkg/sql.walkPlan at line 50
pkg/sql/plan.go in pkg/sql.startExec at line 509
pkg/sql/plan_node_to_row_source.go in pkg/sql.(*planNodeToRowSource).Start at line 123
pkg/sql/execinfra/processorsbase.go in pkg/sql/execinfra.(*ProcessorBase).Run at line 775
pkg/sql/flowinfra/flow.go in pkg/sql/flowinfra.(*FlowBase).Run at line 395
pkg/sql/distsql_running.go in pkg/sql.(*DistSQLPlanner).Run at line 417
pkg/sql/distsql_running.go in pkg/sql.(*DistSQLPlanner).PlanAndRun at line 997
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).execWithDistSQLEngine at line 1002
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).dispatchToExecutionEngine at line 873
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).execStmtInOpenState at line 640
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).execStmt at line 114
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).execPortal at line 203
pkg/sql/conn_executor.go in pkg/sql.(*connExecutor).execCmd.func2 at line 1533
pkg/sql/conn_executor.go in pkg/sql.(*connExecutor).execCmd at line 1535
pkg/sql/conn_executor.go in pkg/sql.(*connExecutor).run at line 1391
pkg/sql/conn_executor.go in pkg/sql.(*Server).ServeConn at line 508
pkg/sql/pgwire/conn.go in pkg/sql/pgwire.(*conn).processCommandsAsync.func1 at line 628
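
To make the failure mode concrete, here is a minimal, self-contained Go sketch of the invariant that fired, reconstructed only from the frames above. The TxnStatus, Transaction, and Error types below are simplified stand-ins for illustration (not CockroachDB's actual definitions), and the standard-library log package stands in for CockroachDB's util/log:

package main

import "log"

type TxnStatus int

const (
	PENDING TxnStatus = iota
	COMMITTED
	ABORTED
)

// Transaction is a stand-in for the transaction proto attached to a KV error.
type Transaction struct {
	ID     string
	Status TxnStatus
}

// Error models an error response that can carry a transaction proto.
type Error struct {
	Msg string
	Txn *Transaction
}

// sanityCheckCommittedErr mirrors the assertion at txn_coord_sender.go:825: an
// error response whose attached transaction is already COMMITTED should be
// impossible, so the real code calls log.Fatalf and crashes the node.
func sanityCheckCommittedErr(pErr *Error) {
	if pErr != nil && pErr.Txn != nil && pErr.Txn.Status == COMMITTED {
		log.Fatalf("transaction unexpectedly committed: %s. txn: %+v.", pErr.Msg, *pErr.Txn)
	}
}

func main() {
	// An error that comes back carrying a COMMITTED transaction trips the
	// check, which is the condition reported above (stat=COMMITTED on the
	// error's txn).
	pErr := &Error{
		Msg: "example error from a batch request",
		Txn: &Transaction{ID: "bd5dddc2", Status: COMMITTED},
	}
	sanityCheckCommittedErr(pErr)
}

In the real path, (*TxnCoordSender).updateStateLocked calls sanityCheckCommittedErr when the transaction attached to the error has Status == roachpb.COMMITTED, and the log.Fatalf at txn_coord_sender.go:825 terminates the node, which is what produced this Sentry report (the "×" placeholders in the message are redacted values).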
Tags:
Cockroach Release: v20.2.11-36-g68a0fb9723
Cockroach SHA: 68a0fb9
Platform: linux amd64
Distribution: CCL
Environment: v20.2.11-36-g68a0fb9723
Command: server
Go Version: (not reported)
# of CPUs: (not reported)
# of Goroutines: (not reported)
@cockroach-teamcity added the C-bug and O-sentry labels on Jun 24, 2021
@jordanlewis added the T-kv (KV Team) label on Jun 28, 2021
@yuzefovich (Member)

dup of #57552
