sql: v20.2.0-alpha.1: panic with "no bytes in account to release" #50804

Closed
cockroach-teamcity opened this issue Jun 30, 2020 · 2 comments · Fixed by #50962
Comments

@cockroach-teamcity
Member

This issue was autofiled by Sentry. It represents a crash or reported error on a live cluster with telemetry enabled.

Sentry link: https://sentry.io/organizations/cockroach-labs/issues/1754062469/?referrer=webhooks_plugin

Panic message:

*errors.errorString
*safedetails.withSafeDetails: panic: %v (1)
conn_executor.go:488: *withstack.withStack (top exception)
*safedetails.withSafeDetails: while executing: %s (2)
conn_executor.go:488: *withstack.withStack (3)
(check the extra data payloads)

Stacktrace (expand for inline code snippets):

r := recover()
h.ex.closeWrapper(ctx, r)
}()
in pkg/sql.(*Server).ServeConn.func1
/usr/local/go/src/runtime/panic.go#L678-L680 in runtime.gopanic
if b.used < delta {
panic(fmt.Sprintf("%s: no bytes in account to release, current %d, free %d",
b.mon.name, b.used, delta))
in pkg/util/mon.(*BoundAccount).Shrink
// used in the shrink paths.
c.memAcc.Shrink(context.TODO(), size)
}
in pkg/sql/rowcontainer.(*RowContainer).PopFirst
v.run.row = v.run.rows.At(0)
v.run.rows.PopFirst()
return true, nil
in pkg/sql.(*vTableLookupJoinNode).Next
for p.State == execinfra.StateRunning {
valid, err := p.node.Next(p.params)
if err != nil || !valid {
in pkg/sql.(*planNodeToRowSource).Next
}
row, meta := source.Next()
if meta != nil {
in pkg/sql/rowexec.(*hashJoiner).receiveNext
var err error
row, meta, emitDirectly, err = h.receiveNext(side)
if err != nil {
in pkg/sql/rowexec.(*hashJoiner).readProbeSide
case hjReadingProbeSide:
h.runningState, row, meta = h.readProbeSide()
case hjProbingRow:
in pkg/sql/rowexec.(*hashJoiner).Next
var p *execinfrapb.ProducerMetadata
r.row, p = r.source.Next()
in pkg/sql.(*rowSourceToPlanNode).Next
// Lookup more rows from the virtual table.
ok, err := v.input.Next(params)
if !ok || err != nil {
in pkg/sql.(*vTableLookupJoinNode).Next
for p.State == execinfra.StateRunning {
valid, err := p.node.Next(p.params)
if err != nil || !valid {
in pkg/sql.(*planNodeToRowSource).Next
}
row, meta := source.Next()
if meta != nil {
in pkg/sql/rowexec.(*hashJoiner).receiveNext
row, meta, emitDirectly, err := h.receiveNext(side)
if err != nil {
in pkg/sql/rowexec.(*hashJoiner).build
case hjBuilding:
h.runningState, row, meta = h.build()
case hjConsumingStoredSide:
in pkg/sql/rowexec.(*hashJoiner).Next
for {
row, meta := w.input.Next()
if meta != nil {
in pkg/sql/rowexec.(*windower).accumulateRows
case windowerAccumulating:
w.runningState, row, meta = w.accumulateRows()
case windowerEmittingRows:
in pkg/sql/rowexec.(*windower).Next
for {
row, meta := s.input.Next()
if meta != nil {
in pkg/sql/rowexec.(*sortAllProcessor).fill
valid, err := s.fill()
if !valid || err != nil {
in pkg/sql/rowexec.(*sortAllProcessor).Start
}
ctx = pb.self.Start(ctx)
Run(ctx, pb.self, pb.Out.output)
in pkg/sql/execinfra.(*ProcessorBase).Run
}
headProc.Run(ctx)
return nil
in pkg/sql/flowinfra.(*FlowBase).Run
// TODO(radu): this should go through the flow scheduler.
if err := flow.Run(ctx, func() {}); err != nil {
log.Fatalf(ctx, "unexpected error from syncFlow.Start(): %s "+
in pkg/sql.(*DistSQLPlanner).Run
recv.expectedRowsRead = int64(physPlan.TotalEstimatedScannedRows)
return dsp.Run(planCtx, txn, physPlan, recv, evalCtx, nil /* finishedSetupFn */)
}
in pkg/sql.(*DistSQLPlanner).PlanAndRun
// the planner whether or not to plan remote table readers.
cleanup := ex.server.cfg.DistSQLPlanner.PlanAndRun(
ctx, evalCtx, planCtx, planner.txn, planner.curPlan.main, recv,
in pkg/sql.(*connExecutor).execWithDistSQLEngine
ex.sessionTracing.TraceExecStart(ctx, "distributed")
bytesRead, rowsRead, err := ex.execWithDistSQLEngine(ctx, planner, stmt.AST.StatementType(), res, distributePlan, progAtomic)
ex.sessionTracing.TraceExecEnd(ctx, res.Err(), res.RowsAffected())
in pkg/sql.(*connExecutor).dispatchToExecutionEngine
p.autoCommit = os.ImplicitTxn.Get() && !ex.server.cfg.TestingKnobs.DisableAutoCommit
if err := ex.dispatchToExecutionEngine(ctx, p, res); err != nil {
return nil, nil, err
in pkg/sql.(*connExecutor).execStmtInOpenState
} else {
ev, payload, err = ex.execStmtInOpenState(ctx, stmt, res, pinfo)
}
in pkg/sql.(*connExecutor).execStmt
stmtCtx := withStatement(ctx, ex.curStmt)
ev, payload, err = ex.execStmt(stmtCtx, curStmt, stmtRes, pinfo)
if err != nil {
in pkg/sql.(*connExecutor).execCmd
var err error
if err = ex.execCmd(ex.Ctx()); err != nil {
if errors.IsAny(err, io.EOF, errDrainingComplete) {
in pkg/sql.(*connExecutor).run
}()
return h.ex.run(ctx, s.pool, reserved, cancel)
}
in pkg/sql.(*Server).ServeConn
reservedOwned = false // We're about to pass ownership away.
retErr = sqlServer.ServeConn(ctx, connHandler, reserved, cancelConn)
}()
in pkg/sql/pgwire.(*conn).processCommandsAsync.func1
/usr/local/go/src/runtime/asm_amd64.s#L1356-L1358 in runtime.goexit

pkg/sql/conn_executor.go in pkg/sql.(*Server).ServeConn.func1 at line 488
/usr/local/go/src/runtime/panic.go in runtime.gopanic at line 679
pkg/util/mon/bytes_usage.go in pkg/util/mon.(*BoundAccount).Shrink at line 574
pkg/sql/rowcontainer/datum_row_container.go in pkg/sql/rowcontainer.(*RowContainer).PopFirst at line 294
pkg/sql/virtual_table.go in pkg/sql.(*vTableLookupJoinNode).Next at line 263
pkg/sql/plan_node_to_row_source.go in pkg/sql.(*planNodeToRowSource).Next at line 171
pkg/sql/rowexec/hashjoiner.go in pkg/sql/rowexec.(*hashJoiner).receiveNext at line 652
pkg/sql/rowexec/hashjoiner.go in pkg/sql/rowexec.(*hashJoiner).readProbeSide at line 426
pkg/sql/rowexec/hashjoiner.go in pkg/sql/rowexec.(*hashJoiner).Next at line 237
pkg/sql/row_source_to_plan_node.go in pkg/sql.(*rowSourceToPlanNode).Next at line 75
pkg/sql/virtual_table.go in pkg/sql.(*vTableLookupJoinNode).Next at line 268
pkg/sql/plan_node_to_row_source.go in pkg/sql.(*planNodeToRowSource).Next at line 171
pkg/sql/rowexec/hashjoiner.go in pkg/sql/rowexec.(*hashJoiner).receiveNext at line 652
pkg/sql/rowexec/hashjoiner.go in pkg/sql/rowexec.(*hashJoiner).build at line 299
pkg/sql/rowexec/hashjoiner.go in pkg/sql/rowexec.(*hashJoiner).Next at line 233
pkg/sql/rowexec/windower.go in pkg/sql/rowexec.(*windower).accumulateRows at line 280
pkg/sql/rowexec/windower.go in pkg/sql/rowexec.(*windower).Next at line 232
pkg/sql/rowexec/sorter.go in pkg/sql/rowexec.(*sortAllProcessor).fill at line 291
pkg/sql/rowexec/sorter.go in pkg/sql/rowexec.(*sortAllProcessor).Start at line 272
pkg/sql/execinfra/processorsbase.go in pkg/sql/execinfra.(*ProcessorBase).Run at line 747
pkg/sql/flowinfra/flow.go in pkg/sql/flowinfra.(*FlowBase).Run at line 369
pkg/sql/distsql_running.go in pkg/sql.(*DistSQLPlanner).Run at line 409
pkg/sql/distsql_running.go in pkg/sql.(*DistSQLPlanner).PlanAndRun at line 1008
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).execWithDistSQLEngine at line 903
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).dispatchToExecutionEngine at line 800
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).execStmtInOpenState at line 488
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).execStmt at line 99
pkg/sql/conn_executor.go in pkg/sql.(*connExecutor).execCmd at line 1418
pkg/sql/conn_executor.go in pkg/sql.(*connExecutor).run at line 1285
pkg/sql/conn_executor.go in pkg/sql.(*Server).ServeConn at line 490
pkg/sql/pgwire/conn.go in pkg/sql/pgwire.(*conn).processCommandsAsync.func1 at line 595
/usr/local/go/src/runtime/asm_amd64.s in runtime.goexit at line 1357
Tag                 Value
Cockroach Release   v20.2.0-alpha.1
Cockroach SHA:      59b7964
Platform            linux amd64
Distribution        CCL
Environment         v20.2.0-alpha.1
Command             server
Go Version          ``
# of CPUs
# of Goroutines
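
For readers unfamiliar with the accounting involved: the frames above end in pkg/util/mon.(*BoundAccount).Shrink, which checks that a consumer never releases more bytes than it previously registered. The toy Go program below is a minimal sketch of that invariant (simplified names, not the actual mon package); releasing the same bytes twice, or releasing with a size computed differently than the one registered, reproduces this exact panic message.

```go
package main

import "fmt"

// account is a toy stand-in for a memory account: it only tracks how many
// bytes a consumer has registered. Names and structure are illustrative.
type account struct {
	name string
	used int64
}

// grow registers delta bytes as in use.
func (a *account) grow(delta int64) {
	a.used += delta
}

// shrink releases delta bytes. Like the check visible in the stacktrace,
// it panics if asked to release more than is currently registered.
func (a *account) shrink(delta int64) {
	if a.used < delta {
		panic(fmt.Sprintf("%s: no bytes in account to release, current %d, free %d",
			a.name, a.used, delta))
	}
	a.used -= delta
}

func main() {
	acc := account{name: "toy-monitor"}
	acc.grow(64)
	acc.shrink(64)
	// Releasing again (a "double shrink"), or shrinking by a size that was
	// never grown, trips the check above.
	acc.shrink(64)
}
```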
@cockroach-teamcity cockroach-teamcity added C-bug Code not up to spec/doc, specs & docs deemed correct. Solution expected to change code/behavior. O-sentry Originated from an in-the-wild panic report. labels Jun 30, 2020
@yuzefovich
Member

Statement signature is SELECT * FROM (SELECT _._, _._, _._, _._, _._ OR ((_._ = _) AND _._) AS _, _._, _._, row_number() OVER (PARTITION BY _._ ORDER BY _._) AS _, _ AS _, pg_get_expr(_._, _._) AS _, _._, _._, _._ FROM _._ AS _ JOIN _._ AS _ ON (_._ = _._) JOIN _._ AS _ ON (_._ = _._) JOIN _._ AS _ ON (_._ = _._) LEFT JOIN _._ AS _ ON ((_._ = _._) AND (_._ = _._)) LEFT JOIN _._ AS _ ON ((_._ = _._) AND (_._ = _._)) LEFT JOIN _._ AS _ ON ((_._ = _._) AND (_._ = _)) LEFT JOIN _._ AS _ ON ((_._ = _._) AND (_._ = _)) WHERE ((((_._ IN (_, _, __more3__)) AND (_._ > _)) AND (NOT _._)) AND (_._ LIKE _)) AND (_._ LIKE _)) AS _ WHERE _ AND (_ LIKE _) ORDER BY _, _._, _.

I'm not sure why this is a panic; I don't think it should be a crash.

@yuzefovich yuzefovich changed the title sentry: *errors.errorString *safedetails.withSafeDetails: panic: %v (1) conn_executor.go:488: *withstack.withStack (top exception) *safedetails.withSafeDetails: while executing: %s (2) conn_executor.go:488: *withstack.withStack (3) (check the extra data payloads) sql: v20.2.0-alpha.1: panic with "no bytes in account to release" Jun 30, 2020
@asubiotto
Contributor

The action item here is to change the panic to ReportOrPanic.
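
For illustration only, here is a minimal, self-contained sketch of the report-or-panic shape being suggested. The reportOrPanic helper below is a hypothetical stand-in for CockroachDB's log.ReportOrPanic (which, as I understand it, files a Sentry report in release builds and panics in dev/test builds); it is not the actual API, nor the change that landed in #50962.

```go
package main

import (
	"fmt"
	"log"
)

// reportOrPanic is a hypothetical stand-in for log.ReportOrPanic: in a
// release build it would report the error (e.g. to Sentry) and let the
// process continue; in dev/test builds it still panics so the bug stays loud.
func reportOrPanic(releaseBuild bool, format string, args ...interface{}) {
	if releaseBuild {
		log.Printf("reported: "+format, args...)
		return
	}
	panic(fmt.Sprintf(format, args...))
}

// shrink sketches the shape of the proposed change: when the accounting
// invariant is violated, report and clamp instead of crashing the node.
func shrink(used *int64, delta int64, releaseBuild bool) {
	if *used < delta {
		reportOrPanic(releaseBuild,
			"no bytes in account to release, current %d, free %d", *used, delta)
		delta = *used // clamp so the accounted usage never goes negative
	}
	*used -= delta
}

func main() {
	used := int64(32)
	shrink(&used, 64, true /* releaseBuild */)
	fmt.Println("remaining:", used) // prints 0, the query can keep going
}
```

Whether clamping or simply skipping the release is the right recovery is a separate question; the point is only that an accounting bug in a single query need not bring down the node.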
