sentry: panic due to decoding unset EncDatum #41630

Closed
cockroach-teamcity opened this issue Oct 16, 2019 · 4 comments
Labels
C-bug Code not up to spec/doc, specs & docs deemed correct. Solution expected to change code/behavior. O-sentry Originated from an in-the-wild panic report.

Comments

@cockroach-teamcity (Member)

This issue was autofiled by Sentry. It represents a crash or reported error on a live cluster with telemetry enabled.

Sentry link: https://sentry.io/organizations/cockroach-labs/issues/1276713044/?referrer=webhooks_plugin

Panic message:

(0) encoded_datum.go:220: decoding unset EncDatum | string
(1) statement: UPDATE _ SET _ = _ + _ WHERE _ = $1 ORDER BY _ DESC LIMIT (SELECT count(*) FROM _ WHERE _ = $1)
(see stack traces in additional data)
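
For orientation: an EncDatum stores a column value either as its on-disk encoding or as an already-decoded datum, and the assertion in frame (0) fires when a column carries neither, i.e. a render expression was handed a column that was never populated. Below is a minimal, self-contained sketch of that invariant; the types and field names are simplified stand-ins, not the actual cockroachdb/cockroach code.

```go
package main

import (
	"errors"
	"fmt"
)

// EncDatum is a simplified stand-in for sqlbase.EncDatum (hypothetical
// fields, not the real struct): a column value held either as encoded
// bytes or as an already-decoded datum.
type EncDatum struct {
	encoded []byte      // value encoding; nil if the column was never populated
	Datum   interface{} // decoded value; nil until EnsureDecoded runs
}

// EnsureDecoded mirrors the guard at encoded_datum.go:220 in the report:
// if the value is neither decoded nor encoded there is nothing to decode,
// and the call is an internal error (the assertion Sentry captured).
func (ed *EncDatum) EnsureDecoded() error {
	if ed.Datum != nil {
		return nil // already decoded
	}
	if ed.encoded == nil {
		return errors.New("decoding unset EncDatum")
	}
	// The real code decodes ed.encoded into ed.Datum here; elided in this sketch.
	ed.Datum = string(ed.encoded)
	return nil
}

func main() {
	// A row in which the second column was never fetched, yet a render
	// expression (the `_ + _` in the reported UPDATE) still tries to read it.
	row := []EncDatum{
		{encoded: []byte("42")},
		{}, // unset: no encoded bytes and no decoded datum
	}
	for i := range row {
		if err := row[i].EnsureDecoded(); err != nil {
			fmt.Printf("column %d: %v\n", i, err) // column 1: decoding unset EncDatum
		}
	}
}
```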

Stacktrace (with inline code snippets):

if ed.encoded == nil {
return pgerror.NewAssertionErrorf("decoding unset EncDatum")
}
in pkg/sql/sqlbase.(*EncDatum).EnsureDecoded
func (eh *exprHelper) IndexedVarEval(idx int, ctx *tree.EvalContext) (tree.Datum, error) {
err := eh.row[idx].EnsureDecoded(&eh.types[idx], &eh.datumAlloc)
if err != nil {
in pkg/sql/distsqlrun.(*exprHelper).IndexedVarEval
}
return ctx.IVarContainer.IndexedVarEval(v.Idx, ctx)
}
in pkg/sql/sem/tree.(*IndexedVar).Eval
eh.evalCtx.PushIVarContainer(eh)
d, err := eh.expr.Eval(eh.evalCtx)
eh.evalCtx.PopIVarContainer()
in pkg/sql/distsqlrun.(*exprHelper).eval
for i := range h.renderExprs {
datum, err := h.renderExprs[i].eval(row)
if err != nil {
in pkg/sql/distsqlrun.(*ProcOutputHelper).ProcessRow
func (pb *ProcessorBase) ProcessRowHelper(row sqlbase.EncDatumRow) sqlbase.EncDatumRow {
outRow, ok, err := pb.out.ProcessRow(pb.Ctx, row)
if err != nil {
in pkg/sql/distsqlrun.(*ProcessorBase).ProcessRowHelper
ij.fetcherReady = false
} else if outRow := ij.ProcessRowHelper(row); outRow != nil {
return outRow, nil
in pkg/sql/distsqlrun.(*indexJoiner).Next
var p *distsqlrun.ProducerMetadata
r.row, p = r.source.Next()
in pkg/sql.(*rowSourceToPlanNode).Next

cockroach/pkg/sql/update.go, lines 535 to 537 in 1a14d34
// Advance one individual row.
if next, err := u.source.Next(params); !next {
lastBatch = true
in pkg/sql.(*updateNode).BatchedNext
for {
if next, err := r.source.BatchedNext(params); !next {
return err
in pkg/sql.(*rowCountNode).startExec

cockroach/pkg/sql/plan.go, lines 495 to 497 in 1a14d34
leaveNode: func(_ string, n planNode) error {
return n.startExec(params)
},
in pkg/sql.startExec.func2

cockroach/pkg/sql/walk.go, lines 145 to 147 in 1a14d34
}
v.err = v.observer.leaveNode(name, plan)
}()
in pkg/sql.(*planVisitor).visitInternal.func1

cockroach/pkg/sql/walk.go, lines 612 to 614 in 1a14d34
}
}
in pkg/sql.(*planVisitor).visitInternal

cockroach/pkg/sql/walk.go, lines 112 to 114 in 1a14d34
}
v.visitInternal(plan, name)
return plan
in pkg/sql.(*planVisitor).visit
v := makePlanVisitor(ctx, observer)
v.visit(plan)
return v.err
in pkg/sql.walkPlan

cockroach/pkg/sql/plan.go, lines 498 to 500 in 1a14d34
}
return walkPlan(params.ctx, plan, o)
}
in pkg/sql.startExec
// This starts all of the nodes below this node.
if err := startExec(p.params, p.node); err != nil {
p.MoveToDraining(err)
in pkg/sql.(*planNodeToRowSource).Start
}
ctx = pb.self.Start(ctx)
Run(ctx, pb.self, pb.out.output)
in pkg/sql/distsqlrun.(*ProcessorBase).Run
}
headProc.Run(ctx)
return nil
in pkg/sql/distsqlrun.(*Flow).Run
// TODO(radu): this should go through the flow scheduler.
if err := flow.Run(ctx, func() {}); err != nil {
log.Fatalf(ctx, "unexpected error from syncFlow.Start(): %s "+
in pkg/sql.(*DistSQLPlanner).Run
dsp.FinalizePlan(planCtx, &physPlan)
dsp.Run(planCtx, txn, &physPlan, recv, evalCtx, nil /* finishedSetupFn */)
}
in pkg/sql.(*DistSQLPlanner).PlanAndRun
// the planner whether or not to plan remote table readers.
ex.server.cfg.DistSQLPlanner.PlanAndRun(
ctx, evalCtx, planCtx, planner.txn, planner.curPlan.plan, recv)
in pkg/sql.(*connExecutor).execWithDistSQLEngine
ex.sessionTracing.TraceExecStart(ctx, "distributed")
err = ex.execWithDistSQLEngine(ctx, planner, stmt.AST.StatementType(), res, distributePlan)
ex.sessionTracing.TraceExecEnd(ctx, res.Err(), res.RowsAffected())
in pkg/sql.(*connExecutor).dispatchToExecutionEngine
p.autoCommit = os.ImplicitTxn.Get() && !ex.server.cfg.TestingKnobs.DisableAutoCommit
if err := ex.dispatchToExecutionEngine(ctx, p, res); err != nil {
return nil, nil, err
in pkg/sql.(*connExecutor).execStmtInOpenState
} else {
ev, payload, err = ex.execStmtInOpenState(ctx, stmt, pinfo, res)
}
in pkg/sql.(*connExecutor).execStmt
stmtCtx := withStatement(ex.Ctx(), ex.curStmt)
ev, payload, err = ex.execStmt(stmtCtx, curStmt, stmtRes, pinfo)
if err != nil {
in pkg/sql.(*connExecutor).run
}()
return h.ex.run(ctx, s.pool, reserved, cancel)
}
in pkg/sql.(*Server).ServeConn
reservedOwned = false // We're about to pass ownership away.
retErr = sqlServer.ServeConn(ctx, connHandler, reserved, cancelConn)
}()
in pkg/sql/pgwire.(*conn).processCommandsAsync.func1

pkg/sql/sqlbase/encoded_datum.go in pkg/sql/sqlbase.(*EncDatum).EnsureDecoded at line 220
pkg/sql/distsqlrun/expr.go in pkg/sql/distsqlrun.(*exprHelper).IndexedVarEval at line 132
pkg/sql/sem/tree/indexed_vars.go in pkg/sql/sem/tree.(*IndexedVar).Eval at line 80
pkg/sql/distsqlrun/expr.go in pkg/sql/distsqlrun.(*exprHelper).eval at line 194
pkg/sql/distsqlrun/processors.go in pkg/sql/distsqlrun.(*ProcOutputHelper).ProcessRow at line 375
pkg/sql/distsqlrun/processors.go in pkg/sql/distsqlrun.(*ProcessorBase).ProcessRowHelper at line 778
pkg/sql/distsqlrun/indexjoiner.go in pkg/sql/distsqlrun.(*indexJoiner).Next at line 183
pkg/sql/row_source_to_plan_node.go in pkg/sql.(*rowSourceToPlanNode).Next at line 78
pkg/sql/update.go in pkg/sql.(*updateNode).BatchedNext at line 536
pkg/sql/plan_batch.go in pkg/sql.(*rowCountNode).startExec at line 173
pkg/sql/plan.go in pkg/sql.startExec.func2 at line 496
pkg/sql/walk.go in pkg/sql.(*planVisitor).visitInternal.func1 at line 146
pkg/sql/walk.go in pkg/sql.(*planVisitor).visitInternal at line 613
pkg/sql/walk.go in pkg/sql.(*planVisitor).visit at line 113
pkg/sql/walk.go in pkg/sql.walkPlan at line 77
pkg/sql/plan.go in pkg/sql.startExec at line 499
pkg/sql/plan_node_to_row_source.go in pkg/sql.(*planNodeToRowSource).Start at line 124
pkg/sql/distsqlrun/processors.go in pkg/sql/distsqlrun.(*ProcessorBase).Run at line 800
pkg/sql/distsqlrun/flow.go in pkg/sql/distsqlrun.(*Flow).Run at line 626
pkg/sql/distsql_running.go in pkg/sql.(*DistSQLPlanner).Run at line 252
pkg/sql/distsql_running.go in pkg/sql.(*DistSQLPlanner).PlanAndRun at line 839
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).execWithDistSQLEngine at line 1125
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).dispatchToExecutionEngine at line 961
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).execStmtInOpenState at line 456
pkg/sql/conn_executor_exec.go in pkg/sql.(*connExecutor).execStmt at line 102
pkg/sql/conn_executor.go in pkg/sql.(*connExecutor).run at line 1246
pkg/sql/conn_executor.go in pkg/sql.(*Server).ServeConn at line 436
pkg/sql/pgwire/conn.go in pkg/sql/pgwire.(*conn).processCommandsAsync.func1 at line 580
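
Reading the frames top-down: the index joiner emits a row, ProcOutputHelper.ProcessRow evaluates the processor's render expressions, and each indexed variable in a render expression resolves through exprHelper.IndexedVarEval, which is where EnsureDecoded is invoked on the referenced column. The following is a rough, self-contained sketch of that render step, using hypothetical helper names and simplified types only to show where an unset column would surface; it is not the real distsqlrun code.

```go
package main

import (
	"errors"
	"fmt"
)

// col is a simplified EncDatum: it either carries a value or is unset.
type col struct {
	val string
	set bool
}

// encDatumRow is a simplified EncDatumRow.
type encDatumRow []col

// indexedVarEval loosely mirrors exprHelper.IndexedVarEval from the trace:
// an indexed variable in a render expression is resolved against the
// current row, which requires the referenced column to be decodable.
func indexedVarEval(row encDatumRow, idx int) (string, error) {
	if !row[idx].set {
		// Corresponds to EnsureDecoded returning "decoding unset EncDatum".
		return "", errors.New("decoding unset EncDatum")
	}
	return row[idx].val, nil
}

// processRow loosely mirrors ProcOutputHelper.ProcessRow: evaluate every
// render expression (modelled here as a plain column index) for one row.
func processRow(row encDatumRow, renders []int) ([]string, error) {
	out := make([]string, 0, len(renders))
	for _, idx := range renders {
		d, err := indexedVarEval(row, idx)
		if err != nil {
			return nil, err
		}
		out = append(out, d)
	}
	return out, nil
}

func main() {
	// The upstream processor produced a row whose second column is unset,
	// but the output spec still renders it; the error then propagates up
	// the same chain of frames shown in the trace above.
	row := encDatumRow{{val: "a", set: true}, {set: false}}
	if _, err := processRow(row, []int{0, 1}); err != nil {
		fmt.Println("render failed:", err)
	}
}
```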
Tag                Value
Cockroach Release  v19.1.3
Cockroach SHA      1a14d34
Platform           linux amd64
Distribution       CCL
Environment        v19.1.3
Command            server
Go Version         go1.11.6
# of CPUs          6
# of Goroutines    241
@cockroach-teamcity cockroach-teamcity added C-bug Code not up to spec/doc, specs & docs deemed correct. Solution expected to change code/behavior. O-sentry Originated from an in-the-wild panic report. labels Oct 16, 2019
@rytaft (Collaborator) commented Oct 22, 2019

@jordanlewis could this be the same issue as #39794? Does that issue need a backport to 19.1?

@rytaft rytaft changed the title sentry: (0) encoded_datum.go:220: decoding unset EncDatum | string (1) statement: UPDATE _ SET _ = _ + _ WHERE _ = $1 ORDER BY _ DESC LIMIT (SELECT count(*) FROM _ WHERE _ = $1) (see stack traces in additional data) sentry: panic due to decoding unset EncDatum Oct 22, 2019
@rytaft (Collaborator) commented Oct 22, 2019

Similar to #36834 and #36356.

@robert-s-lee (Contributor)

@jordanlewis what release would the fix be merged in?

@asubiotto (Contributor)

I think this might've been fixed by #42833
