-
Notifications
You must be signed in to change notification settings - Fork 3.8k
/
data.go
346 lines (315 loc) · 11.8 KB
/
data.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package execinfrapb
import (
"context"
"sync"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/logtags"
)
// ConvertToColumnOrdering converts an Ordering type (as defined in data.proto)
// to a sqlbase.ColumnOrdering type.
func ConvertToColumnOrdering(specOrdering Ordering) colinfo.ColumnOrdering {
	result := make(colinfo.ColumnOrdering, len(specOrdering.Columns))
	for i := range specOrdering.Columns {
		col := &specOrdering.Columns[i]
		// Anything that is not explicitly ascending is treated as descending,
		// mirroring the two-valued proto enum.
		dir := encoding.Descending
		if col.Direction == Ordering_Column_ASC {
			dir = encoding.Ascending
		}
		result[i].ColIdx = int(col.ColIdx)
		result[i].Direction = dir
	}
	return result
}
// ConvertToSpecOrdering converts a sqlbase.ColumnOrdering type
// to an Ordering type (as defined in data.proto).
func ConvertToSpecOrdering(columnOrdering colinfo.ColumnOrdering) Ordering {
	// No plan-to-stream remapping is needed here, so pass a nil map.
	return ConvertToMappedSpecOrdering(columnOrdering, nil /* planToStreamColMap */)
}
// ConvertToMappedSpecOrdering converts a sqlbase.ColumnOrdering type
// to an Ordering type (as defined in data.proto), using the column
// indices contained in planToStreamColMap.
func ConvertToMappedSpecOrdering(
	columnOrdering colinfo.ColumnOrdering, planToStreamColMap []int,
) Ordering {
	cols := make([]Ordering_Column, len(columnOrdering))
	for i, ord := range columnOrdering {
		streamIdx := ord.ColIdx
		if planToStreamColMap != nil {
			// Remap the plan column index to the corresponding stream column.
			streamIdx = planToStreamColMap[ord.ColIdx]
			if streamIdx == -1 {
				// -1 means the column is not present in the stream; ordering
				// by it is a programming error.
				panic(errors.AssertionFailedf("column %d in sort ordering not available", ord.ColIdx))
			}
		}
		cols[i].ColIdx = uint32(streamIdx)
		// Default to descending and flip to ascending when needed, mirroring
		// the two-valued encoding.Direction.
		dir := Ordering_Column_DESC
		if ord.Direction == encoding.Ascending {
			dir = Ordering_Column_ASC
		}
		cols[i].Direction = dir
	}
	return Ordering{Columns: cols}
}
// ExprFmtCtxBase produces a FmtCtx used for serializing expressions; a proper
// IndexedVar formatting function needs to be added on. It replaces placeholders
// with their values.
func ExprFmtCtxBase(evalCtx *tree.EvalContext) *tree.FmtCtx {
	// Placeholders are evaluated eagerly so the serialized expression is
	// self-contained when it reaches a remote node.
	formatPlaceholder := func(ctx *tree.FmtCtx, p *tree.Placeholder) {
		datum, err := p.Eval(evalCtx)
		if err != nil {
			panic(errors.AssertionFailedf("failed to serialize placeholder: %s", err))
		}
		datum.Format(ctx)
	}
	fmtCtx := tree.NewFmtCtx(tree.FmtCheckEquivalence)
	fmtCtx.SetPlaceholderFormat(formatPlaceholder)
	return fmtCtx
}
// Expression is the representation of a SQL expression.
// See data.proto for the corresponding proto definition. Its automatic type
// declaration is suppressed in the proto via the typedecl=false option, so that
// we can add the LocalExpr field which is not serialized. It never needs to be
// serialized because we only use it in the case where we know we won't need to
// send it, as a proto, to another machine.
type Expression struct {
	// Version is unused.
	Version string
	// Expr, if present, is the string representation of this expression.
	// SQL expressions are passed as a string, with ordinal references
	// (@1, @2, @3 ..) used for "input" variables.
	Expr string
	// LocalExpr is an unserialized field that's used to pass expressions to
	// the gateway node without serializing/deserializing them. It is always
	// set in non-test setup.
	LocalExpr tree.TypedExpr
}
// Empty returns true if the expression has neither an Expr nor LocalExpr.
func (e *Expression) Empty() bool {
	if e.LocalExpr != nil {
		return false
	}
	return e.Expr == ""
}
// String implements the Stringer interface. The string form of the expression
// takes priority; otherwise the local expression is formatted, and "none" is
// returned when both are absent.
func (e Expression) String() string {
	if e.Expr != "" {
		return e.Expr
	}
	if e.LocalExpr == nil {
		return "none"
	}
	fmtCtx := tree.NewFmtCtx(tree.FmtCheckEquivalence)
	fmtCtx.FormatNode(e.LocalExpr)
	return fmtCtx.CloseAndGetString()
}
// String implements fmt.Stringer.
func (e *Error) String() string {
	err := e.ErrorDetail(context.TODO())
	if err == nil {
		return "<nil>"
	}
	return err.Error()
}
// NewError creates an Error from an error, to be sent on the wire. It will
// recognize certain errors and marshall them accordingly, and everything
// unrecognized is turned into a PGError with code "internal".
func NewError(ctx context.Context, err error) *Error {
	// Tag the context so the encoded error records where it was sent from.
	ctx = logtags.AddTag(ctx, "sent-error", nil)
	// Encode the full error to the best of our ability.
	// This field is ignored by 19.1 nodes and prior.
	encoded := errors.EncodeError(ctx, errors.WithContextTags(err, ctx))
	return &Error{FullError: &encoded}
}
// ErrorDetail returns the payload as a Go error, or nil when the receiver is
// nil. The returned error is tagged with the receiving context's log tags.
func (e *Error) ErrorDetail(ctx context.Context) (err error) {
	if e == nil {
		return nil
	}
	// The deferred wrap applies to whichever error is ultimately returned,
	// annotating it as received from a remote node.
	defer func() {
		ctx = logtags.AddTag(ctx, "received-error", nil)
		err = errors.WithContextTags(err, ctx)
	}()
	if e.FullError == nil {
		// We're receiving an error we don't know about. It's all right,
		// it's still an error, just one we didn't expect. Let it go
		// through. We'll pick it up in reporting.
		return errors.AssertionFailedf("unknown error from remote node")
	}
	// If there's a 19.2-forward full error, decode and use that.
	// This will reveal a fully causable detailed error structure.
	return errors.DecodeError(ctx, *e.FullError)
}
// ProducerMetadata represents a metadata record flowing through a DistSQL flow.
type ProducerMetadata struct {
	// Only one of these fields will be set. If this ever changes, note that
	// there're consumers out there that extract the error and, if there is one,
	// forward it in isolation and drop the rest of the record.

	// Ranges contains range information to be propagated back to the gateway.
	Ranges []roachpb.RangeInfo
	// TODO(vivek): change to type Error
	Err error
	// TraceData is sent if tracing is enabled.
	TraceData []tracingpb.RecordedSpan
	// LeafTxnFinalState contains the final state of the LeafTxn to be
	// sent from leaf flows to the RootTxn held by the flow's ultimate
	// receiver.
	LeafTxnFinalState *roachpb.LeafTxnFinalState
	// RowNum corresponds to a row produced by a "source" processor that takes no
	// inputs. It is used in tests to verify that all metadata is forwarded
	// exactly once to the receiver on the gateway node.
	RowNum *RemoteProducerMetadata_RowNum
	// SamplerProgress contains incremental progress information from the sampler
	// processor.
	SamplerProgress *RemoteProducerMetadata_SamplerProgress
	// BulkProcessorProgress contains incremental progress information from a bulk
	// operation processor (backfiller, import, etc).
	BulkProcessorProgress *RemoteProducerMetadata_BulkProcessorProgress
	// Metrics contains information about goodput of the node.
	Metrics *RemoteProducerMetadata_Metrics
}
var (
	// TODO(yuzefovich): use this pool in other places apart from metrics
	// collection.
	// producerMetadataPool is a pool of producer metadata objects, recycled
	// via GetProducerMeta and ProducerMetadata.Release.
	producerMetadataPool = sync.Pool{
		New: func() interface{} {
			return &ProducerMetadata{}
		},
	}
	// rpmMetricsPool is a pool of metadata used to propagate metrics, recycled
	// via GetMetricsMeta and RemoteProducerMetadata_Metrics.Release.
	rpmMetricsPool = sync.Pool{
		New: func() interface{} {
			return &RemoteProducerMetadata_Metrics{}
		},
	}
)
// Release is part of Releasable interface. The object is zeroed out before
// being returned to the pool so stale references are not retained.
func (meta *ProducerMetadata) Release() {
	*meta = ProducerMetadata{}
	producerMetadataPool.Put(meta)
}
// Release is part of Releasable interface. Note that although this meta is
// only used together with a ProducerMetadata that comes from another pool, we
// do not combine two Release methods into one because two objects have a
// different lifetime. The object is zeroed before going back to its pool.
func (meta *RemoteProducerMetadata_Metrics) Release() {
	*meta = RemoteProducerMetadata_Metrics{}
	rpmMetricsPool.Put(meta)
}
// GetProducerMeta returns a producer metadata object from the pool. Callers
// should hand it back via ProducerMetadata.Release when done.
func GetProducerMeta() *ProducerMetadata {
	meta := producerMetadataPool.Get().(*ProducerMetadata)
	return meta
}
// GetMetricsMeta returns a metadata object from the pool of metrics metadata.
// Callers should hand it back via RemoteProducerMetadata_Metrics.Release.
func GetMetricsMeta() *RemoteProducerMetadata_Metrics {
	meta := rpmMetricsPool.Get().(*RemoteProducerMetadata_Metrics)
	return meta
}
// RemoteProducerMetaToLocalMeta converts a RemoteProducerMetadata struct to
// ProducerMetadata and returns whether the conversion was successful or not.
// The returned ProducerMetadata value is backed by a pooled object.
func RemoteProducerMetaToLocalMeta(
	ctx context.Context, rpm RemoteProducerMetadata,
) (ProducerMetadata, bool) {
	meta := GetProducerMeta()
	// Exactly one of the oneof wrapper types is populated; copy its payload
	// into the matching ProducerMetadata field.
	switch payload := rpm.Value.(type) {
	case *RemoteProducerMetadata_RangeInfo:
		meta.Ranges = payload.RangeInfo.RangeInfo
	case *RemoteProducerMetadata_TraceData_:
		meta.TraceData = payload.TraceData.CollectedSpans
	case *RemoteProducerMetadata_LeafTxnFinalState:
		meta.LeafTxnFinalState = payload.LeafTxnFinalState
	case *RemoteProducerMetadata_RowNum_:
		meta.RowNum = payload.RowNum
	case *RemoteProducerMetadata_SamplerProgress_:
		meta.SamplerProgress = payload.SamplerProgress
	case *RemoteProducerMetadata_BulkProcessorProgress_:
		meta.BulkProcessorProgress = payload.BulkProcessorProgress
	case *RemoteProducerMetadata_Error:
		meta.Err = payload.Error.ErrorDetail(ctx)
	case *RemoteProducerMetadata_Metrics_:
		meta.Metrics = payload.Metrics
	default:
		return *meta, false
	}
	return *meta, true
}
// LocalMetaToRemoteProducerMeta converts a ProducerMetadata struct to
// RemoteProducerMetadata. Field checks are ordered, so the first non-nil
// field wins (exactly one is expected to be set).
func LocalMetaToRemoteProducerMeta(
	ctx context.Context, meta ProducerMetadata,
) RemoteProducerMetadata {
	var rpm RemoteProducerMetadata
	switch {
	case meta.Ranges != nil:
		rpm.Value = &RemoteProducerMetadata_RangeInfo{
			RangeInfo: &RemoteProducerMetadata_RangeInfos{
				RangeInfo: meta.Ranges,
			},
		}
	case meta.TraceData != nil:
		rpm.Value = &RemoteProducerMetadata_TraceData_{
			TraceData: &RemoteProducerMetadata_TraceData{
				CollectedSpans: meta.TraceData,
			},
		}
	case meta.LeafTxnFinalState != nil:
		rpm.Value = &RemoteProducerMetadata_LeafTxnFinalState{
			LeafTxnFinalState: meta.LeafTxnFinalState,
		}
	case meta.RowNum != nil:
		rpm.Value = &RemoteProducerMetadata_RowNum_{
			RowNum: meta.RowNum,
		}
	case meta.SamplerProgress != nil:
		rpm.Value = &RemoteProducerMetadata_SamplerProgress_{
			SamplerProgress: meta.SamplerProgress,
		}
	case meta.BulkProcessorProgress != nil:
		rpm.Value = &RemoteProducerMetadata_BulkProcessorProgress_{
			BulkProcessorProgress: meta.BulkProcessorProgress,
		}
	case meta.Metrics != nil:
		rpm.Value = &RemoteProducerMetadata_Metrics_{
			Metrics: meta.Metrics,
		}
	case meta.Err != nil:
		rpm.Value = &RemoteProducerMetadata_Error{
			Error: NewError(ctx, meta.Err),
		}
	case util.CrdbTestBuild:
		// In test builds, a fully-nil meta is a programming error.
		panic("unhandled field in local meta or all fields are nil")
	}
	return rpm
}
// MetadataSource is an interface implemented by processors and columnar
// operators that can produce metadata.
type MetadataSource interface {
	// DrainMeta returns all the metadata produced by the processor or operator.
	// It will be called exactly once, usually, when the processor or operator
	// has finished doing its computations. This is a signal that the output
	// requires no more rows to be returned.
	// Implementers can choose what to do on subsequent calls (if such occur).
	// TODO(yuzefovich): modify the contract to require returning nil on all
	// calls after the first one.
	DrainMeta(context.Context) []ProducerMetadata
}
// MetadataSources is a slice of MetadataSource. It provides a convenience
// DrainMeta that drains all sources in order.
type MetadataSources []MetadataSource
// DrainMeta calls DrainMeta on all MetadataSources, in slice order, and
// returns a single slice with all the accumulated metadata.
func (s MetadataSources) DrainMeta(ctx context.Context) []ProducerMetadata {
	var accumulated []ProducerMetadata
	for _, source := range s {
		accumulated = append(accumulated, source.DrainMeta(ctx)...)
	}
	return accumulated
}