metric.go

// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:generate stringer -type=ExportKind

package metric // import "go.opentelemetry.io/otel/sdk/export/metric"

import (
	"context"
	"sync"
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/number"
	"go.opentelemetry.io/otel/metric/sdkapi"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
	"go.opentelemetry.io/otel/sdk/resource"
)
// Processor is responsible for deciding which kind of aggregation to
// use (via AggregatorSelector), gathering exported results from the
// SDK during collection, and deciding over which dimensions to group
// the exported data.
//
// The SDK supports binding only one of these interfaces, as it has
// the sole responsibility of determining which Aggregator to use for
// each record.
//
// The embedded AggregatorSelector interface is called (concurrently)
// in instrumentation context to select the appropriate Aggregator for
// an instrument.
//
// The `Process` method is called during collection in a
// single-threaded context from the SDK, after the aggregator is
// checkpointed, allowing the processor to build the set of metrics
// currently being exported.
type Processor interface {
// AggregatorSelector is responsible for selecting the
// concrete type of Aggregator used for a metric in the SDK.
//
// This may be a static decision based on fields of the
// Descriptor, or it could use an external configuration
// source to customize the treatment of each metric
// instrument.
//
// The result from AggregatorSelector.AggregatorFor should be
// the same type for a given Descriptor or else nil, because
// Aggregators only know how to Merge with their own type. If
// the result is nil, the metric instrument will be disabled.
//
// Note that the SDK only calls AggregatorFor when new records
// require an Aggregator. This does not provide a way to
// disable metrics with active records.
AggregatorSelector
// Process is called by the SDK once per internal record,
// passing the export Accumulation (a Descriptor, the corresponding
// Labels, and the checkpointed Aggregator). This call has no
// Context argument because it is expected to perform only
// computation. An SDK is not expected to call exporters from
// within Process; use a controller for that (see
// ./controllers/{pull,push}).
Process(accum Accumulation) error
}
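
// The following sketch shows the general shape of a Processor
// implementation; it is not part of this package. The type name
// lastProcessor, its fields, and the embedded selector are
// placeholders, and a production processor would also build Records
// for export rather than only retaining the last Accumulation.
//
//	type lastProcessor struct {
//		AggregatorSelector // e.g., a selector chosen by the application
//
//		lock sync.Mutex
//		last map[string]Accumulation // keyed by instrument name
//	}
//
//	func (p *lastProcessor) Process(accum Accumulation) error {
//		// Called in a single-threaded context by the SDK during
//		// collection, after the Aggregator has been checkpointed.
//		p.lock.Lock()
//		defer p.lock.Unlock()
//		if p.last == nil {
//			p.last = map[string]Accumulation{}
//		}
//		p.last[accum.Descriptor().Name()] = accum
//		return nil
//	}
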
// AggregatorSelector supports selecting the kind of Aggregator to
// use at runtime for a specific metric instrument.
type AggregatorSelector interface {
// AggregatorFor allocates a variable number of aggregators of
// a kind suitable for the requested export. This method
// initializes a `...*Aggregator`, to support making a single
// allocation.
//
// When the call returns without initializing the *Aggregator
// to a non-nil value, the metric instrument is explicitly
// disabled.
//
// This must return a consistent type to avoid confusion in
// later stages of the metrics export process, i.e., when
// Merging or Checkpointing aggregators for a specific
// instrument.
//
// Note: This is context-free because the aggregator should
// not relate to the incoming context. This call should not
// block.
AggregatorFor(descriptor *metric.Descriptor, aggregator ...*Aggregator)
}
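
// A minimal AggregatorSelector sketch follows. The constructor
// newSumAggregator is hypothetical and stands in for a concrete
// aggregator implementation; the essential contract is that every
// supplied *Aggregator is either set to the same concrete type for a
// given descriptor or left nil to disable the instrument.
//
//	type sumOnlySelector struct{}
//
//	func (sumOnlySelector) AggregatorFor(desc *metric.Descriptor, aggPtrs ...*Aggregator) {
//		for i := range aggPtrs {
//			// newSumAggregator (hypothetical) returns an
//			// Aggregator maintaining a simple Sum.
//			*aggPtrs[i] = newSumAggregator(desc)
//		}
//	}
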
// Checkpointer is the interface used by a Controller to coordinate
// the Processor with Accumulator(s) and Exporter(s). The
// StartCollection() and FinishCollection() methods start and finish a
// collection interval. Controllers call the Accumulator(s) during
// collection to process Accumulations.
type Checkpointer interface {
// Processor processes metric data for export. The Process
// method is bracketed by StartCollection and FinishCollection
// calls. The embedded AggregatorSelector can be called at
// any time.
Processor
// CheckpointSet returns the current data set. This may be
// called before and after collection. The
// implementation is required to return the same value
// throughout its lifetime, since CheckpointSet exposes a
// sync.Locker interface. The caller is responsible for
// locking the CheckpointSet before initiating collection.
CheckpointSet() CheckpointSet
// StartCollection begins a collection interval.
StartCollection()
// FinishCollection ends a collection interval.
FinishCollection() error
}
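
// As a rough sketch, a controller drives a Checkpointer through a
// collection interval along these lines (ckpt, accumulator, res, and
// exporter are placeholders for what the controller is configured
// with; the exact locking discipline follows the CheckpointSet
// contract below):
//
//	ckpt.StartCollection()
//	accumulator.Collect(ctx) // invokes ckpt.Process for each active record
//	if err := ckpt.FinishCollection(); err != nil {
//		return err
//	}
//
//	cs := ckpt.CheckpointSet()
//	cs.RLock()
//	defer cs.RUnlock()
//	return exporter.Export(ctx, res, cs)
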
// Aggregator implements a specific aggregation behavior, e.g., a
// behavior to track a sequence of updates to an instrument. Sum-only
// instruments commonly use a simple Sum aggregator, but for the
// distribution instruments (ValueRecorder, ValueObserver) there are a
// number of possible aggregators with different cost and accuracy
// tradeoffs.
//
// Note that any Aggregator may be attached to any instrument--this is
// the result of the OpenTelemetry API/SDK separation. It is possible
// to attach a Sum aggregator to a ValueRecorder instrument or a
// MinMaxSumCount aggregator to a Counter instrument.
type Aggregator interface {
// Aggregation returns an Aggregation interface to access the
// current state of this Aggregator. The caller is
// responsible for synchronization and must not call any of the
// other methods in this interface concurrently while using
// the Aggregation.
Aggregation() aggregation.Aggregation
// Update receives a new measured value and incorporates it
// into the aggregation. Update() calls may be called
// concurrently.
//
// Descriptor.NumberKind() should be consulted to determine
// whether the provided number is an int64 or float64.
//
// The Context argument comes from user-level code and could be
// inspected for a `correlation.Map` or `trace.SpanContext`.
Update(ctx context.Context, number number.Number, descriptor *metric.Descriptor) error
// SynchronizedMove is called during collection to finish one
// period of aggregation by atomically saving the
// currently-updating state into the argument Aggregator AND
// resetting the current value to the zero state.
//
// SynchronizedMove() is called concurrently with Update(). These
// two methods must be synchronized with respect to each
// other, for correctness.
//
// After saving a synchronized copy, the Aggregator can be converted
// into one or more of the interfaces in the `aggregation` sub-package,
// according to the kind of Aggregator that was selected.
//
// This method will return an InconsistentAggregatorError if
// this Aggregator cannot be copied into the destination due
// to an incompatible type.
//
// This call has no Context argument because it is expected to
// perform only computation.
//
// When called with a nil `destination`, this Aggregator is reset
// and the current value is discarded.
SynchronizedMove(destination Aggregator, descriptor *metric.Descriptor) error
// Merge combines the checkpointed state from the argument
// Aggregator into this Aggregator. Merge is not synchronized
// with respect to Update or SynchronizedMove.
//
// The owner of an Aggregator being merged is responsible for
// synchronization of both Aggregator states.
Merge(aggregator Aggregator, descriptor *metric.Descriptor) error
}
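
// The typical call pattern for an Aggregator, sketched below, uses
// two Aggregators of the same concrete type: current receives
// updates on the hot path and snapshot receives the checkpointed
// state at collection time (ctx, desc, and cumulative are
// placeholder variables):
//
//	// Hot path: may run concurrently with other Update calls.
//	_ = current.Update(ctx, number.NewInt64Number(1), desc)
//
//	// Collection path: atomically capture the state into snapshot
//	// and reset current to its zero state.
//	if err := current.SynchronizedMove(snapshot, desc); err != nil {
//		return err
//	}
//
//	// A stateful processor may then fold the snapshot into a
//	// long-lived Aggregator of the same type.
//	_ = cumulative.Merge(snapshot, desc)
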
// Subtractor is an optional interface implemented by some
// Aggregators. An Aggregator must support `Subtract()` in order to
// be configured for a Precomputed-Sum instrument (SumObserver,
// UpDownSumObserver) using a DeltaExporter.
type Subtractor interface {
// Subtract subtracts the `operand` from this Aggregator and
// outputs the value in `result`.
Subtract(operand, result Aggregator, descriptor *metric.Descriptor) error
}
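
// A processor that must produce delta data for a precomputed-sum
// instrument applies Subtract roughly as follows, where prev holds
// the previously exported cumulative value, curr the newly observed
// one, and delta an Aggregator of the same type receiving the
// difference (all placeholder names):
//
//	if sub, ok := curr.(Subtractor); ok {
//		if err := sub.Subtract(prev, delta, desc); err != nil {
//			return err
//		}
//	} else {
//		return aggregation.ErrNoSubtraction
//	}
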
// Exporter handles presentation of the checkpoint of aggregate
// metrics. This is the final stage of a metrics export pipeline,
// where metric data are formatted for a specific system.
type Exporter interface {
// Export is called immediately after completing a collection
// pass in the SDK.
//
// The Context comes from the controller that initiated
// collection.
//
// The CheckpointSet interface refers to the Processor that just
// completed collection.
Export(ctx context.Context, resource *resource.Resource, checkpointSet CheckpointSet) error
// ExportKindSelector is an interface used by the Processor
// in deciding whether to compute Delta or Cumulative
// Aggregations when passing Records to this Exporter.
ExportKindSelector
}
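
// An Export implementation typically drains the CheckpointSet with
// ForEach, passing itself as the ExportKindSelector, as in this
// sketch (stdoutExporter and its emit method are hypothetical):
//
//	func (e *stdoutExporter) Export(ctx context.Context, res *resource.Resource, cs CheckpointSet) error {
//		return cs.ForEach(e, func(rec Record) error {
//			// rec.Aggregation() may be asserted to aggregation.Sum,
//			// aggregation.LastValue, etc., according to its Kind().
//			return e.emit(ctx, res, rec)
//		})
//	}
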
// ExportKindSelector is a sub-interface of Exporter used to indicate
// whether the Processor should compute Delta or Cumulative
// Aggregations.
type ExportKindSelector interface {
// ExportKindFor should return the correct ExportKind that
// should be used when exporting data for the given metric
// instrument and Aggregator kind.
ExportKindFor(descriptor *metric.Descriptor, aggregatorKind aggregation.Kind) ExportKind
}
// CheckpointSet allows a controller to access a complete checkpoint of
// aggregated metrics from the Processor. This is passed to the
// Exporter which may then use ForEach to iterate over the collection
// of aggregated metrics.
type CheckpointSet interface {
// ForEach iterates over aggregated checkpoints for all
// metrics that were updated during the last collection
// period. The function parameter may return an error for any
// aggregated checkpoint it is passed.
//
// The ExportKindSelector argument is used to determine
// whether the Record is computed using Delta or Cumulative
// aggregation.
//
// ForEach tolerates ErrNoData silently, as this is
// expected from the Meter implementation. Any other kind
// of error will immediately halt ForEach and return
// the error to the caller.
ForEach(kindSelector ExportKindSelector, recordFunc func(Record) error) error
// Locker supports locking the checkpoint set. Collection
// into the checkpoint set cannot take place (in case of a
// stateful processor) while it is locked.
//
// The Processor attached to the Accumulator MUST be called
// with the lock held.
sync.Locker
// RLock acquires a read lock corresponding to this Locker.
RLock()
// RUnlock releases a read lock corresponding to this Locker.
RUnlock()
}
// Metadata contains the common elements for exported metric data that
// are shared by the Accumulator->Processor and Processor->Exporter
// steps.
type Metadata struct {
	descriptor *metric.Descriptor
	labels     *attribute.Set
}

// Accumulation contains the exported data for a single metric instrument
// and label set, as prepared by an Accumulator for the Processor.
type Accumulation struct {
	Metadata
	aggregator Aggregator
}

// Record contains the exported data for a single metric instrument
// and label set, as prepared by the Processor for the Exporter.
// This includes the effective start and end time for the aggregation.
type Record struct {
	Metadata
	aggregation aggregation.Aggregation
	start       time.Time
	end         time.Time
}
// Descriptor describes the metric instrument being exported.
func (m Metadata) Descriptor() *metric.Descriptor {
	return m.descriptor
}

// Labels describes the labels associated with the instrument and the
// aggregated data.
func (m Metadata) Labels() *attribute.Set {
	return m.labels
}
// NewAccumulation allows Accumulator implementations to construct new
// Accumulations to send to Processors. The Descriptor, Labels,
// and Aggregator represent aggregate metric events received over a single
// collection period.
func NewAccumulation(descriptor *metric.Descriptor, labels *attribute.Set, aggregator Aggregator) Accumulation {
	return Accumulation{
		Metadata: Metadata{
			descriptor: descriptor,
			labels:     labels,
		},
		aggregator: aggregator,
	}
}
// Aggregator returns the checkpointed aggregator. It is safe to
// access the checkpointed state without locking.
func (r Accumulation) Aggregator() Aggregator {
	return r.aggregator
}
// NewRecord allows Processor implementations to construct export
// records. The Descriptor, Labels, and Aggregator represent
// aggregate metric events received over a single collection period.
func NewRecord(descriptor *metric.Descriptor, labels *attribute.Set, aggregation aggregation.Aggregation, start, end time.Time) Record {
	return Record{
		Metadata: Metadata{
			descriptor: descriptor,
			labels:     labels,
		},
		aggregation: aggregation,
		start:       start,
		end:         end,
	}
}
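
// As a sketch, a Checkpointer's FinishCollection might turn each
// retained Accumulation into a Record this way (intervalStart and
// intervalEnd are placeholder times tracked by the processor):
//
//	rec := NewRecord(accum.Descriptor(), accum.Labels(),
//		accum.Aggregator().Aggregation(), intervalStart, intervalEnd)
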
// Aggregation returns the aggregation, an interface to the record and
// its aggregator, dependent on the kind of both the input and exporter.
func (r Record) Aggregation() aggregation.Aggregation {
	return r.aggregation
}

// StartTime is the start time of the interval covered by this aggregation.
func (r Record) StartTime() time.Time {
	return r.start
}

// EndTime is the end time of the interval covered by this aggregation.
func (r Record) EndTime() time.Time {
	return r.end
}
// ExportKind indicates the kind of data exported by an exporter.
// These bits may be OR-d together when multiple exporters are in use.
type ExportKind int
const (
	// CumulativeExportKind indicates that an Exporter expects a
	// Cumulative Aggregation.
	CumulativeExportKind ExportKind = 1

	// DeltaExportKind indicates that an Exporter expects a
	// Delta Aggregation.
	DeltaExportKind ExportKind = 2
)
// Includes tests whether `kind` includes a specific kind of
// exporter.
func (kind ExportKind) Includes(has ExportKind) bool {
	return kind&has != 0
}
// MemoryRequired returns whether an exporter of this kind requires
// memory to export correctly.
func (kind ExportKind) MemoryRequired(mkind sdkapi.InstrumentKind) bool {
	switch mkind {
	case sdkapi.ValueRecorderInstrumentKind, sdkapi.ValueObserverInstrumentKind,
		sdkapi.CounterInstrumentKind, sdkapi.UpDownCounterInstrumentKind:
		// Delta-oriented instruments:
		return kind.Includes(CumulativeExportKind)

	case sdkapi.SumObserverInstrumentKind, sdkapi.UpDownSumObserverInstrumentKind:
		// Cumulative-oriented instruments:
		return kind.Includes(DeltaExportKind)
	}
	// Something unexpected is happening--we could panic. This
	// will become an error when the exporter tries to access a
	// checkpoint, presumably, so let it be.
	return false
}
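
// For example, a Counter reports deltas, so producing cumulative
// data for it requires remembering state across collection
// intervals, while a SumObserver already reports cumulative values:
//
//	CumulativeExportKind.MemoryRequired(sdkapi.CounterInstrumentKind)     // true
//	DeltaExportKind.MemoryRequired(sdkapi.CounterInstrumentKind)          // false
//	CumulativeExportKind.MemoryRequired(sdkapi.SumObserverInstrumentKind) // false
//	DeltaExportKind.MemoryRequired(sdkapi.SumObserverInstrumentKind)      // true
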
type (
	constantExportKindSelector  ExportKind
	statelessExportKindSelector struct{}
)

var (
	_ ExportKindSelector = constantExportKindSelector(0)
	_ ExportKindSelector = statelessExportKindSelector{}
)
// ConstantExportKindSelector returns an ExportKindSelector that returns
// a constant ExportKind, one that is either always cumulative or always delta.
func ConstantExportKindSelector(kind ExportKind) ExportKindSelector {
	return constantExportKindSelector(kind)
}
// CumulativeExportKindSelector returns an ExportKindSelector that
// always returns CumulativeExportKind.
func CumulativeExportKindSelector() ExportKindSelector {
	return ConstantExportKindSelector(CumulativeExportKind)
}
// DeltaExportKindSelector returns an ExportKindSelector that
// always returns DeltaExportKind.
func DeltaExportKindSelector() ExportKindSelector {
	return ConstantExportKindSelector(DeltaExportKind)
}
// StatelessExportKindSelector returns an ExportKindSelector that
// always returns the ExportKind that avoids long-term memory
// requirements.
func StatelessExportKindSelector() ExportKindSelector {
	return statelessExportKindSelector{}
}
// ExportKindFor implements ExportKindSelector.
func (c constantExportKindSelector) ExportKindFor(_ *metric.Descriptor, _ aggregation.Kind) ExportKind {
	return ExportKind(c)
}
// ExportKindFor implements ExportKindSelector.
func (s statelessExportKindSelector) ExportKindFor(desc *metric.Descriptor, kind aggregation.Kind) ExportKind {
	if kind == aggregation.SumKind && desc.InstrumentKind().PrecomputedSum() {
		return CumulativeExportKind
	}
	return DeltaExportKind
}
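
// A typical Exporter satisfies ExportKindSelector by embedding one of
// the selectors above, as in this sketch (promExporter is a
// hypothetical name; a pull/scrape-style backend generally wants
// cumulative data, while a push-style delta backend would embed
// DeltaExportKindSelector() instead):
//
//	type promExporter struct {
//		ExportKindSelector
//		// ... exporter state ...
//	}
//
//	func newPromExporter() *promExporter {
//		return &promExporter{
//			ExportKindSelector: CumulativeExportKindSelector(),
//		}
//	}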