From 19346a4cff9d1b4de6c9bc96d0849785177af0a1 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 30 Oct 2019 05:07:27 -0700 Subject: [PATCH 01/73] Add MetricAggregator.Merge() implementations --- sdk/export/metric.go | 3 + sdk/metric/aggregator/counter/counter.go | 9 +++ sdk/metric/aggregator/counter/counter_test.go | 28 ++++++++++ sdk/metric/aggregator/ddsketch/ddsketch.go | 10 ++++ .../aggregator/ddsketch/ddsketch_test.go | 50 ++++++++++++++++- sdk/metric/aggregator/gauge/gauge.go | 37 +++++++++++-- sdk/metric/aggregator/gauge/gauge_test.go | 55 +++++++++++++++++++ sdk/metric/aggregator/maxsumcount/msc.go | 55 +++++++++++++------ sdk/metric/aggregator/maxsumcount/msc_test.go | 42 ++++++++++++++ sdk/metric/aggregator/test/test.go | 43 ++++++++++++++- 10 files changed, 305 insertions(+), 27 deletions(-) diff --git a/sdk/export/metric.go b/sdk/export/metric.go index 7de3cfbd149..6efaaf8d57c 100644 --- a/sdk/export/metric.go +++ b/sdk/export/metric.go @@ -33,6 +33,9 @@ type MetricAggregator interface { // called in a single-threaded context. Update() // calls may arrive concurrently. Collect(context.Context, MetricRecord, MetricBatcher) + + // Merge combines state from two aggregators into one. 
+ Merge(MetricAggregator, *Descriptor) } // MetricRecord is the unit of export, pairing a metric diff --git a/sdk/metric/aggregator/counter/counter.go b/sdk/metric/aggregator/counter/counter.go index 58d7cee3c69..4f3aff16897 100644 --- a/sdk/metric/aggregator/counter/counter.go +++ b/sdk/metric/aggregator/counter/counter.go @@ -64,3 +64,12 @@ func (c *Aggregator) Update(_ context.Context, number core.Number, rec export.Me c.current.AddNumberAtomic(kind, number) } + +func (c *Aggregator) Merge(oa export.MetricAggregator, desc *export.Descriptor) { + o, _ := oa.(*Aggregator) + if o == nil { + // TODO warn + return + } + c.checkpoint.AddNumber(desc.NumberKind(), o.checkpoint) +} diff --git a/sdk/metric/aggregator/counter/counter_test.go b/sdk/metric/aggregator/counter/counter_test.go index 45e381b0d39..c8492493e42 100644 --- a/sdk/metric/aggregator/counter/counter_test.go +++ b/sdk/metric/aggregator/counter/counter_test.go @@ -91,3 +91,31 @@ func TestCounterNonMonotonic(t *testing.T) { require.Equal(t, sum, agg.AsNumber(), "Same sum - monotonic") }) } + +func TestCounterMerge(t *testing.T) { + ctx := context.Background() + + test.RunProfiles(t, func(t *testing.T, profile test.Profile) { + agg1 := New() + agg2 := New() + + batcher, record := test.NewAggregatorTest(export.CounterMetricKind, profile.NumberKind, false) + + sum := core.Number(0) + for i := 0; i < count; i++ { + x := profile.Random(+1) + sum.AddNumber(profile.NumberKind, x) + agg1.Update(ctx, x, record) + agg2.Update(ctx, x, record) + } + + agg1.Collect(ctx, record, batcher) + agg2.Collect(ctx, record, batcher) + + agg1.Merge(agg2, record.Descriptor()) + + sum.AddNumber(record.Descriptor().NumberKind(), sum) + + require.Equal(t, sum, agg1.AsNumber(), "Same sum - monotonic") + }) +} diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index fd2a85facc7..67a0ed22b90 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ 
b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -101,3 +101,13 @@ func (c *Aggregator) Update(_ context.Context, number core.Number, rec export.Me defer c.lock.Unlock() c.current.Add(number.CoerceToFloat64(kind)) } + +func (c *Aggregator) Merge(oa export.MetricAggregator, d *export.Descriptor) { + o, _ := oa.(*Aggregator) + if o == nil { + // TODO warn + return + } + + c.checkpoint.Merge(o.checkpoint) +} diff --git a/sdk/metric/aggregator/ddsketch/ddsketch_test.go b/sdk/metric/aggregator/ddsketch/ddsketch_test.go index 74b9bf32ac4..5fe69ec6ed1 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch_test.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch_test.go @@ -57,11 +57,57 @@ func TestDDSketchAbsolute(t *testing.T) { all[len(all)-1].CoerceToFloat64(profile.NumberKind), agg.Max(), "Same max - absolute") - // Median require.InEpsilon(t, - all[len(all)/2].CoerceToFloat64(profile.NumberKind), + all.Median(profile.NumberKind).CoerceToFloat64(profile.NumberKind), agg.Quantile(0.5), 0.1, "Same median - absolute") }) } + +func TestDDSketchMerge(t *testing.T) { + ctx := context.Background() + + test.RunProfiles(t, func(t *testing.T, profile test.Profile) { + batcher, record := test.NewAggregatorTest(export.MeasureMetricKind, profile.NumberKind, false) + + agg1 := New(NewDefaultConfig(), record.Descriptor()) + agg2 := New(NewDefaultConfig(), record.Descriptor()) + + var all test.Numbers + for i := 0; i < count; i++ { + x := profile.Random(+1) + all = append(all, x) + agg1.Update(ctx, x, record) + } + + for i := 0; i < count; i++ { + x := profile.Random(+1) + all = append(all, x) + agg2.Update(ctx, x, record) + } + + agg1.Collect(ctx, record, batcher) + agg2.Collect(ctx, record, batcher) + + agg1.Merge(agg2, record.Descriptor()) + + all.Sort() + + require.InEpsilon(t, + all.Sum(profile.NumberKind).CoerceToFloat64(profile.NumberKind), + agg1.Sum(), + 0.0000001, + "Same sum - absolute") + require.Equal(t, all.Count(), agg1.Count(), "Same count - absolute") + require.Equal(t, + 
all[len(all)-1].CoerceToFloat64(profile.NumberKind), + agg1.Max(), + "Same max - absolute") + require.InEpsilon(t, + all.Median(profile.NumberKind).CoerceToFloat64(profile.NumberKind), + agg1.Quantile(0.5), + 0.1, + "Same median - absolute") + }) +} diff --git a/sdk/metric/aggregator/gauge/gauge.go b/sdk/metric/aggregator/gauge/gauge.go index b6f03cb801f..5dc7e004b3b 100644 --- a/sdk/metric/aggregator/gauge/gauge.go +++ b/sdk/metric/aggregator/gauge/gauge.go @@ -33,12 +33,10 @@ type ( // Aggregator aggregates gauge events. Aggregator struct { - // data is an atomic pointer to *gaugeData. It is set - // to `nil` if the gauge has not been set since the - // last collection. + // current is an atomic pointer to *gaugeData. It is never nil. current unsafe.Pointer - // N.B. Export is not called when checkpoint is nil + // checkpoint is a copy of the current value taken in Collect() checkpoint unsafe.Pointer } @@ -125,3 +123,34 @@ func (g *Aggregator) updateMonotonic(number core.Number, desc *export.Descriptor } } } + +func (g *Aggregator) Merge(oa export.MetricAggregator, desc *export.Descriptor) { + o, _ := oa.(*Aggregator) + if o == nil { + // TODO warn + return + } + + ggd := (*gaugeData)(atomic.LoadPointer(&g.checkpoint)) + ogd := (*gaugeData)(atomic.LoadPointer(&o.checkpoint)) + + if desc.Alternate() { + // Monotonic: use the greater value + cmp := ggd.value.CompareNumber(desc.NumberKind(), ogd.value) + + if cmp > 0 { + return + } + + if cmp < 0 { + g.checkpoint = unsafe.Pointer(ogd) + return + } + } + // Non-monotonic gauge or equal values + if ggd.timestamp.After(ogd.timestamp) { + return + } + + g.checkpoint = unsafe.Pointer(ogd) +} diff --git a/sdk/metric/aggregator/gauge/gauge_test.go b/sdk/metric/aggregator/gauge/gauge_test.go index dd54776142f..399d89bc50a 100644 --- a/sdk/metric/aggregator/gauge/gauge_test.go +++ b/sdk/metric/aggregator/gauge/gauge_test.go @@ -94,3 +94,58 @@ func TestGaugeMonotonicDescending(t *testing.T) { require.Equal(t, first, 
agg.AsNumber(), "Same last value - monotonic") }) } + +func TestGaugeNormalMerge(t *testing.T) { + ctx := context.Background() + + test.RunProfiles(t, func(t *testing.T, profile test.Profile) { + agg1 := New() + agg2 := New() + + batcher, record := test.NewAggregatorTest(export.GaugeMetricKind, profile.NumberKind, false) + + first1 := profile.Random(+1) + first2 := profile.Random(+1) + first1.AddNumber(profile.NumberKind, first2) + + agg1.Update(ctx, first1, record) + agg2.Update(ctx, first2, record) + + agg1.Collect(ctx, record, batcher) + agg2.Collect(ctx, record, batcher) + + t1 := agg1.Timestamp() + t2 := agg2.Timestamp() + require.True(t, t1.Before(t2)) + + agg1.Merge(agg2, record.Descriptor()) + + require.Equal(t, t2, agg1.Timestamp(), "Merged values - non-monotonic") + require.Equal(t, first2, agg1.AsNumber(), "Merged values - non-monotonic") + }) +} + +func TestGaugeMonotonicMerge(t *testing.T) { + ctx := context.Background() + + test.RunProfiles(t, func(t *testing.T, profile test.Profile) { + agg1 := New() + agg2 := New() + + batcher, record := test.NewAggregatorTest(export.GaugeMetricKind, profile.NumberKind, true) + + first1 := profile.Random(+1) + agg1.Update(ctx, first1, record) + + first2 := profile.Random(+1) + first2.AddNumber(profile.NumberKind, first1) + agg2.Update(ctx, first2, record) + + agg1.Collect(ctx, record, batcher) + agg2.Collect(ctx, record, batcher) + + agg1.Merge(agg2, record.Descriptor()) + + require.Equal(t, first2, agg1.AsNumber(), "Merged values - monotonic") + }) +} diff --git a/sdk/metric/aggregator/maxsumcount/msc.go b/sdk/metric/aggregator/maxsumcount/msc.go index b549a311536..b4f218ca267 100644 --- a/sdk/metric/aggregator/maxsumcount/msc.go +++ b/sdk/metric/aggregator/maxsumcount/msc.go @@ -25,8 +25,8 @@ type ( // Aggregator aggregates measure events, keeping only the max, // sum, and count. 
Aggregator struct { - live state - save state + current state + checkpoint state } state struct { @@ -45,34 +45,38 @@ func New() *Aggregator { // Sum returns the accumulated sum as a Number. func (c *Aggregator) Sum() core.Number { - return c.save.sum + return c.checkpoint.sum } // Count returns the accumulated count. func (c *Aggregator) Count() int64 { - return int64(c.save.count.AsUint64()) + return int64(c.checkpoint.count.AsUint64()) } // Max returns the accumulated max as a Number. func (c *Aggregator) Max() core.Number { - return c.save.max + return c.checkpoint.max } -// Collect saves the current value (atomically) and exports it. +// Collect checkpoints the current value (atomically) and exports it. func (c *Aggregator) Collect(ctx context.Context, rec export.MetricRecord, exp export.MetricBatcher) { desc := rec.Descriptor() kind := desc.NumberKind() zero := core.NewZeroNumber(kind) // N.B. There is no atomic operation that can update all three - // values at once, so there are races between Update() and - // Collect(). Therefore, atomically swap fields independently, - // knowing that individually the three parts of this aggregation - // could be spread across multiple collections in rare cases. - - c.save.count.SetUint64(c.live.count.SwapUint64Atomic(0)) - c.save.sum = c.live.sum.SwapNumberAtomic(zero) - c.save.max = c.live.max.SwapNumberAtomic(zero) + // values at once without a memory allocation. + // + // This aggregator is intended to trade this correctness for + // speed. + // + // Therefore, atomically swap fields independently, knowing + // that individually the three parts of this aggregation could + // be spread across multiple collections in rare cases. 
+ + c.checkpoint.count.SetUint64(c.current.count.SwapUint64Atomic(0)) + c.checkpoint.sum = c.current.sum.SwapNumberAtomic(zero) + c.checkpoint.max = c.current.max.SwapNumberAtomic(zero) exp.Export(ctx, rec, c) } @@ -87,17 +91,32 @@ func (c *Aggregator) Update(_ context.Context, number core.Number, rec export.Me return } - c.live.count.AddUint64Atomic(1) - c.live.sum.AddNumberAtomic(kind, number) + c.current.count.AddUint64Atomic(1) + c.current.sum.AddNumberAtomic(kind, number) for { - current := c.live.max.AsNumberAtomic() + current := c.current.max.AsNumberAtomic() if number.CompareNumber(kind, current) <= 0 { break } - if c.live.max.CompareAndSwapNumber(current, number) { + if c.current.max.CompareAndSwapNumber(current, number) { break } } } + +func (c *Aggregator) Merge(oa export.MetricAggregator, desc *export.Descriptor) { + o, _ := oa.(*Aggregator) + if o == nil { + // TODO warn + return + } + + c.checkpoint.sum.AddNumber(desc.NumberKind(), o.checkpoint.sum) + c.checkpoint.count.AddNumber(core.Uint64NumberKind, o.checkpoint.count) + + if c.checkpoint.max.CompareNumber(desc.NumberKind(), o.checkpoint.max) < 0 { + c.checkpoint.max.SetNumber(o.checkpoint.max) + } +} diff --git a/sdk/metric/aggregator/maxsumcount/msc_test.go b/sdk/metric/aggregator/maxsumcount/msc_test.go index 85893247479..12808ccc3a5 100644 --- a/sdk/metric/aggregator/maxsumcount/msc_test.go +++ b/sdk/metric/aggregator/maxsumcount/msc_test.go @@ -57,3 +57,45 @@ func TestMaxSumCountAbsolute(t *testing.T) { "Same sum - absolute") }) } + +func TestMaxSumCountMerge(t *testing.T) { + ctx := context.Background() + + test.RunProfiles(t, func(t *testing.T, profile test.Profile) { + batcher, record := test.NewAggregatorTest(export.MeasureMetricKind, profile.NumberKind, false) + + agg1 := New() + agg2 := New() + + var all test.Numbers + + for i := 0; i < count; i++ { + x := profile.Random(+1) + all = append(all, x) + agg1.Update(ctx, x, record) + } + for i := 0; i < count; i++ { + x := profile.Random(+1) 
+ all = append(all, x) + agg2.Update(ctx, x, record) + } + + agg1.Collect(ctx, record, batcher) + agg2.Collect(ctx, record, batcher) + + agg1.Merge(agg2, record.Descriptor()) + + all.Sort() + + require.InEpsilon(t, + all.Sum(profile.NumberKind).CoerceToFloat64(profile.NumberKind), + agg1.Sum().CoerceToFloat64(profile.NumberKind), + 0.000000001, + "Same sum - absolute") + require.Equal(t, all.Count(), agg1.Count(), "Same sum - absolute") + require.Equal(t, + all[len(all)-1], + agg1.Max(), + "Same max - absolute") + }) +} diff --git a/sdk/metric/aggregator/test/test.go b/sdk/metric/aggregator/test/test.go index 4dcb133777c..0cba335d9ea 100644 --- a/sdk/metric/aggregator/test/test.go +++ b/sdk/metric/aggregator/test/test.go @@ -85,9 +85,19 @@ func RunProfiles(t *testing.T, f func(*testing.T, Profile)) { type Numbers []core.Number func (n *Numbers) Sort() { - sort.Slice(*n, func(i, j int) bool { - return (*n)[i] < (*n)[j] - }) + sort.Sort(n) +} + +func (n *Numbers) Less(i, j int) bool { + return (*n)[i] < (*n)[j] +} + +func (n *Numbers) Len() int { + return len(*n) +} + +func (n *Numbers) Swap(i, j int) { + (*n)[i], (*n)[j] = (*n)[j], (*n)[i] } func (n *Numbers) Sum(kind core.NumberKind) core.Number { @@ -101,3 +111,30 @@ func (n *Numbers) Sum(kind core.NumberKind) core.Number { func (n *Numbers) Count() int64 { return int64(len(*n)) } + +func (n *Numbers) Median(kind core.NumberKind) core.Number { + if !sort.IsSorted(n) { + panic("Sort these numbers before calling Median") + } + + l := len(*n) + if l%2 == 1 { + return (*n)[l/2] + } + + lower := (*n)[l/2-1] + upper := (*n)[l/2] + + sum := lower + sum.AddNumber(kind, upper) + + switch kind { + case core.Uint64NumberKind: + return core.NewUint64Number(sum.AsUint64() / 2) + case core.Int64NumberKind: + return core.NewInt64Number(sum.AsInt64() / 2) + case core.Float64NumberKind: + return core.NewFloat64Number(sum.AsFloat64() / 2) + } + panic("unknown number kind") +} From 03f7854f4565e34f815ca689b02cf28bc7f7325d Mon Sep 17 
00:00:00 2001 From: jmacd Date: Wed, 30 Oct 2019 11:17:32 -0700 Subject: [PATCH 02/73] Update from feedback --- sdk/metric/aggregator/counter/counter.go | 5 +---- sdk/metric/aggregator/gauge/gauge_test.go | 7 ++++--- sdk/metric/aggregator/maxsumcount/msc.go | 8 ++------ sdk/metric/aggregator/maxsumcount/msc_test.go | 6 +++--- 4 files changed, 10 insertions(+), 16 deletions(-) diff --git a/sdk/metric/aggregator/counter/counter.go b/sdk/metric/aggregator/counter/counter.go index 4f3aff16897..bfbd9502000 100644 --- a/sdk/metric/aggregator/counter/counter.go +++ b/sdk/metric/aggregator/counter/counter.go @@ -45,10 +45,7 @@ func (c *Aggregator) AsNumber() core.Number { // Collect checkpoints the current value (atomically) and exports it. func (c *Aggregator) Collect(ctx context.Context, rec export.MetricRecord, exp export.MetricBatcher) { - desc := rec.Descriptor() - kind := desc.NumberKind() - zero := core.NewZeroNumber(kind) - c.checkpoint = c.current.SwapNumberAtomic(zero) + c.checkpoint = c.current.SwapNumberAtomic(core.Number(0)) exp.Export(ctx, rec, c) } diff --git a/sdk/metric/aggregator/gauge/gauge_test.go b/sdk/metric/aggregator/gauge/gauge_test.go index 399d89bc50a..de529bc3a64 100644 --- a/sdk/metric/aggregator/gauge/gauge_test.go +++ b/sdk/metric/aggregator/gauge/gauge_test.go @@ -120,8 +120,8 @@ func TestGaugeNormalMerge(t *testing.T) { agg1.Merge(agg2, record.Descriptor()) - require.Equal(t, t2, agg1.Timestamp(), "Merged values - non-monotonic") - require.Equal(t, first2, agg1.AsNumber(), "Merged values - non-monotonic") + require.Equal(t, t2, agg1.Timestamp(), "Merged timestamp - non-monotonic") + require.Equal(t, first2, agg1.AsNumber(), "Merged value - non-monotonic") }) } @@ -146,6 +146,7 @@ func TestGaugeMonotonicMerge(t *testing.T) { agg1.Merge(agg2, record.Descriptor()) - require.Equal(t, first2, agg1.AsNumber(), "Merged values - monotonic") + require.Equal(t, first2, agg1.AsNumber(), "Merged value - monotonic") + require.Equal(t, agg2.Timestamp(), 
agg1.Timestamp(), "Merged timestamp - monotonic") }) } diff --git a/sdk/metric/aggregator/maxsumcount/msc.go b/sdk/metric/aggregator/maxsumcount/msc.go index b4f218ca267..3d3ed955e77 100644 --- a/sdk/metric/aggregator/maxsumcount/msc.go +++ b/sdk/metric/aggregator/maxsumcount/msc.go @@ -60,10 +60,6 @@ func (c *Aggregator) Max() core.Number { // Collect checkpoints the current value (atomically) and exports it. func (c *Aggregator) Collect(ctx context.Context, rec export.MetricRecord, exp export.MetricBatcher) { - desc := rec.Descriptor() - kind := desc.NumberKind() - zero := core.NewZeroNumber(kind) - // N.B. There is no atomic operation that can update all three // values at once without a memory allocation. // @@ -75,8 +71,8 @@ func (c *Aggregator) Collect(ctx context.Context, rec export.MetricRecord, exp e // be spread across multiple collections in rare cases. c.checkpoint.count.SetUint64(c.current.count.SwapUint64Atomic(0)) - c.checkpoint.sum = c.current.sum.SwapNumberAtomic(zero) - c.checkpoint.max = c.current.max.SwapNumberAtomic(zero) + c.checkpoint.sum = c.current.sum.SwapNumberAtomic(core.Number(0)) + c.checkpoint.max = c.current.max.SwapNumberAtomic(core.Number(0)) exp.Export(ctx, rec, c) } diff --git a/sdk/metric/aggregator/maxsumcount/msc_test.go b/sdk/metric/aggregator/maxsumcount/msc_test.go index 12808ccc3a5..eb05c8bc2e0 100644 --- a/sdk/metric/aggregator/maxsumcount/msc_test.go +++ b/sdk/metric/aggregator/maxsumcount/msc_test.go @@ -50,11 +50,11 @@ func TestMaxSumCountAbsolute(t *testing.T) { agg.Sum().CoerceToFloat64(profile.NumberKind), 0.000000001, "Same sum - absolute") - require.Equal(t, all.Count(), agg.Count(), "Same sum - absolute") + require.Equal(t, all.Count(), agg.Count(), "Same count - absolute") require.Equal(t, all[len(all)-1], agg.Max(), - "Same sum - absolute") + "Same ma - absolute") }) } @@ -92,7 +92,7 @@ func TestMaxSumCountMerge(t *testing.T) { agg1.Sum().CoerceToFloat64(profile.NumberKind), 0.000000001, "Same sum - absolute") 
- require.Equal(t, all.Count(), agg1.Count(), "Same sum - absolute") + require.Equal(t, all.Count(), agg1.Count(), "Same count - absolute") require.Equal(t, all[len(all)-1], agg1.Max(), From 09d88a144124c20d85bdddf007c454f943d34d2d Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 30 Oct 2019 11:39:08 -0700 Subject: [PATCH 03/73] Type --- sdk/metric/aggregator/maxsumcount/msc_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/metric/aggregator/maxsumcount/msc_test.go b/sdk/metric/aggregator/maxsumcount/msc_test.go index eb05c8bc2e0..a5da053366e 100644 --- a/sdk/metric/aggregator/maxsumcount/msc_test.go +++ b/sdk/metric/aggregator/maxsumcount/msc_test.go @@ -54,7 +54,7 @@ func TestMaxSumCountAbsolute(t *testing.T) { require.Equal(t, all[len(all)-1], agg.Max(), - "Same ma - absolute") + "Same max - absolute") }) } From c6ca4fdb79fd559a080f65f0326ed3dede66e0a6 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 30 Oct 2019 22:10:56 -0700 Subject: [PATCH 04/73] Ckpt --- sdk/export/exporter.go | 10 +++------- sdk/export/metric.go | 17 +++++++++++++++++ 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/sdk/export/exporter.go b/sdk/export/exporter.go index 54c6aa785f5..13bda3f5c8a 100644 --- a/sdk/export/exporter.go +++ b/sdk/export/exporter.go @@ -46,13 +46,9 @@ type SpanBatcher interface { // Multiple-exporters could be implemented by implementing this interface // for a group of MetricBatcher. type MetricBatcher interface { - // AggregatorFor should return the kind of aggregator - // suited to the requested export. Returning `nil` - // indicates to ignore the metric update. - // - // Note: This is context-free because the handle should not be - // bound to the incoming context. This call should not block. - AggregatorFor(MetricRecord) MetricAggregator + // MetricAggregationSelector is responsible for selecting the + // concrete type of aggregation used for a metric in the SDK. 
+ MetricAggregationSelector // Export receives pairs of records and aggregators // during the SDK Collect(). Exporter implementations diff --git a/sdk/export/metric.go b/sdk/export/metric.go index 6efaaf8d57c..c402311896b 100644 --- a/sdk/export/metric.go +++ b/sdk/export/metric.go @@ -21,6 +21,20 @@ import ( "go.opentelemetry.io/api/unit" ) +// MetricAggregationSelector supports selecting the kind of aggregator +// to use at runtime for a specific metric instrument. +type MetricAggregationSelector interface { + // AggregatorFor should return the kind of aggregator suited + // to the requested export. Returning `nil` indicates to + // ignore this metric instrument. Although it is not + // required, this should return a consistent type to avoid + // confusion in later stages of the metrics export process. + // + // Note: This is context-free because the handle should not be + // bound to the incoming context. This call should not block. + AggregatorFor(MetricRecord) MetricAggregator +} + // MetricAggregator implements a specific aggregation behavior, e.g., // a counter, a gauge, a histogram. type MetricAggregator interface { @@ -49,6 +63,9 @@ type MetricRecord interface { Labels() []core.KeyValue } +type MetricEncoder interface { +} + // MetricKind describes the kind of instrument. 
type MetricKind int8 From acc2450dfe1ce856150fb4e6f543161895547142 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 30 Oct 2019 22:15:55 -0700 Subject: [PATCH 05/73] Ckpt --- sdk/metric/batcher/stateless/stateless.go | 120 ++++++++++++++++++++++ 1 file changed, 120 insertions(+) create mode 100644 sdk/metric/batcher/stateless/stateless.go diff --git a/sdk/metric/batcher/stateless/stateless.go b/sdk/metric/batcher/stateless/stateless.go new file mode 100644 index 00000000000..0339d53c800 --- /dev/null +++ b/sdk/metric/batcher/stateless/stateless.go @@ -0,0 +1,120 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stateless + +import ( + "context" + "strings" + + "go.opentelemetry.io/api/core" + "go.opentelemetry.io/sdk/export" +) + +type ( + Batcher struct { + dki dkiMap + agg aggMap + selector export.MetricAggregationSelector + } + + aggEntry struct { + aggregator export.MetricAggregator + descriptor *export.Descriptor + + // NOTE: When only a single exporter is in use, + // there's a potential to avoid encoding the labels + // twice, since this class has to encode them once. 
+ labels []core.KeyValue + } + + dkiMap map[*export.Descriptor]map[core.Key]int + aggMap map[string]aggEntry +) + +var _ export.MetricBatcher = &Batcher{} + +func NewBatcher(selector export.MetricAggregationSelector) *Batcher { + return &Batcher{ + selector: selector, + dki: dkiMap{}, + agg: aggMap{}, + } +} + +func (b *Batcher) AggregatorFor(record export.MetricRecord) export.MetricAggregator { + return b.selector.AggregatorFor(record) +} + +func (b *Batcher) Export(_ context.Context, record export.MetricRecord, agg export.MetricAggregator) { + desc := record.Descriptor() + keys := desc.Keys() + + // Cache the mapping from Descriptor->Key->Index + ki, ok := b.dki[desc] + if !ok { + ki = map[core.Key]int{} + b.dki[desc] = ki + + for i, k := range keys { + ki[k] = i + } + } + + // Compute the value list. Note: Unspecified values become + // empty strings. + canon := make([]core.Value, len(keys)) + + for _, kv := range record.Labels() { + pos, ok := ki[kv.Key] + if !ok { + continue + } + canon[pos] = kv.Value + } + + // Compute an encoded lookup key. + // + // Note the opportunity to use an export-specific + // representation here, then avoid recomputing it in the + // exporter. For example, depending on the exporter, we could + // use an OpenMetrics representation, a statsd representation, + // etc. This only benefits a single exporter, of course. + // + // Note also the possibility to cache this lookup in the form + // (Descriptor, LabelSet)->Encoded. + var sb strings.Builder + for i := 0; i < len(keys); i++ { + sb.WriteString(string(keys[i])) + sb.WriteRune('=') + sb.WriteString(canon[i].Emit()) + + if i < len(keys)-1 { + sb.WriteRune(',') + } + } + + encoded := sb.String() + + // Perform the group-by. 
+ rag, ok := b.agg[encoded] + if !ok { + b.agg[encoded] = aggEntry{ + aggregator: agg, + descriptor: record.Descriptor(), + } + } else { + rag.aggregator.Merge(agg, record.Descriptor()) + } +} From 5b23af41642e42ee5881c95b5f4d6badec4b889f Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 30 Oct 2019 23:32:33 -0700 Subject: [PATCH 06/73] Add push controller --- sdk/export/metric.go | 4 ++ sdk/metric/batcher/stateless/stateless.go | 9 +-- sdk/metric/controller/push/push.go | 76 +++++++++++++++++++++++ 3 files changed, 85 insertions(+), 4 deletions(-) create mode 100644 sdk/metric/controller/push/push.go diff --git a/sdk/export/metric.go b/sdk/export/metric.go index c402311896b..ff33351a8ea 100644 --- a/sdk/export/metric.go +++ b/sdk/export/metric.go @@ -64,6 +64,10 @@ type MetricRecord interface { } type MetricEncoder interface { + Encode(context.Context, MetricProducer) +} + +type MetricProducer interface { } // MetricKind describes the kind of instrument. diff --git a/sdk/metric/batcher/stateless/stateless.go b/sdk/metric/batcher/stateless/stateless.go index 0339d53c800..a8728fa85a1 100644 --- a/sdk/metric/batcher/stateless/stateless.go +++ b/sdk/metric/batcher/stateless/stateless.go @@ -88,12 +88,13 @@ func (b *Batcher) Export(_ context.Context, record export.MetricRecord, agg expo // // Note the opportunity to use an export-specific // representation here, then avoid recomputing it in the - // exporter. For example, depending on the exporter, we could + // exporter. For example, depending on the exporter, we could // use an OpenMetrics representation, a statsd representation, // etc. This only benefits a single exporter, of course. // - // Note also the possibility to cache this lookup in the form - // (Descriptor, LabelSet)->Encoded. + // Note also the possibility to speed this computation of + // "encoded" from "canon" in the form of a (Descriptor, + // LabelSet)->Encoded cache. 
var sb strings.Builder for i := 0; i < len(keys); i++ { sb.WriteString(string(keys[i])) @@ -107,7 +108,7 @@ func (b *Batcher) Export(_ context.Context, record export.MetricRecord, agg expo encoded := sb.String() - // Perform the group-by. + // Reduce dimensionality. rag, ok := b.agg[encoded] if !ok { b.agg[encoded] = aggEntry{ diff --git a/sdk/metric/controller/push/push.go b/sdk/metric/controller/push/push.go new file mode 100644 index 00000000000..26b10e31b3b --- /dev/null +++ b/sdk/metric/controller/push/push.go @@ -0,0 +1,76 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package push + +import ( + "context" + "time" + + "go.opentelemetry.io/api/metric" + "go.opentelemetry.io/sdk/export" + sdk "go.opentelemetry.io/sdk/metric" +) + +type Controller struct { + sdk *sdk.SDK + batcher export.MetricBatcher + encoder export.MetricEncoder + ticker *time.Ticker + ch chan struct{} +} + +// var _ metric.Provider = &Controller{} + +func New(batcher export.MetricBatcher, encoder export.MetricEncoder, period time.Duration) *Controller { + return &Controller{ + sdk: sdk.New(batcher), + batcher: batcher, + encoder: encoder, + ticker: time.NewTicker(period), + ch: make(chan struct{}), + } +} + +func (c *Controller) GetMeter(name string) metric.Meter { + return c.sdk +} + +func (c *Controller) Start() { + go c.run() +} + +func (c *Controller) Stop() { + close(c.ch) +} + +func (c *Controller) run() { + for { + select { + case <-c.ch: + return + case <-c.ticker.C: + c.tick() + } + } +} + +func (c *Controller) tick() { + ctx := context.Background() + c.sdk.Collect(ctx) + c.encoder.Encode(ctx, c.batcher) + + // TODO MetricBatcher has to implement MetricProducer, what is the method + // to iterate over the checkpoint? +} From 80e0df8af2ba424f98eaf852bce0c96a334c75d3 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 31 Oct 2019 03:48:16 -0700 Subject: [PATCH 07/73] Ckpt --- sdk/export/exporter.go | 4 ++++ sdk/export/metric.go | 1 + sdk/metric/controller/push/push.go | 5 +---- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/sdk/export/exporter.go b/sdk/export/exporter.go index 13bda3f5c8a..9dc26f5b9db 100644 --- a/sdk/export/exporter.go +++ b/sdk/export/exporter.go @@ -50,6 +50,10 @@ type MetricBatcher interface { // concrete type of aggregation used for a metric in the SDK. MetricAggregationSelector + // GetProducer is the interface used by exporters to access + // aggregate checkpoints after collection. + GetProducer() MetricProducer + // Export receives pairs of records and aggregators // during the SDK Collect(). 
Exporter implementations // must access the specific aggregator to receive the diff --git a/sdk/export/metric.go b/sdk/export/metric.go index ff33351a8ea..bd537be75ce 100644 --- a/sdk/export/metric.go +++ b/sdk/export/metric.go @@ -68,6 +68,7 @@ type MetricEncoder interface { } type MetricProducer interface { + Next() MetricAggregator } // MetricKind describes the kind of instrument. diff --git a/sdk/metric/controller/push/push.go b/sdk/metric/controller/push/push.go index 26b10e31b3b..f51949bfd7a 100644 --- a/sdk/metric/controller/push/push.go +++ b/sdk/metric/controller/push/push.go @@ -69,8 +69,5 @@ func (c *Controller) run() { func (c *Controller) tick() { ctx := context.Background() c.sdk.Collect(ctx) - c.encoder.Encode(ctx, c.batcher) - - // TODO MetricBatcher has to implement MetricProducer, what is the method - // to iterate over the checkpoint? + c.encoder.Encode(ctx, c.batcher.GetProducer()) } From 34232425013bc533869801ece0cabf910db8bb4e Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 31 Oct 2019 04:56:42 -0700 Subject: [PATCH 08/73] Add aggregator interfaces, stdout encoder --- exporter/metric/stateless/stdout/stdout.go | 37 +++++++++++ sdk/export/exporter.go | 12 ++-- sdk/export/metric.go | 11 +++- sdk/metric/aggregator/api.go | 64 +++++++++++++++++++ sdk/metric/aggregator/counter/counter.go | 6 +- sdk/metric/aggregator/counter/counter_test.go | 8 +-- sdk/metric/aggregator/ddsketch/ddsketch.go | 4 +- sdk/metric/aggregator/gauge/gauge.go | 8 +-- sdk/metric/aggregator/gauge/gauge_test.go | 10 +-- sdk/metric/aggregator/maxsumcount/msc.go | 2 +- sdk/metric/aggregator/test/test.go | 6 +- sdk/metric/batcher/stateless/stateless.go | 18 +++++- sdk/metric/benchmark_test.go | 6 +- sdk/metric/controller/push/push.go | 30 +++++---- sdk/metric/monotone_test.go | 8 ++- sdk/metric/stress_test.go | 12 ++-- 16 files changed, 190 insertions(+), 52 deletions(-) create mode 100644 exporter/metric/stateless/stdout/stdout.go create mode 100644 sdk/metric/aggregator/api.go 
diff --git a/exporter/metric/stateless/stdout/stdout.go b/exporter/metric/stateless/stdout/stdout.go new file mode 100644 index 00000000000..8e8e3471cce --- /dev/null +++ b/exporter/metric/stateless/stdout/stdout.go @@ -0,0 +1,37 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stdout + +import ( + "context" + + "go.opentelemetry.io/api/core" + "go.opentelemetry.io/sdk/export" +) + +type Exporter struct { +} + +var _ export.MetricExporter = &Exporter{} + +func New() *Exporter { + return &Exporter{} +} + +func (*Exporter) Export(_ context.Context, producer export.MetricProducer) { + producer.Foreach(func(agg export.MetricAggregator, desc *export.Descriptor, labels []core.KeyValue) { + // fmt.Printf("%s %s\n", + }) +} diff --git a/sdk/export/exporter.go b/sdk/export/exporter.go index 9dc26f5b9db..42ed721b1c0 100644 --- a/sdk/export/exporter.go +++ b/sdk/export/exporter.go @@ -50,14 +50,14 @@ type MetricBatcher interface { // concrete type of aggregation used for a metric in the SDK. MetricAggregationSelector - // GetProducer is the interface used by exporters to access - // aggregate checkpoints after collection. - GetProducer() MetricProducer - - // Export receives pairs of records and aggregators + // Process receives pairs of records and aggregators // during the SDK Collect(). 
Exporter implementations // must access the specific aggregator to receive the // exporter data, since the format of the data varies // by aggregation. - Export(context.Context, MetricRecord, MetricAggregator) + Process(context.Context, MetricRecord, MetricAggregator) + + // ReadCheckpoint is the interface used by exporters to access + // aggregate checkpoints after collection. + ReadCheckpoint() MetricProducer } diff --git a/sdk/export/metric.go b/sdk/export/metric.go index bd537be75ce..67c1a66f915 100644 --- a/sdk/export/metric.go +++ b/sdk/export/metric.go @@ -63,12 +63,17 @@ type MetricRecord interface { Labels() []core.KeyValue } -type MetricEncoder interface { - Encode(context.Context, MetricProducer) +// MetricExporter handles presentation of the checkpoint of aggregate +// metrics. This is the final stage of a metrics export pipeline, +// where metric data are formatted for a specific system. +type MetricExporter interface { + Export(context.Context, MetricProducer) } +// MetricProducer allows a MetricExporter to access a checkpoint of +// aggregated metrics one at a time. type MetricProducer interface { - Next() MetricAggregator + Foreach(func(MetricAggregator, *Descriptor, []core.Value)) } // MetricKind describes the kind of instrument. diff --git a/sdk/metric/aggregator/api.go b/sdk/metric/aggregator/api.go new file mode 100644 index 00000000000..cbd72b3c3ff --- /dev/null +++ b/sdk/metric/aggregator/api.go @@ -0,0 +1,64 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregator + +import ( + "time" + + "go.opentelemetry.io/api/core" +) + +type ( + Sum interface { + Sum() core.Number + } + + Count interface { + Count() core.Number + } + + Max interface { + Max() core.Number + } + + Min interface { + Min() core.Number + } + + Quantile interface { + Quantile() core.Number + } + + LastValue interface { + LastValue() core.Number + Timestamp() time.Time + } + + MaxSumCount interface { + Sum + Count + Max + } + + MaxSumCountMin interface { + MaxSumCount + Min + } + + Distribution interface { + MaxSumCountMin + Quantile + } +) diff --git a/sdk/metric/aggregator/counter/counter.go b/sdk/metric/aggregator/counter/counter.go index bfbd9502000..a2a3a85c74b 100644 --- a/sdk/metric/aggregator/counter/counter.go +++ b/sdk/metric/aggregator/counter/counter.go @@ -38,8 +38,8 @@ func New() *Aggregator { return &Aggregator{} } -// AsNumber returns the accumulated count as an int64. -func (c *Aggregator) AsNumber() core.Number { +// Sum returns the accumulated count as a Number. +func (c *Aggregator) Sum() core.Number { return c.checkpoint.AsNumber() } @@ -47,7 +47,7 @@ func (c *Aggregator) AsNumber() core.Number { func (c *Aggregator) Collect(ctx context.Context, rec export.MetricRecord, exp export.MetricBatcher) { c.checkpoint = c.current.SwapNumberAtomic(core.Number(0)) - exp.Export(ctx, rec, c) + exp.Process(ctx, rec, c) } // Update modifies the current value (atomically) for later export. 
diff --git a/sdk/metric/aggregator/counter/counter_test.go b/sdk/metric/aggregator/counter/counter_test.go index c8492493e42..e2662a2ed08 100644 --- a/sdk/metric/aggregator/counter/counter_test.go +++ b/sdk/metric/aggregator/counter/counter_test.go @@ -44,7 +44,7 @@ func TestCounterMonotonic(t *testing.T) { agg.Collect(ctx, record, batcher) - require.Equal(t, sum, agg.AsNumber(), "Same sum - monotonic") + require.Equal(t, sum, agg.Sum(), "Same sum - monotonic") }) } @@ -64,7 +64,7 @@ func TestCounterMonotonicNegative(t *testing.T) { agg.Update(ctx, sum, record) agg.Collect(ctx, record, batcher) - require.Equal(t, sum, agg.AsNumber(), "Same sum - monotonic") + require.Equal(t, sum, agg.Sum(), "Same sum - monotonic") }) } @@ -88,7 +88,7 @@ func TestCounterNonMonotonic(t *testing.T) { agg.Collect(ctx, record, batcher) - require.Equal(t, sum, agg.AsNumber(), "Same sum - monotonic") + require.Equal(t, sum, agg.Sum(), "Same sum - monotonic") }) } @@ -116,6 +116,6 @@ func TestCounterMerge(t *testing.T) { sum.AddNumber(record.Descriptor().NumberKind(), sum) - require.Equal(t, sum, agg1.AsNumber(), "Same sum - monotonic") + require.Equal(t, sum, agg1.Sum(), "Same sum - monotonic") }) } diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index 67a0ed22b90..db2dd572f7f 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -82,9 +82,7 @@ func (c *Aggregator) Collect(ctx context.Context, rec export.MetricRecord, exp e c.current = replace c.lock.Unlock() - if c.checkpoint.Count() != 0 { - exp.Export(ctx, rec, c) - } + exp.Process(ctx, rec, c) } // Update modifies the current value (atomically) for later export. 
diff --git a/sdk/metric/aggregator/gauge/gauge.go b/sdk/metric/aggregator/gauge/gauge.go index 5dc7e004b3b..8bc7f50b8a3 100644 --- a/sdk/metric/aggregator/gauge/gauge.go +++ b/sdk/metric/aggregator/gauge/gauge.go @@ -68,12 +68,12 @@ func New() *Aggregator { } } -// AsNumber returns the recorded gauge value as an int64. -func (g *Aggregator) AsNumber() core.Number { +// LastValue returns the last-recorded gauge value as a Number. +func (g *Aggregator) LastValue() core.Number { return (*gaugeData)(g.checkpoint).value.AsNumber() } -// Timestamp returns the timestamp of the alst recorded gauge value. +// Timestamp returns the timestamp of the last recorded gauge value. func (g *Aggregator) Timestamp() time.Time { return (*gaugeData)(g.checkpoint).timestamp } @@ -82,7 +82,7 @@ func (g *Aggregator) Timestamp() time.Time { func (g *Aggregator) Collect(ctx context.Context, rec export.MetricRecord, exp export.MetricBatcher) { g.checkpoint = atomic.LoadPointer(&g.current) - exp.Export(ctx, rec, g) + exp.Process(ctx, rec, g) } // Update modifies the current value (atomically) for later export. 
diff --git a/sdk/metric/aggregator/gauge/gauge_test.go b/sdk/metric/aggregator/gauge/gauge_test.go index de529bc3a64..2401eb0b23b 100644 --- a/sdk/metric/aggregator/gauge/gauge_test.go +++ b/sdk/metric/aggregator/gauge/gauge_test.go @@ -47,7 +47,7 @@ func TestGaugeNonMonotonic(t *testing.T) { agg.Collect(ctx, record, batcher) - require.Equal(t, last, agg.AsNumber(), "Same last value - non-monotonic") + require.Equal(t, last, agg.LastValue(), "Same last value - non-monotonic") }) } @@ -69,7 +69,7 @@ func TestGaugeMonotonic(t *testing.T) { agg.Collect(ctx, record, batcher) - require.Equal(t, last, agg.AsNumber(), "Same last value - monotonic") + require.Equal(t, last, agg.LastValue(), "Same last value - monotonic") }) } @@ -91,7 +91,7 @@ func TestGaugeMonotonicDescending(t *testing.T) { agg.Collect(ctx, record, batcher) - require.Equal(t, first, agg.AsNumber(), "Same last value - monotonic") + require.Equal(t, first, agg.LastValue(), "Same last value - monotonic") }) } @@ -121,7 +121,7 @@ func TestGaugeNormalMerge(t *testing.T) { agg1.Merge(agg2, record.Descriptor()) require.Equal(t, t2, agg1.Timestamp(), "Merged timestamp - non-monotonic") - require.Equal(t, first2, agg1.AsNumber(), "Merged value - non-monotonic") + require.Equal(t, first2, agg1.LastValue(), "Merged value - non-monotonic") }) } @@ -146,7 +146,7 @@ func TestGaugeMonotonicMerge(t *testing.T) { agg1.Merge(agg2, record.Descriptor()) - require.Equal(t, first2, agg1.AsNumber(), "Merged value - monotonic") + require.Equal(t, first2, agg1.LastValue(), "Merged value - monotonic") require.Equal(t, agg2.Timestamp(), agg1.Timestamp(), "Merged timestamp - monotonic") }) } diff --git a/sdk/metric/aggregator/maxsumcount/msc.go b/sdk/metric/aggregator/maxsumcount/msc.go index 3d3ed955e77..e7a8d6a644d 100644 --- a/sdk/metric/aggregator/maxsumcount/msc.go +++ b/sdk/metric/aggregator/maxsumcount/msc.go @@ -74,7 +74,7 @@ func (c *Aggregator) Collect(ctx context.Context, rec export.MetricRecord, exp e c.checkpoint.sum = 
c.current.sum.SwapNumberAtomic(core.Number(0)) c.checkpoint.max = c.current.max.SwapNumberAtomic(core.Number(0)) - exp.Export(ctx, rec, c) + exp.Process(ctx, rec, c) } // Update modifies the current value (atomically) for later export. diff --git a/sdk/metric/aggregator/test/test.go b/sdk/metric/aggregator/test/test.go index 0cba335d9ea..36df6700c02 100644 --- a/sdk/metric/aggregator/test/test.go +++ b/sdk/metric/aggregator/test/test.go @@ -71,7 +71,11 @@ func (m *metricBatcher) AggregatorFor(rec export.MetricRecord) export.MetricAggr return nil } -func (m *metricBatcher) Export(context.Context, export.MetricRecord, export.MetricAggregator) { +func (m *metricBatcher) ReadCheckpoint() export.MetricProducer { + return nil +} + +func (m *metricBatcher) Process(context.Context, export.MetricRecord, export.MetricAggregator) { } func RunProfiles(t *testing.T, f func(*testing.T, Profile)) { diff --git a/sdk/metric/batcher/stateless/stateless.go b/sdk/metric/batcher/stateless/stateless.go index a8728fa85a1..8559963d02f 100644 --- a/sdk/metric/batcher/stateless/stateless.go +++ b/sdk/metric/batcher/stateless/stateless.go @@ -36,7 +36,7 @@ type ( // NOTE: When only a single exporter is in use, // there's a potential to avoid encoding the labels // twice, since this class has to encode them once. 
- labels []core.KeyValue + labels []core.Value } dkiMap map[*export.Descriptor]map[core.Key]int @@ -44,6 +44,7 @@ type ( ) var _ export.MetricBatcher = &Batcher{} +var _ export.MetricProducer = aggMap{} func NewBatcher(selector export.MetricAggregationSelector) *Batcher { return &Batcher{ @@ -57,7 +58,7 @@ func (b *Batcher) AggregatorFor(record export.MetricRecord) export.MetricAggrega return b.selector.AggregatorFor(record) } -func (b *Batcher) Export(_ context.Context, record export.MetricRecord, agg export.MetricAggregator) { +func (b *Batcher) Process(_ context.Context, record export.MetricRecord, agg export.MetricAggregator) { desc := record.Descriptor() keys := desc.Keys() @@ -113,9 +114,22 @@ func (b *Batcher) Export(_ context.Context, record export.MetricRecord, agg expo if !ok { b.agg[encoded] = aggEntry{ aggregator: agg, + labels: canon, descriptor: record.Descriptor(), } } else { rag.aggregator.Merge(agg, record.Descriptor()) } } + +func (b *Batcher) ReadCheckpoint() export.MetricProducer { + checkpoint := b.agg + b.agg = aggMap{} + return checkpoint +} + +func (c aggMap) Foreach(f func(export.MetricAggregator, *export.Descriptor, []core.Value)) { + for _, entry := range c { + f(entry.aggregator, entry.descriptor, entry.labels) + } +} diff --git a/sdk/metric/benchmark_test.go b/sdk/metric/benchmark_test.go index eb99c93314e..06b821db0ae 100644 --- a/sdk/metric/benchmark_test.go +++ b/sdk/metric/benchmark_test.go @@ -61,7 +61,11 @@ func (bf *benchFixture) AggregatorFor(rec export.MetricRecord) export.MetricAggr return nil } -func (bf *benchFixture) Export(ctx context.Context, rec export.MetricRecord, agg export.MetricAggregator) { +func (bf *benchFixture) Process(ctx context.Context, rec export.MetricRecord, agg export.MetricAggregator) { +} + +func (bf *benchFixture) ReadCheckpoint() export.MetricProducer { + return nil } func makeLabels(n int) []core.KeyValue { diff --git a/sdk/metric/controller/push/push.go b/sdk/metric/controller/push/push.go index 
f51949bfd7a..93da40259c9 100644 --- a/sdk/metric/controller/push/push.go +++ b/sdk/metric/controller/push/push.go @@ -23,23 +23,27 @@ import ( sdk "go.opentelemetry.io/sdk/metric" ) +// Controller organizes a periodic push of metric data. type Controller struct { - sdk *sdk.SDK - batcher export.MetricBatcher - encoder export.MetricEncoder - ticker *time.Ticker - ch chan struct{} + sdk *sdk.SDK + batcher export.MetricBatcher + exporter export.MetricExporter + ticker *time.Ticker + ch chan struct{} } -// var _ metric.Provider = &Controller{} +var _ metric.Provider = &Controller{} -func New(batcher export.MetricBatcher, encoder export.MetricEncoder, period time.Duration) *Controller { +// New constructs a Controller, an implementation of metric.Provider, +// using the provider batcher, exporter, period. The batcher itself +// is configured with aggregation policy selection. +func New(batcher export.MetricBatcher, exporter export.MetricExporter, period time.Duration) *Controller { return &Controller{ - sdk: sdk.New(batcher), - batcher: batcher, - encoder: encoder, - ticker: time.NewTicker(period), - ch: make(chan struct{}), + sdk: sdk.New(batcher), + batcher: batcher, + exporter: exporter, + ticker: time.NewTicker(period), + ch: make(chan struct{}), } } @@ -69,5 +73,5 @@ func (c *Controller) run() { func (c *Controller) tick() { ctx := context.Background() c.sdk.Collect(ctx) - c.encoder.Encode(ctx, c.batcher.GetProducer()) + c.exporter.Export(ctx, c.batcher.ReadCheckpoint()) } diff --git a/sdk/metric/monotone_test.go b/sdk/metric/monotone_test.go index b4cdfd79554..4aba28e01c0 100644 --- a/sdk/metric/monotone_test.go +++ b/sdk/metric/monotone_test.go @@ -41,14 +41,18 @@ func (m *monotoneBatcher) AggregatorFor(rec export.MetricRecord) export.MetricAg return gauge.New() } -func (m *monotoneBatcher) Export(_ context.Context, record export.MetricRecord, agg export.MetricAggregator) { +func (m *monotoneBatcher) ReadCheckpoint() export.MetricProducer { + return nil +} + 
+func (m *monotoneBatcher) Process(_ context.Context, record export.MetricRecord, agg export.MetricAggregator) { require.Equal(m.t, "my.gauge.name", record.Descriptor().Name()) require.Equal(m.t, 1, len(record.Labels())) require.Equal(m.t, "a", string(record.Labels()[0].Key)) require.Equal(m.t, "b", record.Labels()[0].Value.Emit()) gauge := agg.(*gauge.Aggregator) - val := gauge.AsNumber() + val := gauge.LastValue() ts := gauge.Timestamp() m.currentValue = &val diff --git a/sdk/metric/stress_test.go b/sdk/metric/stress_test.go index d9c6b0a865e..d01638a5134 100644 --- a/sdk/metric/stress_test.go +++ b/sdk/metric/stress_test.go @@ -222,7 +222,7 @@ func (f *testFixture) assertTest(numCollect int) { } func (f *testFixture) preCollect() { - // Collect calls Export in a single-threaded context. No need + // Collect calls Process in a single-threaded context. No need // to lock this struct. f.dupCheck = map[testKey]int{} } @@ -238,7 +238,11 @@ func (f *testFixture) AggregatorFor(record export.MetricRecord) export.MetricAgg } } -func (f *testFixture) Export(ctx context.Context, record export.MetricRecord, agg export.MetricAggregator) { +func (f *testFixture) ReadCheckpoint() export.MetricProducer { + return nil +} + +func (f *testFixture) Process(ctx context.Context, record export.MetricRecord, agg export.MetricAggregator) { desc := record.Descriptor() key := testKey{ labels: canonicalizeLabels(record.Labels()), @@ -254,10 +258,10 @@ func (f *testFixture) Export(ctx context.Context, record export.MetricRecord, ag switch desc.MetricKind() { case export.CounterMetricKind: - f.impl.storeCollect(actual, agg.(*counter.Aggregator).AsNumber(), time.Time{}) + f.impl.storeCollect(actual, agg.(*counter.Aggregator).Sum(), time.Time{}) case export.GaugeMetricKind: gauge := agg.(*gauge.Aggregator) - f.impl.storeCollect(actual, gauge.AsNumber(), gauge.Timestamp()) + f.impl.storeCollect(actual, gauge.LastValue(), gauge.Timestamp()) default: panic("Not used in this test") } From 
a7886319ad1b6d7a46ef9e796557b8b17513c778 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 31 Oct 2019 07:41:46 -0700 Subject: [PATCH 09/73] Modify basic main.go --- example/basic/go.sum | 1 + example/basic/main.go | 32 +++++-- exporter/metric/stateless/stdout/stdout.go | 97 ++++++++++++++++++++-- sdk/metric/aggregator/api.go | 14 +--- sdk/metric/batcher/stateless/stateless.go | 2 +- sdk/metric/controller/push/push.go | 2 + 6 files changed, 124 insertions(+), 24 deletions(-) diff --git a/example/basic/go.sum b/example/basic/go.sum index 0d402fd076a..789dc1b0023 100644 --- a/example/basic/go.sum +++ b/example/basic/go.sum @@ -90,6 +90,7 @@ github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= diff --git a/example/basic/main.go b/example/basic/main.go index 2c2c470c222..83dfd3c3913 100644 --- a/example/basic/main.go +++ b/example/basic/main.go @@ -16,12 +16,17 @@ package main import ( "context" + "time" "go.opentelemetry.io/api/distributedcontext" "go.opentelemetry.io/api/key" "go.opentelemetry.io/api/metric" "go.opentelemetry.io/api/trace" + "go.opentelemetry.io/exporter/metric/stateless/stdout" "go.opentelemetry.io/global" + "go.opentelemetry.io/sdk/metric/batcher/stateless" + "go.opentelemetry.io/sdk/metric/controller/push" + "go.opentelemetry.io/sdk/metric/selector/simple" ) var ( 
@@ -35,6 +40,15 @@ var ( ) func main() { + selector := simple.New() + batcher := stateless.New(selector) + exporter := stdout.New(stdout.Options{PrettyPrint: true}) + pusher := push.New(batcher, exporter, time.Second) + pusher.Start() + defer pusher.Stop() + + global.SetMeterProvider(pusher) + oneMetric := meter.NewFloat64Gauge("ex.com.one", metric.WithKeys(fooKey, barKey, lemonsKey), metric.WithDescription("A gauge set to 1.0"), @@ -63,16 +77,18 @@ func main() { trace.CurrentSpan(ctx).SetAttributes(anotherKey.String("yes")) - gauge.Set(ctx, 1) + for { + gauge.Set(ctx, 1) - meter.RecordBatch( - // Note: call-site variables added as context Entries: - distributedcontext.NewContext(ctx, anotherKey.String("xyz")), - commonLabels, + meter.RecordBatch( + // Note: call-site variables added as context Entries: + distributedcontext.NewContext(ctx, anotherKey.String("xyz")), + commonLabels, - oneMetric.Measurement(1.0), - measureTwo.Measurement(2.0), - ) + oneMetric.Measurement(1.0), + measureTwo.Measurement(2.0), + ) + } return tracer.WithSpan( ctx, diff --git a/exporter/metric/stateless/stdout/stdout.go b/exporter/metric/stateless/stdout/stdout.go index 8e8e3471cce..881fb9ebbe2 100644 --- a/exporter/metric/stateless/stdout/stdout.go +++ b/exporter/metric/stateless/stdout/stdout.go @@ -16,22 +16,109 @@ package stdout import ( "context" + "encoding/json" + "fmt" + "os" + "strings" "go.opentelemetry.io/api/core" "go.opentelemetry.io/sdk/export" + "go.opentelemetry.io/sdk/metric/aggregator" ) type Exporter struct { + options Options +} + +// Options are the options to be used when initializing a stdout export. +type Options struct { + // File is the destination. If not set, os.Stdout is used. + File *os.File + + // PrettyPrint will pretty the json representation of the span, + // making it print "pretty". Default is false. 
+ PrettyPrint bool + + // Quantiles are the desired aggregation quantiles for measure + // metric data, used when the configured aggregator supports + // quantiles. + Quantiles []float64 +} + +type Exposition struct { + Name string `json:"name"` + Max interface{} `json:"max,omitempty"` + Sum interface{} `json:"sum,omitempty"` + Count interface{} `json:"count,omitempty"` + LastValue interface{} `json:"last,omitempty"` + Timestamp interface{} `json:"time,omitempty"` } var _ export.MetricExporter = &Exporter{} -func New() *Exporter { - return &Exporter{} +func New(options Options) *Exporter { + if options.File == nil { + options.File = os.Stdout + } + return &Exporter{ + options: options, + } } -func (*Exporter) Export(_ context.Context, producer export.MetricProducer) { - producer.Foreach(func(agg export.MetricAggregator, desc *export.Descriptor, labels []core.KeyValue) { - // fmt.Printf("%s %s\n", +func (e *Exporter) Export(_ context.Context, producer export.MetricProducer) { + var expose Exposition + producer.Foreach(func(agg export.MetricAggregator, desc *export.Descriptor, labelValues []core.Value) { + expose = Exposition{} + if sum, ok := agg.(aggregator.Sum); ok { + expose.Sum = sum.Sum().Emit(desc.NumberKind()) + + } else if lv, ok := agg.(aggregator.LastValue); ok { + expose.LastValue = lv.LastValue().Emit(desc.NumberKind()) + expose.Timestamp = lv.Timestamp() + + } else if msc, ok := agg.(aggregator.MaxSumCount); ok { + expose.Max = msc.Max().Emit(desc.NumberKind()) + expose.Sum = msc.Sum().Emit(desc.NumberKind()) + expose.Count = msc.Count().Emit(desc.NumberKind()) + + } else if dist, ok := agg.(aggregator.Distribution); ok { + expose.Max = dist.Max().Emit(desc.NumberKind()) + expose.Sum = dist.Sum().Emit(desc.NumberKind()) + expose.Count = dist.Count().Emit(desc.NumberKind()) + + // TODO print one configured quantile per line + } + + var sb strings.Builder + + sb.WriteString(desc.Name()) + sb.WriteRune('{') + + for i, k := range desc.Keys() { + 
sb.WriteString(string(k)) + sb.WriteRune('=') + sb.WriteRune('"') + sb.WriteString(labelValues[i].Emit()) + sb.WriteRune('"') + } + + sb.WriteRune('}') + + expose.Name = sb.String() + + var data []byte + var err error + if e.options.PrettyPrint { + data, err = json.MarshalIndent(expose, "", "\t") + } else { + data, err = json.Marshal(expose) + } + + if err != nil { + fmt.Fprintf(e.options.File, "JSON encode error: %v\n", err) + return + } + + fmt.Fprintln(e.options.File, string(data)) }) } diff --git a/sdk/metric/aggregator/api.go b/sdk/metric/aggregator/api.go index cbd72b3c3ff..b0eafad0a2d 100644 --- a/sdk/metric/aggregator/api.go +++ b/sdk/metric/aggregator/api.go @@ -20,6 +20,9 @@ import ( "go.opentelemetry.io/api/core" ) +// TODO: Add Min() support to maxsumcount? It's the same as +// Quantile(0) but cheap to compute like Max(). + type ( Sum interface { Sum() core.Number @@ -33,10 +36,6 @@ type ( Max() core.Number } - Min interface { - Min() core.Number - } - Quantile interface { Quantile() core.Number } @@ -52,13 +51,8 @@ type ( Max } - MaxSumCountMin interface { - MaxSumCount - Min - } - Distribution interface { - MaxSumCountMin + MaxSumCount Quantile } ) diff --git a/sdk/metric/batcher/stateless/stateless.go b/sdk/metric/batcher/stateless/stateless.go index 8559963d02f..e85381a7b8b 100644 --- a/sdk/metric/batcher/stateless/stateless.go +++ b/sdk/metric/batcher/stateless/stateless.go @@ -46,7 +46,7 @@ type ( var _ export.MetricBatcher = &Batcher{} var _ export.MetricProducer = aggMap{} -func NewBatcher(selector export.MetricAggregationSelector) *Batcher { +func New(selector export.MetricAggregationSelector) *Batcher { return &Batcher{ selector: selector, dki: dkiMap{}, diff --git a/sdk/metric/controller/push/push.go b/sdk/metric/controller/push/push.go index 93da40259c9..abe77506a70 100644 --- a/sdk/metric/controller/push/push.go +++ b/sdk/metric/controller/push/push.go @@ -57,6 +57,8 @@ func (c *Controller) Start() { func (c *Controller) Stop() { 
close(c.ch) + + // TODO wait for the last run, flush, etc. } func (c *Controller) run() { From fbc50a1ee26f436583768a5d9c707fea00e9e4b4 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 31 Oct 2019 07:58:16 -0700 Subject: [PATCH 10/73] Main is working --- example/basic/main.go | 25 ++++++------- exporter/metric/stateless/stdout/stdout.go | 14 +++++--- sdk/metric/batcher/stateless/stateless.go | 6 +++- sdk/metric/controller/push/push.go | 7 +++- sdk/metric/selector/simple/simple.go | 41 ++++++++++++++++++++++ 5 files changed, 75 insertions(+), 18 deletions(-) create mode 100644 sdk/metric/selector/simple/simple.go diff --git a/example/basic/main.go b/example/basic/main.go index 83dfd3c3913..e8302dd6f7d 100644 --- a/example/basic/main.go +++ b/example/basic/main.go @@ -31,7 +31,6 @@ import ( var ( tracer = global.TraceProvider().GetTracer("ex.com/basic") - meter = global.MeterProvider().GetMeter("ex.com/basic") // TODO: should share resources ^^^? fooKey = key.New("ex.com/foo") barKey = key.New("ex.com/bar") @@ -42,13 +41,17 @@ var ( func main() { selector := simple.New() batcher := stateless.New(selector) - exporter := stdout.New(stdout.Options{PrettyPrint: true}) + exporter := stdout.New(stdout.Options{PrettyPrint: false}) pusher := push.New(batcher, exporter, time.Second) pusher.Start() defer pusher.Stop() global.SetMeterProvider(pusher) + // Note: Have to get the meter after the global is + // initialized. See OTEP 0005. 
+ meter := global.MeterProvider().GetMeter("ex.com/basic") + oneMetric := meter.NewFloat64Gauge("ex.com.one", metric.WithKeys(fooKey, barKey, lemonsKey), metric.WithDescription("A gauge set to 1.0"), @@ -77,18 +80,16 @@ func main() { trace.CurrentSpan(ctx).SetAttributes(anotherKey.String("yes")) - for { - gauge.Set(ctx, 1) + gauge.Set(ctx, 1) - meter.RecordBatch( - // Note: call-site variables added as context Entries: - distributedcontext.NewContext(ctx, anotherKey.String("xyz")), - commonLabels, + meter.RecordBatch( + // Note: call-site variables added as context Entries: + distributedcontext.NewContext(ctx, anotherKey.String("xyz")), + commonLabels, - oneMetric.Measurement(1.0), - measureTwo.Measurement(2.0), - ) - } + oneMetric.Measurement(1.0), + measureTwo.Measurement(2.0), + ) return tracer.WithSpan( ctx, diff --git a/exporter/metric/stateless/stdout/stdout.go b/exporter/metric/stateless/stdout/stdout.go index 881fb9ebbe2..ab6a9f7579f 100644 --- a/exporter/metric/stateless/stdout/stdout.go +++ b/exporter/metric/stateless/stdout/stdout.go @@ -92,17 +92,23 @@ func (e *Exporter) Export(_ context.Context, producer export.MetricProducer) { var sb strings.Builder sb.WriteString(desc.Name()) - sb.WriteRune('{') + + if len(desc.Keys()) > 0 { + sb.WriteRune('{') + } for i, k := range desc.Keys() { + if i > 0 { + sb.WriteRune(',') + } sb.WriteString(string(k)) sb.WriteRune('=') - sb.WriteRune('"') sb.WriteString(labelValues[i].Emit()) - sb.WriteRune('"') } - sb.WriteRune('}') + if len(desc.Keys()) > 0 { + sb.WriteRune('}') + } expose.Name = sb.String() diff --git a/sdk/metric/batcher/stateless/stateless.go b/sdk/metric/batcher/stateless/stateless.go index e85381a7b8b..f0cf03ca706 100644 --- a/sdk/metric/batcher/stateless/stateless.go +++ b/sdk/metric/batcher/stateless/stateless.go @@ -74,9 +74,13 @@ func (b *Batcher) Process(_ context.Context, record export.MetricRecord, agg exp } // Compute the value list. Note: Unspecified values become - // empty strings. 
+ // empty strings. TODO: pin this down. canon := make([]core.Value, len(keys)) + for i := 0; i < len(keys); i++ { + canon[i] = core.String("") + } + for _, kv := range record.Labels() { pos, ok := ki[kv.Key] if !ok { diff --git a/sdk/metric/controller/push/push.go b/sdk/metric/controller/push/push.go index abe77506a70..e051a76af70 100644 --- a/sdk/metric/controller/push/push.go +++ b/sdk/metric/controller/push/push.go @@ -16,6 +16,7 @@ package push import ( "context" + "sync" "time" "go.opentelemetry.io/api/metric" @@ -29,6 +30,7 @@ type Controller struct { batcher export.MetricBatcher exporter export.MetricExporter ticker *time.Ticker + wg sync.WaitGroup ch chan struct{} } @@ -52,19 +54,22 @@ func (c *Controller) GetMeter(name string) metric.Meter { } func (c *Controller) Start() { + c.wg.Add(1) go c.run() } func (c *Controller) Stop() { close(c.ch) + c.wg.Wait() - // TODO wait for the last run, flush, etc. + c.tick() } func (c *Controller) run() { for { select { case <-c.ch: + c.wg.Done() return case <-c.ticker.C: c.tick() diff --git a/sdk/metric/selector/simple/simple.go b/sdk/metric/selector/simple/simple.go new file mode 100644 index 00000000000..40c07a46998 --- /dev/null +++ b/sdk/metric/selector/simple/simple.go @@ -0,0 +1,41 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package simple + +import ( + "go.opentelemetry.io/sdk/export" + "go.opentelemetry.io/sdk/metric/aggregator/counter" + "go.opentelemetry.io/sdk/metric/aggregator/gauge" + "go.opentelemetry.io/sdk/metric/aggregator/maxsumcount" +) + +type selector struct{} + +// New returns a simple aggregation selector that uses counter, gauge, +// and maxsumcount behavior for the three kinds of metric. +func New() export.MetricAggregationSelector { + return selector{} +} + +func (s selector) AggregatorFor(record export.MetricRecord) export.MetricAggregator { + switch record.Descriptor().MetricKind() { + case export.GaugeMetricKind: + return gauge.New() + case export.MeasureMetricKind: + return maxsumcount.New() + default: + return counter.New() + } +} From 49aa969e826b350f5b0d09057d346569ac58938c Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 31 Oct 2019 08:02:46 -0700 Subject: [PATCH 11/73] Batch stdout output --- example/basic/main.go | 2 +- exporter/metric/stateless/stdout/stdout.go | 42 +++++++++++++--------- 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/example/basic/main.go b/example/basic/main.go index e8302dd6f7d..c7a5213d5eb 100644 --- a/example/basic/main.go +++ b/example/basic/main.go @@ -41,7 +41,7 @@ var ( func main() { selector := simple.New() batcher := stateless.New(selector) - exporter := stdout.New(stdout.Options{PrettyPrint: false}) + exporter := stdout.New(stdout.Options{PrettyPrint: true}) pusher := push.New(batcher, exporter, time.Second) pusher.Start() defer pusher.Stop() diff --git a/exporter/metric/stateless/stdout/stdout.go b/exporter/metric/stateless/stdout/stdout.go index ab6a9f7579f..9122b23732e 100644 --- a/exporter/metric/stateless/stdout/stdout.go +++ b/exporter/metric/stateless/stdout/stdout.go @@ -20,6 +20,7 @@ import ( "fmt" "os" "strings" + "time" "go.opentelemetry.io/api/core" "go.opentelemetry.io/sdk/export" @@ -45,13 +46,18 @@ type Options struct { Quantiles []float64 } -type Exposition struct { +type expoBatch struct { + 
Timestamp time.Time `json:"time,omitempty"` + Updates []expoLine `json:"updates,omitempty"` +} + +type expoLine struct { Name string `json:"name"` Max interface{} `json:"max,omitempty"` Sum interface{} `json:"sum,omitempty"` Count interface{} `json:"count,omitempty"` LastValue interface{} `json:"last,omitempty"` - Timestamp interface{} `json:"time,omitempty"` + Timestamp time.Time `json:"time,omitempty"` } var _ export.MetricExporter = &Exporter{} @@ -66,9 +72,9 @@ func New(options Options) *Exporter { } func (e *Exporter) Export(_ context.Context, producer export.MetricProducer) { - var expose Exposition + var batch expoBatch producer.Foreach(func(agg export.MetricAggregator, desc *export.Descriptor, labelValues []core.Value) { - expose = Exposition{} + var expose expoLine if sum, ok := agg.(aggregator.Sum); ok { expose.Sum = sum.Sum().Emit(desc.NumberKind()) @@ -112,19 +118,21 @@ func (e *Exporter) Export(_ context.Context, producer export.MetricProducer) { expose.Name = sb.String() - var data []byte - var err error - if e.options.PrettyPrint { - data, err = json.MarshalIndent(expose, "", "\t") - } else { - data, err = json.Marshal(expose) - } + batch.Updates = append(batch.Updates, expose) + }) - if err != nil { - fmt.Fprintf(e.options.File, "JSON encode error: %v\n", err) - return - } + var data []byte + var err error + if e.options.PrettyPrint { + data, err = json.MarshalIndent(batch, "", "\t") + } else { + data, err = json.Marshal(batch) + } - fmt.Fprintln(e.options.File, string(data)) - }) + if err != nil { + fmt.Fprintf(e.options.File, "JSON encode error: %v\n", err) + return + } + + fmt.Fprintln(e.options.File, string(data)) } From c2c73be30f275df808f5613c66b2e49855505716 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 31 Oct 2019 08:03:21 -0700 Subject: [PATCH 12/73] Sum udpate --- example/basic/go.sum | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/example/basic/go.sum b/example/basic/go.sum index 789dc1b0023..aa7064a2684 100644 --- 
a/example/basic/go.sum +++ b/example/basic/go.sum @@ -1,5 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/sketches-go v0.0.0-20190923095040-43f19ad77ff7 h1:qELHH0AWCvf98Yf+CNIJx9vOZOfHFDDzgDRYsnNk/vs= github.com/DataDog/sketches-go v0.0.0-20190923095040-43f19ad77ff7/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= @@ -21,6 +22,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= @@ -82,6 +84,7 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -107,9 +110,11 @@ github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= @@ -136,6 +141,7 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml v1.5.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= @@ -177,6 
+183,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -274,6 +281,7 @@ google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRn gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -282,6 +290,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= 
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 5ea912847e36f728792b42c8954d20d985c7b0ee Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 31 Oct 2019 08:05:48 -0700 Subject: [PATCH 13/73] Rename stdout --- example/basic/main.go | 2 +- exporter/metric/{stateless => }/stdout/stdout.go | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename exporter/metric/{stateless => }/stdout/stdout.go (100%) diff --git a/example/basic/main.go b/example/basic/main.go index c7a5213d5eb..d023e85b2db 100644 --- a/example/basic/main.go +++ b/example/basic/main.go @@ -22,7 +22,7 @@ import ( "go.opentelemetry.io/api/key" "go.opentelemetry.io/api/metric" "go.opentelemetry.io/api/trace" - "go.opentelemetry.io/exporter/metric/stateless/stdout" + "go.opentelemetry.io/exporter/metric/stdout" "go.opentelemetry.io/global" "go.opentelemetry.io/sdk/metric/batcher/stateless" "go.opentelemetry.io/sdk/metric/controller/push" diff --git a/exporter/metric/stateless/stdout/stdout.go b/exporter/metric/stdout/stdout.go similarity index 100% rename from exporter/metric/stateless/stdout/stdout.go rename to exporter/metric/stdout/stdout.go From f0d986ccd59d7ef2918e584ceb46f24cfbcd7197 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 31 Oct 2019 08:18:43 -0700 Subject: [PATCH 14/73] Add stateless/stateful Batcher options --- example/basic/main.go | 22 ++-- sdk/metric/batcher/stateful/stateful.go | 142 ++++++++++++++++++++++ sdk/metric/batcher/stateless/stateless.go | 117 ++---------------- 3 files changed, 161 insertions(+), 120 deletions(-) create mode 100644 sdk/metric/batcher/stateful/stateful.go diff --git a/example/basic/main.go b/example/basic/main.go index d023e85b2db..4e72bc23ef9 100644 --- a/example/basic/main.go +++ b/example/basic/main.go @@ -24,7 
+24,7 @@ import ( "go.opentelemetry.io/api/trace" "go.opentelemetry.io/exporter/metric/stdout" "go.opentelemetry.io/global" - "go.opentelemetry.io/sdk/metric/batcher/stateless" + "go.opentelemetry.io/sdk/metric/batcher/stateful" "go.opentelemetry.io/sdk/metric/controller/push" "go.opentelemetry.io/sdk/metric/selector/simple" ) @@ -40,7 +40,7 @@ var ( func main() { selector := simple.New() - batcher := stateless.New(selector) + batcher := stateful.New(selector) exporter := stdout.New(stdout.Options{PrettyPrint: true}) pusher := push.New(batcher, exporter, time.Second) pusher.Start() @@ -80,16 +80,18 @@ func main() { trace.CurrentSpan(ctx).SetAttributes(anotherKey.String("yes")) - gauge.Set(ctx, 1) + for { + gauge.Set(ctx, 1) - meter.RecordBatch( - // Note: call-site variables added as context Entries: - distributedcontext.NewContext(ctx, anotherKey.String("xyz")), - commonLabels, + meter.RecordBatch( + // Note: call-site variables added as context Entries: + distributedcontext.NewContext(ctx, anotherKey.String("xyz")), + commonLabels, - oneMetric.Measurement(1.0), - measureTwo.Measurement(2.0), - ) + oneMetric.Measurement(1.0), + measureTwo.Measurement(2.0), + ) + } return tracer.WithSpan( ctx, diff --git a/sdk/metric/batcher/stateful/stateful.go b/sdk/metric/batcher/stateful/stateful.go new file mode 100644 index 00000000000..512c89af349 --- /dev/null +++ b/sdk/metric/batcher/stateful/stateful.go @@ -0,0 +1,142 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package stateful + +import ( + "context" + "strings" + + "go.opentelemetry.io/api/core" + "go.opentelemetry.io/sdk/export" +) + +type ( + Batcher struct { + dki dkiMap + agg aggMap + selector export.MetricAggregationSelector + } + + aggEntry struct { + aggregator export.MetricAggregator + descriptor *export.Descriptor + + // NOTE: When only a single exporter is in use, + // there's a potential to avoid encoding the labels + // twice, since this class has to encode them once. + labels []core.Value + } + + dkiMap map[*export.Descriptor]map[core.Key]int + aggMap map[string]aggEntry +) + +var _ export.MetricBatcher = &Batcher{} +var _ export.MetricProducer = aggMap{} + +func New(selector export.MetricAggregationSelector) *Batcher { + return &Batcher{ + selector: selector, + dki: dkiMap{}, + agg: aggMap{}, + } +} + +func (b *Batcher) AggregatorFor(record export.MetricRecord) export.MetricAggregator { + return b.selector.AggregatorFor(record) +} + +func (b *Batcher) Process(_ context.Context, record export.MetricRecord, agg export.MetricAggregator) { + desc := record.Descriptor() + keys := desc.Keys() + + // Cache the mapping from Descriptor->Key->Index + ki, ok := b.dki[desc] + if !ok { + ki = map[core.Key]int{} + b.dki[desc] = ki + + for i, k := range keys { + ki[k] = i + } + } + + // Compute the value list. Note: Unspecified values become + // empty strings. TODO: pin this down. + canon := make([]core.Value, len(keys)) + + for i := 0; i < len(keys); i++ { + canon[i] = core.String("") + } + + for _, kv := range record.Labels() { + pos, ok := ki[kv.Key] + if !ok { + continue + } + canon[pos] = kv.Value + } + + // Compute an encoded lookup key. + // + // Note the opportunity to use an export-specific + // representation here, then avoid recomputing it in the + // exporter. 
For example, depending on the exporter, we could + // use an OpenMetrics representation, a statsd representation, + // etc. This only benefits a single exporter, of course. + // + // Note also the possibility to speed this computation of + // "encoded" from "canon" in the form of a (Descriptor, + // LabelSet)->Encoded cache. + var sb strings.Builder + for i := 0; i < len(keys); i++ { + sb.WriteString(string(keys[i])) + sb.WriteRune('=') + sb.WriteString(canon[i].Emit()) + + if i < len(keys)-1 { + sb.WriteRune(',') + } + } + + encoded := sb.String() + + // Reduce dimensionality. + rag, ok := b.agg[encoded] + if !ok { + b.agg[encoded] = aggEntry{ + aggregator: agg, + labels: canon, + descriptor: record.Descriptor(), + } + } else { + rag.aggregator.Merge(agg, record.Descriptor()) + } +} + +func (b *Batcher) Reset() { + b.agg = aggMap{} +} + +func (b *Batcher) ReadCheckpoint() export.MetricProducer { + checkpoint := b.agg + return checkpoint +} + +func (c aggMap) Foreach(f func(export.MetricAggregator, *export.Descriptor, []core.Value)) { + for _, entry := range c { + f(entry.aggregator, entry.descriptor, entry.labels) + } +} diff --git a/sdk/metric/batcher/stateless/stateless.go b/sdk/metric/batcher/stateless/stateless.go index f0cf03ca706..cab17b8ec0b 100644 --- a/sdk/metric/batcher/stateless/stateless.go +++ b/sdk/metric/batcher/stateless/stateless.go @@ -15,125 +15,22 @@ package stateless import ( - "context" - "strings" - - "go.opentelemetry.io/api/core" "go.opentelemetry.io/sdk/export" + "go.opentelemetry.io/sdk/metric/batcher/stateful" ) -type ( - Batcher struct { - dki dkiMap - agg aggMap - selector export.MetricAggregationSelector - } - - aggEntry struct { - aggregator export.MetricAggregator - descriptor *export.Descriptor - - // NOTE: When only a single exporter is in use, - // there's a potential to avoid encoding the labels - // twice, since this class has to encode them once. 
- labels []core.Value - } - - dkiMap map[*export.Descriptor]map[core.Key]int - aggMap map[string]aggEntry -) - -var _ export.MetricBatcher = &Batcher{} -var _ export.MetricProducer = aggMap{} +type Batcher struct { + *stateful.Batcher +} func New(selector export.MetricAggregationSelector) *Batcher { return &Batcher{ - selector: selector, - dki: dkiMap{}, - agg: aggMap{}, - } -} - -func (b *Batcher) AggregatorFor(record export.MetricRecord) export.MetricAggregator { - return b.selector.AggregatorFor(record) -} - -func (b *Batcher) Process(_ context.Context, record export.MetricRecord, agg export.MetricAggregator) { - desc := record.Descriptor() - keys := desc.Keys() - - // Cache the mapping from Descriptor->Key->Index - ki, ok := b.dki[desc] - if !ok { - ki = map[core.Key]int{} - b.dki[desc] = ki - - for i, k := range keys { - ki[k] = i - } - } - - // Compute the value list. Note: Unspecified values become - // empty strings. TODO: pin this down. - canon := make([]core.Value, len(keys)) - - for i := 0; i < len(keys); i++ { - canon[i] = core.String("") - } - - for _, kv := range record.Labels() { - pos, ok := ki[kv.Key] - if !ok { - continue - } - canon[pos] = kv.Value - } - - // Compute an encoded lookup key. - // - // Note the opportunity to use an export-specific - // representation here, then avoid recomputing it in the - // exporter. For example, depending on the exporter, we could - // use an OpenMetrics representation, a statsd representation, - // etc. This only benefits a single exporter, of course. - // - // Note also the possibility to speed this computation of - // "encoded" from "canon" in the form of a (Descriptor, - // LabelSet)->Encoded cache. - var sb strings.Builder - for i := 0; i < len(keys); i++ { - sb.WriteString(string(keys[i])) - sb.WriteRune('=') - sb.WriteString(canon[i].Emit()) - - if i < len(keys)-1 { - sb.WriteRune(',') - } - } - - encoded := sb.String() - - // Reduce dimensionality. 
- rag, ok := b.agg[encoded] - if !ok { - b.agg[encoded] = aggEntry{ - aggregator: agg, - labels: canon, - descriptor: record.Descriptor(), - } - } else { - rag.aggregator.Merge(agg, record.Descriptor()) + Batcher: stateful.New(selector), } } func (b *Batcher) ReadCheckpoint() export.MetricProducer { - checkpoint := b.agg - b.agg = aggMap{} + checkpoint := b.Batcher.ReadCheckpoint() + b.Batcher.Reset() return checkpoint } - -func (c aggMap) Foreach(f func(export.MetricAggregator, *export.Descriptor, []core.Value)) { - for _, entry := range c { - f(entry.aggregator, entry.descriptor, entry.labels) - } -} From 63d79a833e3312b89b2de9fcfbe744561fe70563 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 31 Oct 2019 08:41:34 -0700 Subject: [PATCH 15/73] Undo a for-loop in the example, remove a done TODO --- example/basic/main.go | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/example/basic/main.go b/example/basic/main.go index 4e72bc23ef9..93fb849bba8 100644 --- a/example/basic/main.go +++ b/example/basic/main.go @@ -80,18 +80,16 @@ func main() { trace.CurrentSpan(ctx).SetAttributes(anotherKey.String("yes")) - for { - gauge.Set(ctx, 1) + gauge.Set(ctx, 1) - meter.RecordBatch( - // Note: call-site variables added as context Entries: - distributedcontext.NewContext(ctx, anotherKey.String("xyz")), - commonLabels, + meter.RecordBatch( + // Note: call-site variables added as context Entries: + distributedcontext.NewContext(ctx, anotherKey.String("xyz")), + commonLabels, - oneMetric.Measurement(1.0), - measureTwo.Measurement(2.0), - ) - } + oneMetric.Measurement(1.0), + measureTwo.Measurement(2.0), + ) return tracer.WithSpan( ctx, @@ -110,7 +108,4 @@ func main() { if err != nil { panic(err) } - - // TODO: How to flush? 
- // loader.Flush() } From 837035d39dd15d5c68c09b36cb7b15978f926e30 Mon Sep 17 00:00:00 2001 From: jmacd Date: Sat, 2 Nov 2019 23:08:47 -0700 Subject: [PATCH 16/73] Update imports --- exporter/metric/stdout/stdout.go | 8 ++++---- sdk/metric/aggregator/api.go | 4 ++-- sdk/metric/batcher/stateful/stateful.go | 6 +++--- sdk/metric/batcher/stateless/stateless.go | 6 +++--- sdk/metric/controller/push/push.go | 8 ++++---- sdk/metric/doc.go | 2 +- sdk/metric/selector/simple/simple.go | 10 +++++----- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index 9122b23732e..6be1c48150d 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package stdout +package stdout // import "go.opentelemetry.io/otel/exporter/metric/stdout" import ( "context" @@ -22,9 +22,9 @@ import ( "strings" "time" - "go.opentelemetry.io/api/core" - "go.opentelemetry.io/sdk/export" - "go.opentelemetry.io/sdk/metric/aggregator" + "go.opentelemetry.io/otel/api/core" + "go.opentelemetry.io/otel/sdk/export" + "go.opentelemetry.io/otel/sdk/metric/aggregator" ) type Exporter struct { diff --git a/sdk/metric/aggregator/api.go b/sdk/metric/aggregator/api.go index b0eafad0a2d..5f9125b8ead 100644 --- a/sdk/metric/aggregator/api.go +++ b/sdk/metric/aggregator/api.go @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -package aggregator +package aggregator // import "go.opentelemetry.io/otel/sdk/metric/aggregator" import ( "time" - "go.opentelemetry.io/api/core" + "go.opentelemetry.io/otel/api/core" ) // TODO: Add Min() support to maxsumcount? 
It's the same as diff --git a/sdk/metric/batcher/stateful/stateful.go b/sdk/metric/batcher/stateful/stateful.go index 512c89af349..9d642098963 100644 --- a/sdk/metric/batcher/stateful/stateful.go +++ b/sdk/metric/batcher/stateful/stateful.go @@ -12,14 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -package stateful +package stateful // import "go.opentelemetry.io/otel/sdk/metric/batcher/stateful" import ( "context" "strings" - "go.opentelemetry.io/api/core" - "go.opentelemetry.io/sdk/export" + "go.opentelemetry.io/otel/api/core" + "go.opentelemetry.io/otel/sdk/export" ) type ( diff --git a/sdk/metric/batcher/stateless/stateless.go b/sdk/metric/batcher/stateless/stateless.go index cab17b8ec0b..9d0eeb37c0b 100644 --- a/sdk/metric/batcher/stateless/stateless.go +++ b/sdk/metric/batcher/stateless/stateless.go @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -package stateless +package stateless // import "go.opentelemetry.io/otel/sdk/metric/batcher/stateless" import ( - "go.opentelemetry.io/sdk/export" - "go.opentelemetry.io/sdk/metric/batcher/stateful" + "go.opentelemetry.io/otel/sdk/export" + "go.opentelemetry.io/otel/sdk/metric/batcher/stateful" ) type Batcher struct { diff --git a/sdk/metric/controller/push/push.go b/sdk/metric/controller/push/push.go index e051a76af70..f75a91af015 100644 --- a/sdk/metric/controller/push/push.go +++ b/sdk/metric/controller/push/push.go @@ -12,16 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package push +package push // import "go.opentelemetry.io/otel/sdk/metric/controller/push" import ( "context" "sync" "time" - "go.opentelemetry.io/api/metric" - "go.opentelemetry.io/sdk/export" - sdk "go.opentelemetry.io/sdk/metric" + "go.opentelemetry.io/otel/api/metric" + "go.opentelemetry.io/otel/sdk/export" + sdk "go.opentelemetry.io/otel/sdk/metric" ) // Controller organizes a periodic push of metric data. diff --git a/sdk/metric/doc.go b/sdk/metric/doc.go index 9957182eabd..8d5a4285983 100644 --- a/sdk/metric/doc.go +++ b/sdk/metric/doc.go @@ -57,4 +57,4 @@ new handles are created and atomically cleared during collect. The reclaim list is used as a second chance, in case there is a race between looking up a record and record deletion. */ -package metric +package metric // import "go.opentelemetry.io/otel/sdk/metric" diff --git a/sdk/metric/selector/simple/simple.go b/sdk/metric/selector/simple/simple.go index 40c07a46998..1284f6ef334 100644 --- a/sdk/metric/selector/simple/simple.go +++ b/sdk/metric/selector/simple/simple.go @@ -12,13 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package simple +package simple // import "go.opentelemetry.io/otel/sdk/metric/selector/simpler" import ( - "go.opentelemetry.io/sdk/export" - "go.opentelemetry.io/sdk/metric/aggregator/counter" - "go.opentelemetry.io/sdk/metric/aggregator/gauge" - "go.opentelemetry.io/sdk/metric/aggregator/maxsumcount" + "go.opentelemetry.io/otel/sdk/export" + "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" + "go.opentelemetry.io/otel/sdk/metric/aggregator/gauge" + "go.opentelemetry.io/otel/sdk/metric/aggregator/maxsumcount" ) type selector struct{} From 9586471e35bc07b36d194454cae50d6902db2eb6 Mon Sep 17 00:00:00 2001 From: jmacd Date: Sat, 2 Nov 2019 23:14:33 -0700 Subject: [PATCH 17/73] Add note --- exporter/metric/stdout/stdout.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index 6be1c48150d..93518d7a84c 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -43,6 +43,10 @@ type Options struct { // Quantiles are the desired aggregation quantiles for measure // metric data, used when the configured aggregator supports // quantiles. + // + // Note: this exporter is meant as a demonstration; a real + // exporter may wish to configure quantiles on a per-metric + // basis. 
Quantiles []float64 } From 0c09f8c241dc325691e4d0c8bb7cc7d1df4d6b21 Mon Sep 17 00:00:00 2001 From: jmacd Date: Sun, 3 Nov 2019 21:54:16 -0800 Subject: [PATCH 18/73] Rename defaultkeys --- example/basic/main.go | 24 ++--- exporter/metric/stdout/stdout.go | 12 +-- sdk/export/metric.go | 6 +- .../sampling/sampling.go} | 23 +---- sdk/metric/aggregator/test/test.go | 4 + .../defaultkeys.go} | 34 +++---- sdk/metric/batcher/ungrouped/ungrouped.go | 88 +++++++++++++++++++ sdk/metric/sdk.go | 4 + 8 files changed, 141 insertions(+), 54 deletions(-) rename sdk/metric/{batcher/stateless/stateless.go => aggregator/sampling/sampling.go} (54%) rename sdk/metric/batcher/{stateful/stateful.go => defaultkeys/defaultkeys.go} (83%) create mode 100644 sdk/metric/batcher/ungrouped/ungrouped.go diff --git a/example/basic/main.go b/example/basic/main.go index 776030f54e7..445d678df5f 100644 --- a/example/basic/main.go +++ b/example/basic/main.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/otel/api/trace" "go.opentelemetry.io/otel/exporter/metric/stdout" "go.opentelemetry.io/otel/global" - "go.opentelemetry.io/otel/sdk/metric/batcher/stateful" + "go.opentelemetry.io/otel/sdk/metric/batcher/ungrouped" "go.opentelemetry.io/otel/sdk/metric/controller/push" "go.opentelemetry.io/otel/sdk/metric/selector/simple" ) @@ -40,7 +40,7 @@ var ( func main() { selector := simple.New() - batcher := stateful.New(selector) + batcher := ungrouped.New(selector, true) exporter := stdout.New(stdout.Options{PrettyPrint: true}) pusher := push.New(batcher, exporter, time.Second) pusher.Start() @@ -66,7 +66,7 @@ func main() { barKey.String("bar1"), ) - commonLabels := meter.Labels(lemonsKey.Int(10)) + commonLabels := meter.Labels(lemonsKey.Int(10), key.String("A", "1"), key.String("B", "2"), key.String("C", "3")) gauge := oneMetric.AcquireHandle(commonLabels) defer gauge.Release() @@ -80,16 +80,18 @@ func main() { trace.CurrentSpan(ctx).SetAttributes(anotherKey.String("yes")) - gauge.Set(ctx, 1) + for { + 
gauge.Set(ctx, 1) - meter.RecordBatch( - // Note: call-site variables added as context Entries: - distributedcontext.NewContext(ctx, anotherKey.String("xyz")), - commonLabels, + meter.RecordBatch( + // Note: call-site variables added as context Entries: + distributedcontext.NewContext(ctx, anotherKey.String("xyz")), + commonLabels, - oneMetric.Measurement(1.0), - measureTwo.Measurement(2.0), - ) + oneMetric.Measurement(1.0), + measureTwo.Measurement(2.0), + ) + } return tracer.WithSpan( ctx, diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index 93518d7a84c..6c330443c6d 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -77,7 +77,7 @@ func New(options Options) *Exporter { func (e *Exporter) Export(_ context.Context, producer export.MetricProducer) { var batch expoBatch - producer.Foreach(func(agg export.MetricAggregator, desc *export.Descriptor, labelValues []core.Value) { + producer.Foreach(func(agg export.MetricAggregator, desc *export.Descriptor, labels []core.KeyValue) { var expose expoLine if sum, ok := agg.(aggregator.Sum); ok { expose.Sum = sum.Sum().Emit(desc.NumberKind()) @@ -103,20 +103,20 @@ func (e *Exporter) Export(_ context.Context, producer export.MetricProducer) { sb.WriteString(desc.Name()) - if len(desc.Keys()) > 0 { + if len(labels) > 0 { sb.WriteRune('{') } - for i, k := range desc.Keys() { + for i, label := range labels { if i > 0 { sb.WriteRune(',') } - sb.WriteString(string(k)) + sb.WriteString(string(label.Key)) sb.WriteRune('=') - sb.WriteString(labelValues[i].Emit()) + sb.WriteString(label.Value.Emit()) } - if len(desc.Keys()) > 0 { + if len(labels) > 0 { sb.WriteRune('}') } diff --git a/sdk/export/metric.go b/sdk/export/metric.go index c3869b09d0c..4c9771f440a 100644 --- a/sdk/export/metric.go +++ b/sdk/export/metric.go @@ -61,6 +61,10 @@ type MetricRecord interface { // Labels() describe the labsels corresponding the // aggregation being performed. 
Labels() []core.KeyValue + + // EncodedLabels are a unique string-encoded form of Labels() + // suitable for use as a map key. + EncodedLabels() string } // MetricExporter handles presentation of the checkpoint of aggregate @@ -73,7 +77,7 @@ type MetricExporter interface { // MetricProducer allows a MetricExporter to access a checkpoint of // aggregated metrics one at a time. type MetricProducer interface { - Foreach(func(MetricAggregator, *Descriptor, []core.Value)) + Foreach(func(MetricAggregator, *Descriptor, []core.KeyValue)) } // MetricKind describes the kind of instrument. diff --git a/sdk/metric/batcher/stateless/stateless.go b/sdk/metric/aggregator/sampling/sampling.go similarity index 54% rename from sdk/metric/batcher/stateless/stateless.go rename to sdk/metric/aggregator/sampling/sampling.go index 9d0eeb37c0b..74303b9d0b1 100644 --- a/sdk/metric/batcher/stateless/stateless.go +++ b/sdk/metric/aggregator/sampling/sampling.go @@ -12,25 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package stateless // import "go.opentelemetry.io/otel/sdk/metric/batcher/stateless" +package sampling // import "go.opentelemetry.io/otel/sdk/metric/aggregator/sampling" -import ( - "go.opentelemetry.io/otel/sdk/export" - "go.opentelemetry.io/otel/sdk/metric/batcher/stateful" -) +// import "github.com/lightstep/varopt" -type Batcher struct { - *stateful.Batcher -} - -func New(selector export.MetricAggregationSelector) *Batcher { - return &Batcher{ - Batcher: stateful.New(selector), - } -} - -func (b *Batcher) ReadCheckpoint() export.MetricProducer { - checkpoint := b.Batcher.ReadCheckpoint() - b.Batcher.Reset() - return checkpoint -} +// var _ = varopt.Varopt{} diff --git a/sdk/metric/aggregator/test/test.go b/sdk/metric/aggregator/test/test.go index 71be2a1b860..4e0b8f4a7ea 100644 --- a/sdk/metric/aggregator/test/test.go +++ b/sdk/metric/aggregator/test/test.go @@ -67,6 +67,10 @@ func (t *metricRecord) Labels() []core.KeyValue { return nil } +func (t *metricRecord) EncodedLabels() string { + return "" +} + func (m *metricBatcher) AggregatorFor(rec export.MetricRecord) export.MetricAggregator { return nil } diff --git a/sdk/metric/batcher/stateful/stateful.go b/sdk/metric/batcher/defaultkeys/defaultkeys.go similarity index 83% rename from sdk/metric/batcher/stateful/stateful.go rename to sdk/metric/batcher/defaultkeys/defaultkeys.go index 9d642098963..e89ab088e9c 100644 --- a/sdk/metric/batcher/stateful/stateful.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package stateful // import "go.opentelemetry.io/otel/sdk/metric/batcher/stateful" +package defaultkeys // import "go.opentelemetry.io/otel/sdk/metric/batcher/defaultkeys" import ( "context" @@ -27,6 +27,7 @@ type ( dki dkiMap agg aggMap selector export.MetricAggregationSelector + stateful bool } aggEntry struct { @@ -36,7 +37,7 @@ type ( // NOTE: When only a single exporter is in use, // there's a potential to avoid encoding the labels // twice, since this class has to encode them once. - labels []core.Value + labels []core.KeyValue } dkiMap map[*export.Descriptor]map[core.Key]int @@ -46,11 +47,12 @@ type ( var _ export.MetricBatcher = &Batcher{} var _ export.MetricProducer = aggMap{} -func New(selector export.MetricAggregationSelector) *Batcher { +func New(selector export.MetricAggregationSelector, stateful bool) *Batcher { return &Batcher{ selector: selector, dki: dkiMap{}, agg: aggMap{}, + stateful: stateful, } } @@ -74,11 +76,12 @@ func (b *Batcher) Process(_ context.Context, record export.MetricRecord, agg exp } // Compute the value list. Note: Unspecified values become - // empty strings. TODO: pin this down. - canon := make([]core.Value, len(keys)) + // empty strings. TODO: pin this down, we have no appropriate + // Value constructor. + canon := make([]core.KeyValue, len(keys)) - for i := 0; i < len(keys); i++ { - canon[i] = core.String("") + for i, key := range keys { + canon[i] = key.String("") } for _, kv := range record.Labels() { @@ -86,7 +89,7 @@ func (b *Batcher) Process(_ context.Context, record export.MetricRecord, agg exp if !ok { continue } - canon[pos] = kv.Value + canon[pos].Value = kv.Value } // Compute an encoded lookup key. 
@@ -104,7 +107,7 @@ func (b *Batcher) Process(_ context.Context, record export.MetricRecord, agg exp for i := 0; i < len(keys); i++ { sb.WriteString(string(keys[i])) sb.WriteRune('=') - sb.WriteString(canon[i].Emit()) + sb.WriteString(canon[i].Value.Emit()) if i < len(keys)-1 { sb.WriteRune(',') @@ -119,23 +122,22 @@ func (b *Batcher) Process(_ context.Context, record export.MetricRecord, agg exp b.agg[encoded] = aggEntry{ aggregator: agg, labels: canon, - descriptor: record.Descriptor(), + descriptor: desc, } } else { - rag.aggregator.Merge(agg, record.Descriptor()) + rag.aggregator.Merge(agg, desc) } } -func (b *Batcher) Reset() { - b.agg = aggMap{} -} - func (b *Batcher) ReadCheckpoint() export.MetricProducer { checkpoint := b.agg + if !b.stateful { + b.agg = aggMap{} + } return checkpoint } -func (c aggMap) Foreach(f func(export.MetricAggregator, *export.Descriptor, []core.Value)) { +func (c aggMap) Foreach(f func(export.MetricAggregator, *export.Descriptor, []core.KeyValue)) { for _, entry := range c { f(entry.aggregator, entry.descriptor, entry.labels) } diff --git a/sdk/metric/batcher/ungrouped/ungrouped.go b/sdk/metric/batcher/ungrouped/ungrouped.go new file mode 100644 index 00000000000..46da5c0df3e --- /dev/null +++ b/sdk/metric/batcher/ungrouped/ungrouped.go @@ -0,0 +1,88 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ungrouped // import "go.opentelemetry.io/otel/sdk/metric/batcher/ungrouped" + +import ( + "context" + + "go.opentelemetry.io/otel/api/core" + "go.opentelemetry.io/otel/sdk/export" +) + +type ( + Batcher struct { + selector export.MetricAggregationSelector + batchMap batchMap + stateful bool + } + + batchKey struct { + descriptor *export.Descriptor + encoded string + } + + batchValue struct { + aggregator export.MetricAggregator + labels []core.KeyValue + } + + batchMap map[batchKey]batchValue +) + +var _ export.MetricBatcher = &Batcher{} +var _ export.MetricProducer = batchMap{} + +func New(selector export.MetricAggregationSelector, stateful bool) *Batcher { + return &Batcher{ + selector: selector, + batchMap: batchMap{}, + stateful: stateful, + } +} + +func (b *Batcher) AggregatorFor(record export.MetricRecord) export.MetricAggregator { + return b.selector.AggregatorFor(record) +} + +func (b *Batcher) Process(_ context.Context, record export.MetricRecord, agg export.MetricAggregator) { + desc := record.Descriptor() + key := batchKey{ + descriptor: desc, + encoded: record.EncodedLabels(), + } + value, ok := b.batchMap[key] + if !ok { + b.batchMap[key] = batchValue{ + aggregator: agg, + labels: record.Labels(), + } + } else { + value.aggregator.Merge(agg, desc) + } +} + +func (b *Batcher) ReadCheckpoint() export.MetricProducer { + checkpoint := b.batchMap + if !b.stateful { + b.batchMap = batchMap{} + } + return checkpoint +} + +func (c batchMap) Foreach(f func(export.MetricAggregator, *export.Descriptor, []core.KeyValue)) { + for key, value := range c { + f(value.aggregator, key.descriptor, value.labels) + } +} diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 43c1c266754..53932a9d15f 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -479,3 +479,7 @@ func (r *record) Descriptor() *export.Descriptor { func (r *record) Labels() []core.KeyValue { return r.labels.sorted } + +func (r *record) EncodedLabels() string { + return r.labels.encoded 
+} From 03ff7d227d8899b16301b1fd607e0d37cb74ce2e Mon Sep 17 00:00:00 2001 From: jmacd Date: Sun, 3 Nov 2019 23:37:52 -0800 Subject: [PATCH 19/73] Support variable label encoder to speed OpenMetrics/Statsd export --- example/basic/main.go | 23 ++++--- exporter/metric/stdout/stdout.go | 19 ++---- sdk/export/metric.go | 18 ++++- sdk/metric/batcher/defaultkeys/defaultkeys.go | 65 +++++++++---------- sdk/metric/batcher/ungrouped/ungrouped.go | 8 ++- sdk/metric/benchmark_test.go | 2 +- sdk/metric/controller/push/push.go | 8 ++- sdk/metric/labelencoder.go | 56 ++++++++++++++++ sdk/metric/monotone_test.go | 2 +- sdk/metric/sdk.go | 61 +++++++---------- sdk/metric/stress_test.go | 2 +- 11 files changed, 157 insertions(+), 107 deletions(-) create mode 100644 sdk/metric/labelencoder.go diff --git a/example/basic/main.go b/example/basic/main.go index 445d678df5f..132a2d69093 100644 --- a/example/basic/main.go +++ b/example/basic/main.go @@ -24,7 +24,8 @@ import ( "go.opentelemetry.io/otel/api/trace" "go.opentelemetry.io/otel/exporter/metric/stdout" "go.opentelemetry.io/otel/global" - "go.opentelemetry.io/otel/sdk/metric/batcher/ungrouped" + metricsdk "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/batcher/defaultkeys" "go.opentelemetry.io/otel/sdk/metric/controller/push" "go.opentelemetry.io/otel/sdk/metric/selector/simple" ) @@ -40,8 +41,8 @@ var ( func main() { selector := simple.New() - batcher := ungrouped.New(selector, true) exporter := stdout.New(stdout.Options{PrettyPrint: true}) + batcher := defaultkeys.New(selector, metricsdk.DefaultLabelEncoder(), true) pusher := push.New(batcher, exporter, time.Second) pusher.Start() defer pusher.Stop() @@ -80,18 +81,16 @@ func main() { trace.CurrentSpan(ctx).SetAttributes(anotherKey.String("yes")) - for { - gauge.Set(ctx, 1) + gauge.Set(ctx, 1) - meter.RecordBatch( - // Note: call-site variables added as context Entries: - distributedcontext.NewContext(ctx, anotherKey.String("xyz")), - commonLabels, + 
meter.RecordBatch( + // Note: call-site variables added as context Entries: + distributedcontext.NewContext(ctx, anotherKey.String("xyz")), + commonLabels, - oneMetric.Measurement(1.0), - measureTwo.Measurement(2.0), - ) - } + oneMetric.Measurement(1.0), + measureTwo.Measurement(2.0), + ) return tracer.WithSpan( ctx, diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index 6c330443c6d..6fed44cb920 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -22,7 +22,6 @@ import ( "strings" "time" - "go.opentelemetry.io/otel/api/core" "go.opentelemetry.io/otel/sdk/export" "go.opentelemetry.io/otel/sdk/metric/aggregator" ) @@ -77,7 +76,10 @@ func New(options Options) *Exporter { func (e *Exporter) Export(_ context.Context, producer export.MetricProducer) { var batch expoBatch - producer.Foreach(func(agg export.MetricAggregator, desc *export.Descriptor, labels []core.KeyValue) { + producer.Foreach(func(agg export.MetricAggregator, record export.ProducedRecord) { + desc := record.Descriptor + labels := record.Labels // HERE TODO + var expose expoLine if sum, ok := agg.(aggregator.Sum); ok { expose.Sum = sum.Sum().Emit(desc.NumberKind()) @@ -105,18 +107,7 @@ func (e *Exporter) Export(_ context.Context, producer export.MetricProducer) { if len(labels) > 0 { sb.WriteRune('{') - } - - for i, label := range labels { - if i > 0 { - sb.WriteRune(',') - } - sb.WriteString(string(label.Key)) - sb.WriteRune('=') - sb.WriteString(label.Value.Emit()) - } - - if len(labels) > 0 { + sb.WriteString(record.EncodedLabels) sb.WriteRune('}') } diff --git a/sdk/export/metric.go b/sdk/export/metric.go index 4c9771f440a..a5aa8bb01a1 100644 --- a/sdk/export/metric.go +++ b/sdk/export/metric.go @@ -74,10 +74,26 @@ type MetricExporter interface { Export(context.Context, MetricProducer) } +// MetricLabelEncoder enables an optimization for export pipelines that +// use text to encode their label sets. 
This interface allows configuring +// the encoder used in the SDK and/or the MetricBatcher so that by the +// time the exporter is called, the same encoding may be used. +type MetricLabelEncoder interface { + EncodeLabels([]core.KeyValue) string +} + +// ProducedRecord +type ProducedRecord struct { + Descriptor *Descriptor + Labels []core.KeyValue + Encoder MetricLabelEncoder + EncodedLabels string +} + // MetricProducer allows a MetricExporter to access a checkpoint of // aggregated metrics one at a time. type MetricProducer interface { - Foreach(func(MetricAggregator, *Descriptor, []core.KeyValue)) + Foreach(func(MetricAggregator, ProducedRecord)) } // MetricKind describes the kind of instrument. diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys.go b/sdk/metric/batcher/defaultkeys/defaultkeys.go index e89ab088e9c..4528cf993bb 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys.go @@ -16,7 +16,6 @@ package defaultkeys // import "go.opentelemetry.io/otel/sdk/metric/batcher/defau import ( "context" - "strings" "go.opentelemetry.io/otel/api/core" "go.opentelemetry.io/otel/sdk/export" @@ -24,32 +23,37 @@ import ( type ( Batcher struct { - dki dkiMap - agg aggMap selector export.MetricAggregationSelector + lencoder export.MetricLabelEncoder stateful bool + dki dkiMap + agg aggMap } aggEntry struct { aggregator export.MetricAggregator descriptor *export.Descriptor - // NOTE: When only a single exporter is in use, - // there's a potential to avoid encoding the labels - // twice, since this class has to encode them once. 
- labels []core.KeyValue + labels []core.KeyValue + encoded string } dkiMap map[*export.Descriptor]map[core.Key]int aggMap map[string]aggEntry + + producer struct { + aggMap aggMap + lencoder export.MetricLabelEncoder + } ) var _ export.MetricBatcher = &Batcher{} -var _ export.MetricProducer = aggMap{} +var _ export.MetricProducer = &producer{} -func New(selector export.MetricAggregationSelector, stateful bool) *Batcher { +func New(selector export.MetricAggregationSelector, lencoder export.MetricLabelEncoder, stateful bool) *Batcher { return &Batcher{ selector: selector, + lencoder: lencoder, dki: dkiMap{}, agg: aggMap{}, stateful: stateful, @@ -84,6 +88,9 @@ func (b *Batcher) Process(_ context.Context, record export.MetricRecord, agg exp canon[i] = key.String("") } + // Note also the possibility to speed this computation of + // "encoded" via "canon" in the form of a (Descriptor, + // LabelSet)->(Labels, Encoded) cache. for _, kv := range record.Labels() { pos, ok := ki[kv.Key] if !ok { @@ -93,28 +100,7 @@ func (b *Batcher) Process(_ context.Context, record export.MetricRecord, agg exp } // Compute an encoded lookup key. - // - // Note the opportunity to use an export-specific - // representation here, then avoid recomputing it in the - // exporter. For example, depending on the exporter, we could - // use an OpenMetrics representation, a statsd representation, - // etc. This only benefits a single exporter, of course. - // - // Note also the possibility to speed this computation of - // "encoded" from "canon" in the form of a (Descriptor, - // LabelSet)->Encoded cache. - var sb strings.Builder - for i := 0; i < len(keys); i++ { - sb.WriteString(string(keys[i])) - sb.WriteRune('=') - sb.WriteString(canon[i].Value.Emit()) - - if i < len(keys)-1 { - sb.WriteRune(',') - } - } - - encoded := sb.String() + encoded := b.lencoder.EncodeLabels(canon) // Reduce dimensionality. 
rag, ok := b.agg[encoded] @@ -134,11 +120,20 @@ func (b *Batcher) ReadCheckpoint() export.MetricProducer { if !b.stateful { b.agg = aggMap{} } - return checkpoint + return &producer{ + aggMap: checkpoint, + lencoder: b.lencoder, + } } -func (c aggMap) Foreach(f func(export.MetricAggregator, *export.Descriptor, []core.KeyValue)) { - for _, entry := range c { - f(entry.aggregator, entry.descriptor, entry.labels) +func (p *producer) Foreach(f func(export.MetricAggregator, export.ProducedRecord)) { + for encoded, entry := range p.aggMap { + pr := export.ProducedRecord{ + Descriptor: entry.descriptor, + Labels: entry.labels, + Encoder: p.lencoder, + EncodedLabels: encoded, + } + f(entry.aggregator, pr) } } diff --git a/sdk/metric/batcher/ungrouped/ungrouped.go b/sdk/metric/batcher/ungrouped/ungrouped.go index 46da5c0df3e..350e1759c0a 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped.go +++ b/sdk/metric/batcher/ungrouped/ungrouped.go @@ -81,8 +81,12 @@ func (b *Batcher) ReadCheckpoint() export.MetricProducer { return checkpoint } -func (c batchMap) Foreach(f func(export.MetricAggregator, *export.Descriptor, []core.KeyValue)) { +func (c batchMap) Foreach(f func(export.MetricAggregator, export.ProducedRecord)) { for key, value := range c { - f(value.aggregator, key.descriptor, value.labels) + pr := export.ProducedRecord{ + Descriptor: key.descriptor, + Labels: value.labels, + } + f(value.aggregator, pr) } } diff --git a/sdk/metric/benchmark_test.go b/sdk/metric/benchmark_test.go index f56e6f96aa2..f39bd4a9133 100644 --- a/sdk/metric/benchmark_test.go +++ b/sdk/metric/benchmark_test.go @@ -41,7 +41,7 @@ func newFixture(b *testing.B) *benchFixture { bf := &benchFixture{ B: b, } - bf.sdk = sdk.New(bf) + bf.sdk = sdk.New(bf, sdk.DefaultLabelEncoder()) return bf } diff --git a/sdk/metric/controller/push/push.go b/sdk/metric/controller/push/push.go index f75a91af015..234aec346f4 100644 --- a/sdk/metric/controller/push/push.go +++ b/sdk/metric/controller/push/push.go @@ -40,8 
+40,14 @@ var _ metric.Provider = &Controller{} // using the provider batcher, exporter, period. The batcher itself // is configured with aggregation policy selection. func New(batcher export.MetricBatcher, exporter export.MetricExporter, period time.Duration) *Controller { + lencoder, _ := exporter.(export.MetricLabelEncoder) + + if lencoder == nil { + lencoder = sdk.DefaultLabelEncoder() + } + return &Controller{ - sdk: sdk.New(batcher), + sdk: sdk.New(batcher, lencoder), batcher: batcher, exporter: exporter, ticker: time.NewTicker(period), diff --git a/sdk/metric/labelencoder.go b/sdk/metric/labelencoder.go new file mode 100644 index 00000000000..987208c89e5 --- /dev/null +++ b/sdk/metric/labelencoder.go @@ -0,0 +1,56 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "bytes" + "sync" + + "go.opentelemetry.io/otel/api/core" + "go.opentelemetry.io/otel/sdk/export" +) + +type defaultLabelEncoder struct { + // pool is a pool of labelset builders. 
+ pool sync.Pool // *bytes.Buffer +} + +var _ export.MetricLabelEncoder = &defaultLabelEncoder{} + +func DefaultLabelEncoder() export.MetricLabelEncoder { + return &defaultLabelEncoder{ + pool: sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, + }, + } +} + +func (d *defaultLabelEncoder) EncodeLabels(labels []core.KeyValue) string { + buf := d.pool.Get().(*bytes.Buffer) + defer d.pool.Put(buf) + buf.Reset() + + for i, kv := range labels { + if i > 0 { + _, _ = buf.WriteRune(',') + } + _, _ = buf.WriteString(string(kv.Key)) + _, _ = buf.WriteRune('=') + _, _ = buf.WriteString(kv.Value.Emit()) + } + return buf.String() +} diff --git a/sdk/metric/monotone_test.go b/sdk/metric/monotone_test.go index f02564a149e..950ec1e299a 100644 --- a/sdk/metric/monotone_test.go +++ b/sdk/metric/monotone_test.go @@ -65,7 +65,7 @@ func TestMonotoneGauge(t *testing.T) { batcher := &monotoneBatcher{ t: t, } - sdk := sdk.New(batcher) + sdk := sdk.New(batcher, sdk.DefaultLabelEncoder()) gauge := sdk.NewInt64Gauge("my.gauge.name", metric.WithMonotonic(true)) diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 53932a9d15f..50fe94eadef 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -15,7 +15,6 @@ package metric import ( - "bytes" "context" "sort" "sync" @@ -34,16 +33,13 @@ type ( // // The SDK supports a Collect() API to gather and export // current data. Collect() should be arranged according to - // the exporter model. Push-based exporters will setup a - // timer to call Collect() periodically. Pull-based exporters + // the batcher model. Push-based batchers will setup a + // timer to call Collect() periodically. Pull-based batchers // will call Collect() when a pull request arrives. SDK struct { // current maps `mapkey` to *record. current sync.Map - // pool is a pool of labelset builders. - pool sync.Pool // *bytes.Buffer - // empty is the (singleton) result of Labels() // w/ zero arguments. 
empty labels @@ -56,8 +52,11 @@ type ( // incremented in `Collect()`. currentEpoch int64 - // exporter is the configured exporter+configuration. - exporter export.MetricBatcher + // batcher is the configured batcher+configuration. + batcher export.MetricBatcher + + // lencoder determines how labels are uniquely encoded. + lencoder export.MetricLabelEncoder // collectLock prevents simultaneous calls to Collect(). collectLock sync.Mutex @@ -119,7 +118,7 @@ type ( // recorder implements the actual RecordOne() API, // depending on the type of aggregation. If nil, the - // metric was disabled by the exporter. + // metric was disabled by the batcher. recorder export.MetricAggregator // next contains the next pointer for both the primary @@ -182,7 +181,7 @@ func (i *instrument) acquireHandle(ls *labels) *record { atomic.AddInt64(&rec.refcount, 1) return rec } - rec.recorder = i.meter.exporter.AggregatorFor(rec) + rec.recorder = i.meter.batcher.AggregatorFor(rec) i.meter.addPrimary(rec) return rec @@ -200,23 +199,19 @@ func (i *instrument) RecordOne(ctx context.Context, number core.Number, ls api.L h.RecordOne(ctx, number) } -// New constructs a new SDK for the given exporter. This SDK supports -// only a single exporter. +// New constructs a new SDK for the given batcher. This SDK supports +// only a single batcher. // // The SDK does not start any background process to collect itself -// periodically, this responsbility lies with the exporter, typically, +// periodically, this responsbility lies with the batcher, typically, // depending on the type of export. For example, a pull-based -// exporter will call Collect() when it receives a request to scrape -// current metric values. A push-based exporter should configure its +// batcher will call Collect() when it receives a request to scrape +// current metric values. A push-based batcher should configure its // own periodic collection. 
-func New(exporter export.MetricBatcher) *SDK { +func New(batcher export.MetricBatcher, lencoder export.MetricLabelEncoder) *SDK { m := &SDK{ - pool: sync.Pool{ - New: func() interface{} { - return &bytes.Buffer{} - }, - }, - exporter: exporter, + batcher: batcher, + lencoder: lencoder, } m.empty.meter = m return m @@ -228,9 +223,9 @@ func (m *SDK) Labels(kvs ...core.KeyValue) api.LabelSet { // Note: This computes a canonical encoding of the labels to // use as a map key. It happens to use the encoding used by // statsd for labels, allowing an optimization for statsd - // exporters. This could be made configurable in the + // batchers. This could be made configurable in the // constructor, to support the same optimization for different - // exporters. + // batchers. // Check for empty set. if len(kvs) == 0 { @@ -251,24 +246,12 @@ func (m *SDK) Labels(kvs ...core.KeyValue) api.LabelSet { } sorted = sorted[0:oi] - // Serialize. - buf := m.pool.Get().(*bytes.Buffer) - defer m.pool.Put(buf) - buf.Reset() - _, _ = buf.WriteRune('|') - delimiter := '#' - for _, kv := range sorted { - _, _ = buf.WriteRune(delimiter) - _, _ = buf.WriteString(string(kv.Key)) - _, _ = buf.WriteRune(':') - _, _ = buf.WriteString(kv.Value.Emit()) - delimiter = ',' - } + encoded := m.lencoder.EncodeLabels(sorted) return &labels{ meter: m, sorted: sorted, - encoded: buf.String(), + encoded: encoded, } } @@ -409,7 +392,7 @@ func (m *SDK) Collect(ctx context.Context) { func (m *SDK) collect(ctx context.Context, r *record) { if r.recorder != nil { - r.recorder.Collect(ctx, r, m.exporter) + r.recorder.Collect(ctx, r, m.batcher) } } diff --git a/sdk/metric/stress_test.go b/sdk/metric/stress_test.go index 769a1ac8857..1096643ff89 100644 --- a/sdk/metric/stress_test.go +++ b/sdk/metric/stress_test.go @@ -276,7 +276,7 @@ func stressTest(t *testing.T, impl testImpl) { lused: map[string]bool{}, } cc := concurrency() - sdk := sdk.New(fixture) + sdk := sdk.New(fixture, sdk.DefaultLabelEncoder()) 
fixture.wg.Add(cc + 1) for i := 0; i < cc; i++ { From 88a236eb5596a80bf4322cd2cffc595e3017cf9e Mon Sep 17 00:00:00 2001 From: jmacd Date: Sun, 3 Nov 2019 23:41:00 -0800 Subject: [PATCH 20/73] Lint --- sdk/metric/batcher/defaultkeys/defaultkeys.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys.go b/sdk/metric/batcher/defaultkeys/defaultkeys.go index 4528cf993bb..37e15344f4f 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys.go @@ -33,9 +33,7 @@ type ( aggEntry struct { aggregator export.MetricAggregator descriptor *export.Descriptor - - labels []core.KeyValue - encoded string + labels []core.KeyValue } dkiMap map[*export.Descriptor]map[core.Key]int From db41c9de404df6938c7b5233630119f768f9a9c6 Mon Sep 17 00:00:00 2001 From: jmacd Date: Tue, 5 Nov 2019 15:36:33 -0800 Subject: [PATCH 21/73] Doc --- sdk/metric/doc.go | 144 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 102 insertions(+), 42 deletions(-) diff --git a/sdk/metric/doc.go b/sdk/metric/doc.go index 8d5a4285983..479d019b66d 100644 --- a/sdk/metric/doc.go +++ b/sdk/metric/doc.go @@ -14,47 +14,107 @@ /* -Package metric implements the OpenTelemetry `Meter` API. The SDK -supports configurable metrics export behavior through a -`export.MetricBatcher` API. Most metrics behavior is controlled -by the `MetricBatcher`, including: - -1. Selecting the concrete type of aggregation to use -2. Receiving exported data during SDK.Collect() - -The call to SDK.Collect() initiates collection. The SDK calls the -`MetricBatcher` for each current record, asking the aggregator to -export itself. Aggregators, found in `./aggregators`, are responsible -for receiving updates and exporting their current state. - -The SDK.Collect() API should be called by an exporter. During the -call to Collect(), the exporter receives calls in a single-threaded -context. 
No locking is required because the SDK.Collect() call -prevents concurrency. - -The SDK uses lock-free algorithms to maintain its internal state. -There are three central data structures at work: - -1. A sync.Map maps unique (InstrumentID, LabelSet) to records -2. A "primary" atomic list of records -3. A "reclaim" atomic list of records - -Collection is oriented around epochs. The SDK internally has a -notion of the "current" epoch, which is incremented each time -Collect() is called. Records contain two atomic counter values, -the epoch in which it was last modified and the epoch in which it -was last collected. Records may be garbage collected when the -epoch in which they were last updated is less than the epoch in -which they were last collected. - -Collect() performs a record-by-record scan of all active records -and exports their current state, before incrementing the current -epoch. Collection events happen at a point in time during -`Collect()`, but all records are not collected in the same instant. - -The purpose of the two lists: the primary list is appended-to when -new handles are created and atomically cleared during collect. The -reclaim list is used as a second chance, in case there is a race -between looking up a record and record deletion. + Package metric implements the OpenTelemetry metric.Meter API. The + SDK supports configurable metrics export behavior through a + collection of export interfaces that support various export + strategies, described below. + + The metric.Meter API consists of methods for constructing each of + the basic kinds of metric instrument. There are six types of + instrument available to the end user, comprised of three basic + kinds of metric instrument (Counter, Gauge, Measure) crossed with + two kinds of number (int64, float64). + + The API assists the SDK by consolidating the variety of metric + instruments into a narrower interface, allowing the SDK to avoid + repetition of boilerplate. 
The API and SDK are separated such + that an event reacheing the SDK has a uniform structure: an + instrument, a label set, and a numerical value. + + To this end, the API uses a core.Number type to represent either + an int64 or a float64, depending on the instrument's definition. + A single implementation interface is used for instruments, + metric.InstrumentImpl, and a single implementation interface is + used for handles, metric.HandleImpl. + + There are three entry points for events in the Metrics API: via + instrument handles, via direct instrument calls, and via + BatchRecord. The SDK is designed with handles as the primary + entry point, the other two entry points are implemented in terms + of short-lived handles. For example, the implementation of a + direct call allocates a handle, operates on the handle, and + releases the handle. Similarly, the implementation of + RecordBatch uses a short-lived handle for each measurement in + the batch. + + Internal Structure + + The SDK is designed with minimal use of locking, to avoid adding + contention for user-level code. For each handle, whether it is + held by user-level code or a short-lived device, there exists an + internal record managed by the SDK. Each internal record + corresponds to a specific instrument and label set combination. + + A sync.Map maintains the mapping of current instruments and label + sets to internal records. To create a new handle, the SDK + consults the Map to locate an existing record, otherwise it + constructs a new record. The SDK maintains a count of the number + of references to each record, ensuring that records are not + reclaimed from the Map while they are still active from the user's + perspective. + + Metric collection is performed via a single-threaded call to + Collect that sweeps through all records in the SDK, checkpointing + their state. 
When a record is discovered that has no references + and has not been updated since the prior collection pass, it is + marked for reclamation and removed from the Map. There exists, at + this moment, a race condition since another goroutine could, in + the same instant, obtain a reference to the handle. + + The SDK is designed to tolerate this sort of race condition, in + the name of reducing lock contention. It is possible for more + than one record with identical instrument and label set to exist + simultaneously, though only one can be linked from the Map at a + time. To avoid lost updates, the SDK maintains two additional + linked lists of records, one managed by the collection code path + and one managed by the instrumentation code path. + + The SDK maintains a current epoch number, corresponding to the + number of completed collections. Each record contains the last + epoch during which it was collected and updated. These variables + allow the collection code path to detect stale records while + allowing the instrumentation code path to detect potential + reclamations. When the instrumentation code path detects a + potential reclamation, it adds itself to the second linked list, + where records are saved from reclamation. + + Each record has an associated aggregator, which maintains the + current state resulting from all metric events since its last + checkpoint. Aggregators may be lock-free or they may use locking, + but they should expect to be called concurrently. Because of the + tolerated race condition described above, aggregators must be + capable of merging with another aggregator of the same type. + + Export Pipeline + + While the SDK serves to maintain a current set of records and + coordinate collection, the behavior of a metrics export pipeline + is configured through the export types in + go.opentelemetry.io/otel/sdk/export/metric. 
They are briefly + summarized here: + + Aggregator: a specific algorithm for combining metric events + AggregationSelector: decides which aggregator to use + Batcher: determine the aggregation dimensions, group (and de-dup) records + Descriptor: summarizes an instrument and its metadata + Record: interface to the SDK-internal record + LabelEncoder: defines a unique mapping from label set to encoded string + Producer: interface to the batcher's checkpoint + ProducedRecord: result of the batcher's grouping + Exporter: output produced records to their final destination + + One final type, a Controller, implements the metric.MeterProvider + interface and is responsible for initiating collection. + */ package metric // import "go.opentelemetry.io/otel/sdk/metric" From 0bc5ffed9c226b0514122cc3dd3a636c2efc3a23 Mon Sep 17 00:00:00 2001 From: jmacd Date: Tue, 5 Nov 2019 16:05:54 -0800 Subject: [PATCH 22/73] Precommit/lint --- go.sum | 1 - sdk/metric/doc.go | 195 ++++++++++++++++++++++------------------------ 2 files changed, 93 insertions(+), 103 deletions(-) diff --git a/go.sum b/go.sum index bc0be47acd3..cadf1b265e7 100644 --- a/go.sum +++ b/go.sum @@ -277,7 +277,6 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opentelemetry.io v0.1.0 h1:EANZoRCOP+A3faIlw/iN6YEWoYb1vleZRKm1EvH8T48= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= diff --git a/sdk/metric/doc.go b/sdk/metric/doc.go index 479d019b66d..3088b55760c 100644 --- 
a/sdk/metric/doc.go +++ b/sdk/metric/doc.go @@ -13,108 +13,99 @@ // limitations under the License. /* - - Package metric implements the OpenTelemetry metric.Meter API. The - SDK supports configurable metrics export behavior through a - collection of export interfaces that support various export - strategies, described below. - - The metric.Meter API consists of methods for constructing each of - the basic kinds of metric instrument. There are six types of - instrument available to the end user, comprised of three basic - kinds of metric instrument (Counter, Gauge, Measure) crossed with - two kinds of number (int64, float64). - - The API assists the SDK by consolidating the variety of metric - instruments into a narrower interface, allowing the SDK to avoid - repetition of boilerplate. The API and SDK are separated such - that an event reacheing the SDK has a uniform structure: an - instrument, a label set, and a numerical value. - - To this end, the API uses a core.Number type to represent either - an int64 or a float64, depending on the instrument's definition. - A single implementation interface is used for instruments, - metric.InstrumentImpl, and a single implementation interface is - used for handles, metric.HandleImpl. - - There are three entry points for events in the Metrics API: via - instrument handles, via direct instrument calls, and via - BatchRecord. The SDK is designed with handles as the primary - entry point, the other two entry points are implemented in terms - of short-lived handles. For example, the implementation of a - direct call allocates a handle, operates on the handle, and - releases the handle. Similarly, the implementation of - RecordBatch uses a short-lived handle for each measurement in - the batch. - - Internal Structure - - The SDK is designed with minimal use of locking, to avoid adding - contention for user-level code. 
For each handle, whether it is - held by user-level code or a short-lived device, there exists an - internal record managed by the SDK. Each internal record - corresponds to a specific instrument and label set combination. - - A sync.Map maintains the mapping of current instruments and label - sets to internal records. To create a new handle, the SDK - consults the Map to locate an existing record, otherwise it - constructs a new record. The SDK maintains a count of the number - of references to each record, ensuring that records are not - reclaimed from the Map while they are still active from the user's - perspective. - - Metric collection is performed via a single-threaded call to - Collect that sweeps through all records in the SDK, checkpointing - their state. When a record is discovered that has no references - and has not been updated since the prior collection pass, it is - marked for reclamation and removed from the Map. There exists, at - this moment, a race condition since another goroutine could, in - the same instant, obtain a reference to the handle. - - The SDK is designed to tolerate this sort of race condition, in - the name of reducing lock contention. It is possible for more - than one record with identical instrument and label set to exist - simultaneously, though only one can be linked from the Map at a - time. To avoid lost updates, the SDK maintains two additional - linked lists of records, one managed by the collection code path - and one managed by the instrumentation code path. - - The SDK maintains a current epoch number, corresponding to the - number of completed collections. Each record contains the last - epoch during which it was collected and updated. These variables - allow the collection code path to detect stale records while - allowing the instrumentation code path to detect potential - reclamations. 
When the instrumentation code path detects a - potential reclamation, it adds itself to the second linked list, - where records are saved from reclamation. - - Each record has an associated aggregator, which maintains the - current state resulting from all metric events since its last - checkpoint. Aggregators may be lock-free or they may use locking, - but they should expect to be called concurrently. Because of the - tolerated race condition described above, aggregators must be - capable of merging with another aggregator of the same type. - - Export Pipeline - - While the SDK serves to maintain a current set of records and - coordinate collection, the behavior of a metrics export pipeline - is configured through the export types in - go.opentelemetry.io/otel/sdk/export/metric. They are briefly - summarized here: - - Aggregator: a specific algorithm for combining metric events - AggregationSelector: decides which aggregator to use - Batcher: determine the aggregation dimensions, group (and de-dup) records - Descriptor: summarizes an instrument and its metadata - Record: interface to the SDK-internal record - LabelEncoder: defines a unique mapping from label set to encoded string - Producer: interface to the batcher's checkpoint - ProducedRecord: result of the batcher's grouping - Exporter: output produced records to their final destination - - One final type, a Controller, implements the metric.MeterProvider - interface and is responsible for initiating collection. +Package metric implements the OpenTelemetry metric.Meter API. The SDK +supports configurable metrics export behavior through a collection of +export interfaces that support various export strategies, described below. + +The metric.Meter API consists of methods for constructing each of the +basic kinds of metric instrument. 
There are six types of instrument +available to the end user, comprised of three basic kinds of metric +instrument (Counter, Gauge, Measure) crossed with two kinds of number +(int64, float64). + +The API assists the SDK by consolidating the variety of metric instruments +into a narrower interface, allowing the SDK to avoid repetition of +boilerplate. The API and SDK are separated such that an event reaching +the SDK has a uniform structure: an instrument, a label set, and a +numerical value. + +To this end, the API uses a core.Number type to represent either an int64 +or a float64, depending on the instrument's definition. A single +implementation interface is used for instruments, metric.InstrumentImpl, +and a single implementation interface is used for handles, +metric.HandleImpl. + +There are three entry points for events in the Metrics API: via instrument +handles, via direct instrument calls, and via BatchRecord. The SDK is +designed with handles as the primary entry point, the other two entry +points are implemented in terms of short-lived handles. For example, the +implementation of a direct call allocates a handle, operates on the +handle, and releases the handle. Similarly, the implementation of +RecordBatch uses a short-lived handle for each measurement in the batch. + +Internal Structure + +The SDK is designed with minimal use of locking, to avoid adding +contention for user-level code. For each handle, whether it is held by +user-level code or a short-lived device, there exists an internal record +managed by the SDK. Each internal record corresponds to a specific +instrument and label set combination. + +A sync.Map maintains the mapping of current instruments and label sets to +internal records. To create a new handle, the SDK consults the Map to +locate an existing record, otherwise it constructs a new record. 
The SDK +maintains a count of the number of references to each record, ensuring +that records are not reclaimed from the Map while they are still active +from the user's perspective. + +Metric collection is performed via a single-threaded call to Collect that +sweeps through all records in the SDK, checkpointing their state. When a +record is discovered that has no references and has not been updated since +the prior collection pass, it is marked for reclamation and removed from +the Map. There exists, at this moment, a race condition since another +goroutine could, in the same instant, obtain a reference to the handle. + +The SDK is designed to tolerate this sort of race condition, in the name +of reducing lock contention. It is possible for more than one record with +identical instrument and label set to exist simultaneously, though only +one can be linked from the Map at a time. To avoid lost updates, the SDK +maintains two additional linked lists of records, one managed by the +collection code path and one managed by the instrumentation code path. + +The SDK maintains a current epoch number, corresponding to the number of +completed collections. Each record contains the last epoch during which +it was collected and updated. These variables allow the collection code +path to detect stale records while allowing the instrumentation code path +to detect potential reclamations. When the instrumentation code path +detects a potential reclamation, it adds itself to the second linked list, +where records are saved from reclamation. + +Each record has an associated aggregator, which maintains the current +state resulting from all metric events since its last checkpoint. +Aggregators may be lock-free or they may use locking, but they should +expect to be called concurrently. Because of the tolerated race condition +described above, aggregators must be capable of merging with another +aggregator of the same type. 
+ +Export Pipeline + +While the SDK serves to maintain a current set of records and coordinate +collection, the behavior of a metrics export pipeline is configured +through the export types in go.opentelemetry.io/otel/sdk/export/metric. +They are briefly summarized here: + +Aggregator: a specific algorithm for combining metric events +AggregationSelector: decides which aggregator to use +Batcher: determine the aggregation dimensions, group (and de-dup) records +Descriptor: summarizes an instrument and its metadata +Record: interface to the SDK-internal record +LabelEncoder: defines a unique mapping from label set to encoded string +Producer: interface to the batcher's checkpoint +ProducedRecord: result of the batcher's grouping +Exporter: output produced records to their final destination + +One final type, a Controller, implements the metric.MeterProvider +interface and is responsible for initiating collection. */ package metric // import "go.opentelemetry.io/otel/sdk/metric" From c575b083819d5cfa0bd0b3747e68e4a453507407 Mon Sep 17 00:00:00 2001 From: jmacd Date: Tue, 5 Nov 2019 16:36:39 -0800 Subject: [PATCH 23/73] Simplify Aggregator API --- go.sum | 1 + sdk/export/metric/metric.go | 12 +++++----- sdk/metric/aggregator/array/array.go | 4 +--- sdk/metric/aggregator/array/array_test.go | 18 +++++++------- sdk/metric/aggregator/counter/counter.go | 10 ++++---- sdk/metric/aggregator/counter/counter_test.go | 18 +++++++------- sdk/metric/aggregator/ddsketch/ddsketch.go | 6 ++--- .../aggregator/ddsketch/ddsketch_test.go | 10 ++++---- sdk/metric/aggregator/gauge/gauge.go | 8 +++---- sdk/metric/aggregator/gauge/gauge_test.go | 24 +++++++++---------- sdk/metric/aggregator/maxsumcount/msc.go | 6 ++--- sdk/metric/aggregator/maxsumcount/msc_test.go | 10 ++++---- sdk/metric/aggregator/sampling/sampling.go | 19 --------------- sdk/metric/aggregator/test/test.go | 20 ++-------------- sdk/metric/sdk.go | 11 +++++---- 15 files changed, 67 insertions(+), 110 deletions(-) delete 
mode 100644 sdk/metric/aggregator/sampling/sampling.go diff --git a/go.sum b/go.sum index cadf1b265e7..bc0be47acd3 100644 --- a/go.sum +++ b/go.sum @@ -277,6 +277,7 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opentelemetry.io v0.1.0 h1:EANZoRCOP+A3faIlw/iN6YEWoYb1vleZRKm1EvH8T48= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index dd72e2dbe19..558ce893dc5 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -62,14 +62,14 @@ type AggregationSelector interface { // a counter, a gauge, a histogram. type Aggregator interface { // Update receives a new measured value and incorporates it - // into the aggregation. + // into the aggregation. Update() calls may arrive + // concurrently. Update(context.Context, core.Number, Record) - // Collect is called during the SDK Collect() to - // finish one period of aggregation. Collect() is - // called in a single-threaded context. Update() - // calls may arrive concurrently. - Collect(context.Context, Record, Batcher) + // Checkpoint is called during the SDK Collect() to finish one + // period of aggregation. Checkpoint() is called in a + // single-threaded context. + Checkpoint(context.Context, Record) // Merge combines state from two aggregators into one. 
Merge(Aggregator, *Descriptor) diff --git a/sdk/metric/aggregator/array/array.go b/sdk/metric/aggregator/array/array.go index 2640a182143..2f31c65e137 100644 --- a/sdk/metric/aggregator/array/array.go +++ b/sdk/metric/aggregator/array/array.go @@ -68,7 +68,7 @@ func (c *Aggregator) Quantile(q float64) (core.Number, error) { return c.checkpoint.Quantile(q) } -func (c *Aggregator) Collect(ctx context.Context, rec export.Record, exp export.Batcher) { +func (c *Aggregator) Checkpoint(ctx context.Context, rec export.Record) { c.lock.Lock() c.checkpoint, c.current = c.current, nil c.lock.Unlock() @@ -83,8 +83,6 @@ func (c *Aggregator) Collect(ctx context.Context, rec export.Record, exp export. for _, v := range c.checkpoint { c.ckptSum.AddNumber(kind, v) } - - exp.Process(ctx, rec, c) } func (c *Aggregator) Update(_ context.Context, number core.Number, rec export.Record) { diff --git a/sdk/metric/aggregator/array/array_test.go b/sdk/metric/aggregator/array/array_test.go index d2099cbc23a..ef814943b63 100644 --- a/sdk/metric/aggregator/array/array_test.go +++ b/sdk/metric/aggregator/array/array_test.go @@ -36,7 +36,7 @@ type updateTest struct { func (ut *updateTest) run(t *testing.T, profile test.Profile) { ctx := context.Background() - batcher, record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !ut.absolute) + record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !ut.absolute) agg := New() @@ -54,7 +54,7 @@ func (ut *updateTest) run(t *testing.T, profile test.Profile) { } } - agg.Collect(ctx, record, batcher) + agg.Checkpoint(ctx, record) all.Sort() @@ -106,7 +106,7 @@ type mergeTest struct { func (mt *mergeTest) run(t *testing.T, profile test.Profile) { ctx := context.Background() - batcher, record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !mt.absolute) + record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !mt.absolute) agg1 := New() agg2 := New() @@ -133,8 +133,8 @@ func (mt 
*mergeTest) run(t *testing.T, profile test.Profile) { } } - agg1.Collect(ctx, record, batcher) - agg2.Collect(ctx, record, batcher) + agg1.Checkpoint(ctx, record) + agg2.Checkpoint(ctx, record) agg1.Merge(agg2, record.Descriptor()) @@ -198,14 +198,14 @@ func TestArrayErrors(t *testing.T) { ctx := context.Background() - batcher, record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false) + record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false) agg.Update(ctx, core.Number(0), record) if profile.NumberKind == core.Float64NumberKind { agg.Update(ctx, core.NewFloat64Number(math.NaN()), record) } - agg.Collect(ctx, record, batcher) + agg.Checkpoint(ctx, record) require.Equal(t, int64(1), agg.Count(), "NaN value was not counted") @@ -226,7 +226,7 @@ func TestArrayErrors(t *testing.T) { func TestArrayFloat64(t *testing.T) { for _, absolute := range []bool{false, true} { t.Run(fmt.Sprint("Absolute=", absolute), func(t *testing.T) { - batcher, record := test.NewAggregatorTest(export.MeasureKind, core.Float64NumberKind, !absolute) + record := test.NewAggregatorTest(export.MeasureKind, core.Float64NumberKind, !absolute) fpsf := func(sign int) []float64 { // Check behavior of a bunch of odd floating @@ -273,7 +273,7 @@ func TestArrayFloat64(t *testing.T) { } } - agg.Collect(ctx, record, batcher) + agg.Checkpoint(ctx, record) all.Sort() diff --git a/sdk/metric/aggregator/counter/counter.go b/sdk/metric/aggregator/counter/counter.go index d36123a6660..ec6df202dbe 100644 --- a/sdk/metric/aggregator/counter/counter.go +++ b/sdk/metric/aggregator/counter/counter.go @@ -26,7 +26,7 @@ type Aggregator struct { // current holds current increments to this counter record current core.Number - // checkpoint is a temporary used during Collect() + // checkpoint is a temporary used during Checkpoint() checkpoint core.Number } @@ -40,14 +40,12 @@ func New() *Aggregator { // Sum returns the accumulated count as a Number. 
func (c *Aggregator) Sum() core.Number { - return c.checkpoint.AsNumber() + return c.checkpoint } -// Collect checkpoints the current value (atomically) and exports it. -func (c *Aggregator) Collect(ctx context.Context, rec export.Record, exp export.Batcher) { +// Checkpoint checkpoints the current value (atomically) and exports it. +func (c *Aggregator) Checkpoint(ctx context.Context, rec export.Record) { c.checkpoint = c.current.SwapNumberAtomic(core.Number(0)) - - exp.Process(ctx, rec, c) } // Update modifies the current value (atomically) for later export. diff --git a/sdk/metric/aggregator/counter/counter_test.go b/sdk/metric/aggregator/counter/counter_test.go index 7cc598d8d5b..1c028036127 100644 --- a/sdk/metric/aggregator/counter/counter_test.go +++ b/sdk/metric/aggregator/counter/counter_test.go @@ -33,7 +33,7 @@ func TestCounterMonotonic(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { agg := New() - batcher, record := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false) + record := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false) sum := core.Number(0) for i := 0; i < count; i++ { @@ -42,7 +42,7 @@ func TestCounterMonotonic(t *testing.T) { agg.Update(ctx, x, record) } - agg.Collect(ctx, record, batcher) + agg.Checkpoint(ctx, record) require.Equal(t, sum, agg.Sum(), "Same sum - monotonic") }) @@ -54,7 +54,7 @@ func TestCounterMonotonicNegative(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { agg := New() - batcher, record := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false) + record := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false) for i := 0; i < count; i++ { agg.Update(ctx, profile.Random(-1), record) @@ -62,7 +62,7 @@ func TestCounterMonotonicNegative(t *testing.T) { sum := profile.Random(+1) agg.Update(ctx, sum, record) - agg.Collect(ctx, record, batcher) + agg.Checkpoint(ctx, record) require.Equal(t, sum, 
agg.Sum(), "Same sum - monotonic") }) @@ -74,7 +74,7 @@ func TestCounterNonMonotonic(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { agg := New() - batcher, record := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, true) + record := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, true) sum := core.Number(0) for i := 0; i < count; i++ { @@ -86,7 +86,7 @@ func TestCounterNonMonotonic(t *testing.T) { agg.Update(ctx, y, record) } - agg.Collect(ctx, record, batcher) + agg.Checkpoint(ctx, record) require.Equal(t, sum, agg.Sum(), "Same sum - monotonic") }) @@ -99,7 +99,7 @@ func TestCounterMerge(t *testing.T) { agg1 := New() agg2 := New() - batcher, record := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false) + record := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false) sum := core.Number(0) for i := 0; i < count; i++ { @@ -109,8 +109,8 @@ func TestCounterMerge(t *testing.T) { agg2.Update(ctx, x, record) } - agg1.Collect(ctx, record, batcher) - agg2.Collect(ctx, record, batcher) + agg1.Checkpoint(ctx, record) + agg2.Checkpoint(ctx, record) agg1.Merge(agg2, record.Descriptor()) diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index d2995cacaec..12019987658 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -91,16 +91,14 @@ func (c *Aggregator) toNumber(f float64) core.Number { return core.NewInt64Number(int64(f)) } -// Collect checkpoints the current value (atomically) and exports it. -func (c *Aggregator) Collect(ctx context.Context, rec export.Record, exp export.Batcher) { +// Checkpoint checkpoints the current value (atomically) and exports it. 
+func (c *Aggregator) Checkpoint(ctx context.Context, rec export.Record) { replace := sdk.NewDDSketch(c.cfg) c.lock.Lock() c.checkpoint = c.current c.current = replace c.lock.Unlock() - - exp.Process(ctx, rec, c) } // Update modifies the current value (atomically) for later export. diff --git a/sdk/metric/aggregator/ddsketch/ddsketch_test.go b/sdk/metric/aggregator/ddsketch/ddsketch_test.go index b3605c9dccf..c1c777abe7c 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch_test.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch_test.go @@ -34,7 +34,7 @@ type updateTest struct { func (ut *updateTest) run(t *testing.T, profile test.Profile) { ctx := context.Background() - batcher, record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !ut.absolute) + record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !ut.absolute) agg := New(NewDefaultConfig(), record.Descriptor()) all := test.NewNumbers(profile.NumberKind) @@ -50,7 +50,7 @@ func (ut *updateTest) run(t *testing.T, profile test.Profile) { } } - agg.Collect(ctx, record, batcher) + agg.Checkpoint(ctx, record) all.Sort() @@ -96,7 +96,7 @@ type mergeTest struct { func (mt *mergeTest) run(t *testing.T, profile test.Profile) { ctx := context.Background() - batcher, record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !mt.absolute) + record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !mt.absolute) agg1 := New(NewDefaultConfig(), record.Descriptor()) agg2 := New(NewDefaultConfig(), record.Descriptor()) @@ -126,8 +126,8 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { } } - agg1.Collect(ctx, record, batcher) - agg2.Collect(ctx, record, batcher) + agg1.Checkpoint(ctx, record) + agg2.Checkpoint(ctx, record) agg1.Merge(agg2, record.Descriptor()) diff --git a/sdk/metric/aggregator/gauge/gauge.go b/sdk/metric/aggregator/gauge/gauge.go index 759513a5ce2..0361abb4199 100644 --- a/sdk/metric/aggregator/gauge/gauge.go +++ 
b/sdk/metric/aggregator/gauge/gauge.go @@ -36,7 +36,7 @@ type ( // current is an atomic pointer to *gaugeData. It is never nil. current unsafe.Pointer - // checkpoint is a copy of the current value taken in Collect() + // checkpoint is a copy of the current value taken in Checkpoint() checkpoint unsafe.Pointer } @@ -78,11 +78,9 @@ func (g *Aggregator) Timestamp() time.Time { return (*gaugeData)(g.checkpoint).timestamp } -// Collect checkpoints the current value (atomically) and exports it. -func (g *Aggregator) Collect(ctx context.Context, rec export.Record, exp export.Batcher) { +// Checkpoint checkpoints the current value (atomically) and exports it. +func (g *Aggregator) Checkpoint(ctx context.Context, rec export.Record) { g.checkpoint = atomic.LoadPointer(&g.current) - - exp.Process(ctx, rec, g) } // Update modifies the current value (atomically) for later export. diff --git a/sdk/metric/aggregator/gauge/gauge_test.go b/sdk/metric/aggregator/gauge/gauge_test.go index 3fbb91c3640..49e51da7fa0 100644 --- a/sdk/metric/aggregator/gauge/gauge_test.go +++ b/sdk/metric/aggregator/gauge/gauge_test.go @@ -36,7 +36,7 @@ func TestGaugeNonMonotonic(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { agg := New() - batcher, record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, false) + record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, false) var last core.Number for i := 0; i < count; i++ { @@ -45,7 +45,7 @@ func TestGaugeNonMonotonic(t *testing.T) { agg.Update(ctx, x, record) } - agg.Collect(ctx, record, batcher) + agg.Checkpoint(ctx, record) require.Equal(t, last, agg.LastValue(), "Same last value - non-monotonic") }) @@ -57,7 +57,7 @@ func TestGaugeMonotonic(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { agg := New() - batcher, record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true) + record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, 
true) small := profile.Random(+1) last := small @@ -67,7 +67,7 @@ func TestGaugeMonotonic(t *testing.T) { agg.Update(ctx, last, record) } - agg.Collect(ctx, record, batcher) + agg.Checkpoint(ctx, record) require.Equal(t, last, agg.LastValue(), "Same last value - monotonic") }) @@ -79,7 +79,7 @@ func TestGaugeMonotonicDescending(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { agg := New() - batcher, record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true) + record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true) first := profile.Random(+1) agg.Update(ctx, first, record) @@ -89,7 +89,7 @@ func TestGaugeMonotonicDescending(t *testing.T) { agg.Update(ctx, x, record) } - agg.Collect(ctx, record, batcher) + agg.Checkpoint(ctx, record) require.Equal(t, first, agg.LastValue(), "Same last value - monotonic") }) @@ -102,7 +102,7 @@ func TestGaugeNormalMerge(t *testing.T) { agg1 := New() agg2 := New() - batcher, record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, false) + record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, false) first1 := profile.Random(+1) first2 := profile.Random(+1) @@ -111,8 +111,8 @@ func TestGaugeNormalMerge(t *testing.T) { agg1.Update(ctx, first1, record) agg2.Update(ctx, first2, record) - agg1.Collect(ctx, record, batcher) - agg2.Collect(ctx, record, batcher) + agg1.Checkpoint(ctx, record) + agg2.Checkpoint(ctx, record) t1 := agg1.Timestamp() t2 := agg2.Timestamp() @@ -132,7 +132,7 @@ func TestGaugeMonotonicMerge(t *testing.T) { agg1 := New() agg2 := New() - batcher, record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true) + record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true) first1 := profile.Random(+1) agg1.Update(ctx, first1, record) @@ -141,8 +141,8 @@ func TestGaugeMonotonicMerge(t *testing.T) { first2.AddNumber(profile.NumberKind, first1) agg2.Update(ctx, first2, record) - agg1.Collect(ctx, 
record, batcher) - agg2.Collect(ctx, record, batcher) + agg1.Checkpoint(ctx, record) + agg2.Checkpoint(ctx, record) agg1.Merge(agg2, record.Descriptor()) diff --git a/sdk/metric/aggregator/maxsumcount/msc.go b/sdk/metric/aggregator/maxsumcount/msc.go index 2cb689e351b..25b960c4723 100644 --- a/sdk/metric/aggregator/maxsumcount/msc.go +++ b/sdk/metric/aggregator/maxsumcount/msc.go @@ -58,8 +58,8 @@ func (c *Aggregator) Max() (core.Number, error) { return c.checkpoint.max, nil } -// Collect checkpoints the current value (atomically) and exports it. -func (c *Aggregator) Collect(ctx context.Context, rec export.Record, exp export.Batcher) { +// Checkpoint checkpoints the current value (atomically) and exports it. +func (c *Aggregator) Checkpoint(ctx context.Context, rec export.Record) { // N.B. There is no atomic operation that can update all three // values at once without a memory allocation. // @@ -73,8 +73,6 @@ func (c *Aggregator) Collect(ctx context.Context, rec export.Record, exp export. c.checkpoint.count.SetUint64(c.current.count.SwapUint64Atomic(0)) c.checkpoint.sum = c.current.sum.SwapNumberAtomic(core.Number(0)) c.checkpoint.max = c.current.max.SwapNumberAtomic(core.Number(0)) - - exp.Process(ctx, rec, c) } // Update modifies the current value (atomically) for later export. 
diff --git a/sdk/metric/aggregator/maxsumcount/msc_test.go b/sdk/metric/aggregator/maxsumcount/msc_test.go index 7f71d97c739..cd26bfb8346 100644 --- a/sdk/metric/aggregator/maxsumcount/msc_test.go +++ b/sdk/metric/aggregator/maxsumcount/msc_test.go @@ -30,7 +30,7 @@ func TestMaxSumCountAbsolute(t *testing.T) { ctx := context.Background() test.RunProfiles(t, func(t *testing.T, profile test.Profile) { - batcher, record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false) + record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false) agg := New() @@ -42,7 +42,7 @@ func TestMaxSumCountAbsolute(t *testing.T) { agg.Update(ctx, x, record) } - agg.Collect(ctx, record, batcher) + agg.Checkpoint(ctx, record) all.Sort() @@ -66,7 +66,7 @@ func TestMaxSumCountMerge(t *testing.T) { ctx := context.Background() test.RunProfiles(t, func(t *testing.T, profile test.Profile) { - batcher, record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false) + record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false) agg1 := New() agg2 := New() @@ -84,8 +84,8 @@ func TestMaxSumCountMerge(t *testing.T) { agg2.Update(ctx, x, record) } - agg1.Collect(ctx, record, batcher) - agg2.Collect(ctx, record, batcher) + agg1.Checkpoint(ctx, record) + agg2.Checkpoint(ctx, record) agg1.Merge(agg2, record.Descriptor()) diff --git a/sdk/metric/aggregator/sampling/sampling.go b/sdk/metric/aggregator/sampling/sampling.go deleted file mode 100644 index 74303b9d0b1..00000000000 --- a/sdk/metric/aggregator/sampling/sampling.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sampling // import "go.opentelemetry.io/otel/sdk/metric/aggregator/sampling" - -// import "github.com/lightstep/varopt" - -// var _ = varopt.Varopt{} diff --git a/sdk/metric/aggregator/test/test.go b/sdk/metric/aggregator/test/test.go index ef748ad01e1..56be42a8311 100644 --- a/sdk/metric/aggregator/test/test.go +++ b/sdk/metric/aggregator/test/test.go @@ -15,7 +15,6 @@ package test import ( - "context" "math/rand" "sort" "testing" @@ -24,7 +23,6 @@ import ( export "go.opentelemetry.io/otel/sdk/export/metric" ) -var _ export.Batcher = &metricBatcher{} var _ export.Record = &metricRecord{} const Magnitude = 1000 @@ -52,16 +50,13 @@ func newProfiles() []Profile { } } -type metricBatcher struct { -} - type metricRecord struct { descriptor *export.Descriptor } -func NewAggregatorTest(mkind export.MetricKind, nkind core.NumberKind, alternate bool) (export.Batcher, export.Record) { +func NewAggregatorTest(mkind export.MetricKind, nkind core.NumberKind, alternate bool) export.Record { desc := export.NewDescriptor("test.name", mkind, nil, "", "", nkind, alternate) - return &metricBatcher{}, &metricRecord{descriptor: desc} + return &metricRecord{descriptor: desc} } func (t *metricRecord) Descriptor() *export.Descriptor { @@ -76,17 +71,6 @@ func (t *metricRecord) EncodedLabels() string { return "" } -func (m *metricBatcher) AggregatorFor(rec export.Record) export.Aggregator { - return nil -} - -func (m *metricBatcher) ReadCheckpoint() export.Producer { - return nil -} - -func (m *metricBatcher) Process(context.Context, export.Record, 
export.Aggregator) { -} - func RunProfiles(t *testing.T, f func(*testing.T, Profile)) { for _, profile := range newProfiles() { t.Run(profile.NumberKind.String(), func(t *testing.T) { diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 1cafe96deb7..31ddc4c1db3 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -354,14 +354,14 @@ func (m *SDK) Collect(ctx context.Context) { refcount := atomic.LoadInt64(&inuse.refcount) if refcount > 0 { - m.collect(ctx, inuse) + m.checkpoint(ctx, inuse) m.addPrimary(inuse) continue } modified := atomic.LoadInt64(&inuse.modifiedEpoch) collected := atomic.LoadInt64(&inuse.collectedEpoch) - m.collect(ctx, inuse) + m.checkpoint(ctx, inuse) if modified >= collected { atomic.StoreInt64(&inuse.collectedEpoch, m.currentEpoch) @@ -382,7 +382,7 @@ func (m *SDK) Collect(ctx context.Context) { atomic.StoreInt64(&chances.reclaim, 0) if chances.next.primary.load() == hazardRecord { - m.collect(ctx, chances) + m.checkpoint(ctx, chances) m.addPrimary(chances) } } @@ -390,9 +390,10 @@ func (m *SDK) Collect(ctx context.Context) { m.currentEpoch++ } -func (m *SDK) collect(ctx context.Context, r *record) { +func (m *SDK) checkpoint(ctx context.Context, r *record) { if r.recorder != nil { - r.recorder.Collect(ctx, r, m.batcher) + r.recorder.Checkpoint(ctx, r) + m.batcher.Process(ctx, r, r.recorder) } } From 3be8e6eaa49982cb71b3beabe63532ae2c2b4493 Mon Sep 17 00:00:00 2001 From: jmacd Date: Tue, 5 Nov 2019 16:57:17 -0800 Subject: [PATCH 24/73] Record->Identifier --- sdk/export/metric/metric.go | 12 ++++++------ sdk/metric/aggregator/array/array.go | 8 ++++---- sdk/metric/aggregator/counter/counter.go | 6 +++--- sdk/metric/aggregator/ddsketch/ddsketch.go | 6 +++--- sdk/metric/aggregator/gauge/gauge.go | 6 +++--- sdk/metric/aggregator/maxsumcount/msc.go | 6 +++--- sdk/metric/aggregator/test/test.go | 4 ++-- sdk/metric/batcher/defaultkeys/defaultkeys.go | 10 +++++----- sdk/metric/batcher/ungrouped/ungrouped.go | 12 ++++++------ 
sdk/metric/benchmark_test.go | 17 +++++++++-------- sdk/metric/doc.go | 8 +++++++- sdk/metric/monotone_test.go | 12 ++++++------ sdk/metric/sdk.go | 2 +- sdk/metric/selector/simple/simple.go | 4 ++-- sdk/metric/stress_test.go | 10 +++++----- 15 files changed, 65 insertions(+), 58 deletions(-) diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 558ce893dc5..3ce33f00d33 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -37,7 +37,7 @@ type Batcher interface { // must access the specific aggregator to receive the // exporter data, since the format of the data varies // by aggregation. - Process(context.Context, Record, Aggregator) + Process(context.Context, Identifier, Aggregator) // ReadCheckpoint is the interface used by exporters to access // aggregate checkpoints after collection. @@ -55,7 +55,7 @@ type AggregationSelector interface { // // Note: This is context-free because the handle should not be // bound to the incoming context. This call should not block. - AggregatorFor(Record) Aggregator + AggregatorFor(Identifier) Aggregator } // Aggregator implements a specific aggregation behavior, e.g., @@ -64,20 +64,20 @@ type Aggregator interface { // Update receives a new measured value and incorporates it // into the aggregation. Update() calls may arrive // concurrently. - Update(context.Context, core.Number, Record) + Update(context.Context, core.Number, Identifier) // Checkpoint is called during the SDK Collect() to finish one // period of aggregation. Checkpoint() is called in a // single-threaded context. - Checkpoint(context.Context, Record) + Checkpoint(context.Context, Identifier) // Merge combines state from two aggregators into one. Merge(Aggregator, *Descriptor) } -// Record is the unit of export, pairing a metric +// Identifier is the unit of export, pairing a metric // instrument and set of labels. -type Record interface { +type Identifier interface { // Descriptor() describes the metric instrument. 
Descriptor() *Descriptor diff --git a/sdk/metric/aggregator/array/array.go b/sdk/metric/aggregator/array/array.go index 2f31c65e137..96505b782bf 100644 --- a/sdk/metric/aggregator/array/array.go +++ b/sdk/metric/aggregator/array/array.go @@ -68,12 +68,12 @@ func (c *Aggregator) Quantile(q float64) (core.Number, error) { return c.checkpoint.Quantile(q) } -func (c *Aggregator) Checkpoint(ctx context.Context, rec export.Record) { +func (c *Aggregator) Checkpoint(ctx context.Context, ident export.Identifier) { c.lock.Lock() c.checkpoint, c.current = c.current, nil c.lock.Unlock() - desc := rec.Descriptor() + desc := ident.Descriptor() kind := desc.NumberKind() c.sort(kind) @@ -85,8 +85,8 @@ func (c *Aggregator) Checkpoint(ctx context.Context, rec export.Record) { } } -func (c *Aggregator) Update(_ context.Context, number core.Number, rec export.Record) { - desc := rec.Descriptor() +func (c *Aggregator) Update(_ context.Context, number core.Number, ident export.Identifier) { + desc := ident.Descriptor() kind := desc.NumberKind() if kind == core.Float64NumberKind && math.IsNaN(number.AsFloat64()) { diff --git a/sdk/metric/aggregator/counter/counter.go b/sdk/metric/aggregator/counter/counter.go index ec6df202dbe..553b01cf724 100644 --- a/sdk/metric/aggregator/counter/counter.go +++ b/sdk/metric/aggregator/counter/counter.go @@ -44,13 +44,13 @@ func (c *Aggregator) Sum() core.Number { } // Checkpoint checkpoints the current value (atomically) and exports it. -func (c *Aggregator) Checkpoint(ctx context.Context, rec export.Record) { +func (c *Aggregator) Checkpoint(ctx context.Context, _ export.Identifier) { c.checkpoint = c.current.SwapNumberAtomic(core.Number(0)) } // Update modifies the current value (atomically) for later export. 
-func (c *Aggregator) Update(_ context.Context, number core.Number, rec export.Record) { - desc := rec.Descriptor() +func (c *Aggregator) Update(_ context.Context, number core.Number, ident export.Identifier) { + desc := ident.Descriptor() kind := desc.NumberKind() if !desc.Alternate() && number.IsNegative(kind) { // TODO warn diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index 12019987658..083bc45893e 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -92,7 +92,7 @@ func (c *Aggregator) toNumber(f float64) core.Number { } // Checkpoint checkpoints the current value (atomically) and exports it. -func (c *Aggregator) Checkpoint(ctx context.Context, rec export.Record) { +func (c *Aggregator) Checkpoint(ctx context.Context, _ export.Identifier) { replace := sdk.NewDDSketch(c.cfg) c.lock.Lock() @@ -102,8 +102,8 @@ func (c *Aggregator) Checkpoint(ctx context.Context, rec export.Record) { } // Update modifies the current value (atomically) for later export. -func (c *Aggregator) Update(_ context.Context, number core.Number, rec export.Record) { - desc := rec.Descriptor() +func (c *Aggregator) Update(_ context.Context, number core.Number, ident export.Identifier) { + desc := ident.Descriptor() kind := desc.NumberKind() if !desc.Alternate() && number.IsNegative(kind) { diff --git a/sdk/metric/aggregator/gauge/gauge.go b/sdk/metric/aggregator/gauge/gauge.go index 0361abb4199..ed0224493c0 100644 --- a/sdk/metric/aggregator/gauge/gauge.go +++ b/sdk/metric/aggregator/gauge/gauge.go @@ -79,13 +79,13 @@ func (g *Aggregator) Timestamp() time.Time { } // Checkpoint checkpoints the current value (atomically) and exports it. 
-func (g *Aggregator) Checkpoint(ctx context.Context, rec export.Record) { +func (g *Aggregator) Checkpoint(ctx context.Context, _ export.Identifier) { g.checkpoint = atomic.LoadPointer(&g.current) } // Update modifies the current value (atomically) for later export. -func (g *Aggregator) Update(_ context.Context, number core.Number, rec export.Record) { - desc := rec.Descriptor() +func (g *Aggregator) Update(_ context.Context, number core.Number, ident export.Identifier) { + desc := ident.Descriptor() if !desc.Alternate() { g.updateNonMonotonic(number) } else { diff --git a/sdk/metric/aggregator/maxsumcount/msc.go b/sdk/metric/aggregator/maxsumcount/msc.go index 25b960c4723..41932b2b302 100644 --- a/sdk/metric/aggregator/maxsumcount/msc.go +++ b/sdk/metric/aggregator/maxsumcount/msc.go @@ -59,7 +59,7 @@ func (c *Aggregator) Max() (core.Number, error) { } // Checkpoint checkpoints the current value (atomically) and exports it. -func (c *Aggregator) Checkpoint(ctx context.Context, rec export.Record) { +func (c *Aggregator) Checkpoint(ctx context.Context, _ export.Identifier) { // N.B. There is no atomic operation that can update all three // values at once without a memory allocation. // @@ -76,8 +76,8 @@ func (c *Aggregator) Checkpoint(ctx context.Context, rec export.Record) { } // Update modifies the current value (atomically) for later export. 
-func (c *Aggregator) Update(_ context.Context, number core.Number, rec export.Record) { - desc := rec.Descriptor() +func (c *Aggregator) Update(_ context.Context, number core.Number, ident export.Identifier) { + desc := ident.Descriptor() kind := desc.NumberKind() if !desc.Alternate() && number.IsNegative(kind) { diff --git a/sdk/metric/aggregator/test/test.go b/sdk/metric/aggregator/test/test.go index 56be42a8311..32ababb7a8e 100644 --- a/sdk/metric/aggregator/test/test.go +++ b/sdk/metric/aggregator/test/test.go @@ -23,7 +23,7 @@ import ( export "go.opentelemetry.io/otel/sdk/export/metric" ) -var _ export.Record = &metricRecord{} +var _ export.Identifier = &metricRecord{} const Magnitude = 1000 @@ -54,7 +54,7 @@ type metricRecord struct { descriptor *export.Descriptor } -func NewAggregatorTest(mkind export.MetricKind, nkind core.NumberKind, alternate bool) export.Record { +func NewAggregatorTest(mkind export.MetricKind, nkind core.NumberKind, alternate bool) export.Identifier { desc := export.NewDescriptor("test.name", mkind, nil, "", "", nkind, alternate) return &metricRecord{descriptor: desc} } diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys.go b/sdk/metric/batcher/defaultkeys/defaultkeys.go index 3578a84680a..586edce1a42 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys.go @@ -58,12 +58,12 @@ func New(selector export.AggregationSelector, lencoder export.LabelEncoder, stat } } -func (b *Batcher) AggregatorFor(record export.Record) export.Aggregator { - return b.selector.AggregatorFor(record) +func (b *Batcher) AggregatorFor(ident export.Identifier) export.Aggregator { + return b.selector.AggregatorFor(ident) } -func (b *Batcher) Process(_ context.Context, record export.Record, agg export.Aggregator) { - desc := record.Descriptor() +func (b *Batcher) Process(_ context.Context, ident export.Identifier, agg export.Aggregator) { + desc := ident.Descriptor() keys := desc.Keys() // Cache the mapping 
from Descriptor->Key->Index @@ -89,7 +89,7 @@ func (b *Batcher) Process(_ context.Context, record export.Record, agg export.Ag // Note also the possibility to speed this computation of // "encoded" via "canon" in the form of a (Descriptor, // LabelSet)->(Labels, Encoded) cache. - for _, kv := range record.Labels() { + for _, kv := range ident.Labels() { pos, ok := ki[kv.Key] if !ok { continue diff --git a/sdk/metric/batcher/ungrouped/ungrouped.go b/sdk/metric/batcher/ungrouped/ungrouped.go index 1bc434e04dd..210b50e667e 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped.go +++ b/sdk/metric/batcher/ungrouped/ungrouped.go @@ -52,21 +52,21 @@ func New(selector export.AggregationSelector, stateful bool) *Batcher { } } -func (b *Batcher) AggregatorFor(record export.Record) export.Aggregator { - return b.selector.AggregatorFor(record) +func (b *Batcher) AggregatorFor(ident export.Identifier) export.Aggregator { + return b.selector.AggregatorFor(ident) } -func (b *Batcher) Process(_ context.Context, record export.Record, agg export.Aggregator) { - desc := record.Descriptor() +func (b *Batcher) Process(_ context.Context, ident export.Identifier, agg export.Aggregator) { + desc := ident.Descriptor() key := batchKey{ descriptor: desc, - encoded: record.EncodedLabels(), + encoded: ident.EncodedLabels(), } value, ok := b.batchMap[key] if !ok { b.batchMap[key] = batchValue{ aggregator: agg, - labels: record.Labels(), + labels: ident.Labels(), } } else { value.aggregator.Merge(agg, desc) diff --git a/sdk/metric/benchmark_test.go b/sdk/metric/benchmark_test.go index 7f3bb8c83c0..0c538625d3f 100644 --- a/sdk/metric/benchmark_test.go +++ b/sdk/metric/benchmark_test.go @@ -45,25 +45,26 @@ func newFixture(b *testing.B) *benchFixture { return bf } -func (bf *benchFixture) AggregatorFor(rec export.Record) export.Aggregator { - switch rec.Descriptor().MetricKind() { +func (bf *benchFixture) AggregatorFor(ident export.Identifier) export.Aggregator { + descriptor := ident.Descriptor() + 
switch descriptor.MetricKind() { case export.CounterKind: return counter.New() case export.GaugeKind: return gauge.New() case export.MeasureKind: - if strings.HasSuffix(rec.Descriptor().Name(), "maxsumcount") { + if strings.HasSuffix(descriptor.Name(), "maxsumcount") { return maxsumcount.New() - } else if strings.HasSuffix(rec.Descriptor().Name(), "ddsketch") { - return ddsketch.New(ddsketch.NewDefaultConfig(), rec.Descriptor()) - } else if strings.HasSuffix(rec.Descriptor().Name(), "array") { - return ddsketch.New(ddsketch.NewDefaultConfig(), rec.Descriptor()) + } else if strings.HasSuffix(descriptor.Name(), "ddsketch") { + return ddsketch.New(ddsketch.NewDefaultConfig(), descriptor) + } else if strings.HasSuffix(descriptor.Name(), "array") { + return ddsketch.New(ddsketch.NewDefaultConfig(), descriptor) } } return nil } -func (bf *benchFixture) Process(ctx context.Context, rec export.Record, agg export.Aggregator) { +func (bf *benchFixture) Process(context.Context, export.Identifier, export.Aggregator) { } func (bf *benchFixture) ReadCheckpoint() export.Producer { diff --git a/sdk/metric/doc.go b/sdk/metric/doc.go index 3088b55760c..a9424f377db 100644 --- a/sdk/metric/doc.go +++ b/sdk/metric/doc.go @@ -92,9 +92,15 @@ Export Pipeline While the SDK serves to maintain a current set of records and coordinate collection, the behavior of a metrics export pipeline is configured through the export types in go.opentelemetry.io/otel/sdk/export/metric. 
+ +AggregationSelector +LabelEncoder + They are briefly summarized here: -Aggregator: a specific algorithm for combining metric events +Aggregator + + AggregationSelector: decides which aggregator to use Batcher: determine the aggregation dimensions, group (and de-dup) records Descriptor: summarizes an instrument and its metadata diff --git a/sdk/metric/monotone_test.go b/sdk/metric/monotone_test.go index 0cc6f1f5dfc..b80c08c6763 100644 --- a/sdk/metric/monotone_test.go +++ b/sdk/metric/monotone_test.go @@ -37,7 +37,7 @@ type monotoneBatcher struct { currentTime *time.Time } -func (m *monotoneBatcher) AggregatorFor(rec export.Record) export.Aggregator { +func (m *monotoneBatcher) AggregatorFor(export.Identifier) export.Aggregator { return gauge.New() } @@ -45,11 +45,11 @@ func (m *monotoneBatcher) ReadCheckpoint() export.Producer { return nil } -func (m *monotoneBatcher) Process(_ context.Context, record export.Record, agg export.Aggregator) { - require.Equal(m.t, "my.gauge.name", record.Descriptor().Name()) - require.Equal(m.t, 1, len(record.Labels())) - require.Equal(m.t, "a", string(record.Labels()[0].Key)) - require.Equal(m.t, "b", record.Labels()[0].Value.Emit()) +func (m *monotoneBatcher) Process(_ context.Context, ident export.Identifier, agg export.Aggregator) { + require.Equal(m.t, "my.gauge.name", ident.Descriptor().Name()) + require.Equal(m.t, 1, len(ident.Labels())) + require.Equal(m.t, "a", string(ident.Labels()[0].Key)) + require.Equal(m.t, "b", ident.Labels()[0].Value.Emit()) gauge := agg.(*gauge.Aggregator) val := gauge.LastValue() diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 31ddc4c1db3..5cbe3bbed46 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -144,7 +144,7 @@ var ( _ api.LabelSet = &labels{} _ api.InstrumentImpl = &instrument{} _ api.HandleImpl = &record{} - _ export.Record = &record{} + _ export.Identifier = &record{} // hazardRecord is used as a pointer value that indicates the // value is not included in any list. 
(`nil` would be diff --git a/sdk/metric/selector/simple/simple.go b/sdk/metric/selector/simple/simple.go index 8a983da5721..e11f1d0817b 100644 --- a/sdk/metric/selector/simple/simple.go +++ b/sdk/metric/selector/simple/simple.go @@ -29,8 +29,8 @@ func New() export.AggregationSelector { return selector{} } -func (s selector) AggregatorFor(record export.Record) export.Aggregator { - switch record.Descriptor().MetricKind() { +func (s selector) AggregatorFor(ident export.Identifier) export.Aggregator { + switch ident.Descriptor().MetricKind() { case export.GaugeKind: return gauge.New() case export.MeasureKind: diff --git a/sdk/metric/stress_test.go b/sdk/metric/stress_test.go index 667b9e24231..5e96caf1543 100644 --- a/sdk/metric/stress_test.go +++ b/sdk/metric/stress_test.go @@ -227,8 +227,8 @@ func (f *testFixture) preCollect() { f.dupCheck = map[testKey]int{} } -func (f *testFixture) AggregatorFor(record export.Record) export.Aggregator { - switch record.Descriptor().MetricKind() { +func (f *testFixture) AggregatorFor(ident export.Identifier) export.Aggregator { + switch ident.Descriptor().MetricKind() { case export.CounterKind: return counter.New() case export.GaugeKind: @@ -242,10 +242,10 @@ func (f *testFixture) ReadCheckpoint() export.Producer { return nil } -func (f *testFixture) Process(ctx context.Context, record export.Record, agg export.Aggregator) { - desc := record.Descriptor() +func (f *testFixture) Process(ctx context.Context, ident export.Identifier, agg export.Aggregator) { + desc := ident.Descriptor() key := testKey{ - labels: canonicalizeLabels(record.Labels()), + labels: canonicalizeLabels(ident.Labels()), descriptor: desc, } if f.dupCheck[key] == 0 { From 214b8825e9695531ac669b285e632048be565120 Mon Sep 17 00:00:00 2001 From: jmacd Date: Tue, 5 Nov 2019 17:21:27 -0800 Subject: [PATCH 25/73] Remove export.Record a.k.a. 
Identifier --- sdk/export/metric/metric.go | 39 +++++++----------- sdk/metric/aggregator/array/array.go | 6 +-- sdk/metric/aggregator/array/array_test.go | 40 +++++++++---------- sdk/metric/aggregator/counter/counter.go | 5 +-- sdk/metric/aggregator/counter/counter_test.go | 36 ++++++++--------- sdk/metric/aggregator/ddsketch/ddsketch.go | 5 +-- .../aggregator/ddsketch/ddsketch_test.go | 30 +++++++------- sdk/metric/aggregator/gauge/gauge.go | 5 +-- sdk/metric/aggregator/gauge/gauge_test.go | 24 +++++------ sdk/metric/aggregator/maxsumcount/msc.go | 5 +-- sdk/metric/aggregator/maxsumcount/msc_test.go | 12 +++--- sdk/metric/aggregator/test/test.go | 22 +--------- sdk/metric/batcher/defaultkeys/defaultkeys.go | 9 ++--- sdk/metric/batcher/ungrouped/ungrouped.go | 11 +++-- sdk/metric/benchmark_test.go | 5 +-- sdk/metric/monotone_test.go | 12 +++--- sdk/metric/sdk.go | 22 +++------- sdk/metric/selector/simple/simple.go | 4 +- sdk/metric/stress_test.go | 9 ++--- 19 files changed, 125 insertions(+), 176 deletions(-) diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 3ce33f00d33..122b140e381 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -37,7 +37,11 @@ type Batcher interface { // must access the specific aggregator to receive the // exporter data, since the format of the data varies // by aggregation. - Process(context.Context, Identifier, Aggregator) + Process(ctx context.Context, + descriptor *Descriptor, + labels []core.KeyValue, + encodedLabels string, + aggregator Aggregator) // ReadCheckpoint is the interface used by exporters to access // aggregate checkpoints after collection. @@ -55,7 +59,7 @@ type AggregationSelector interface { // // Note: This is context-free because the handle should not be // bound to the incoming context. This call should not block. 
- AggregatorFor(Identifier) Aggregator + AggregatorFor(*Descriptor) Aggregator } // Aggregator implements a specific aggregation behavior, e.g., @@ -64,32 +68,17 @@ type Aggregator interface { // Update receives a new measured value and incorporates it // into the aggregation. Update() calls may arrive // concurrently. - Update(context.Context, core.Number, Identifier) + Update(context.Context, core.Number, *Descriptor) // Checkpoint is called during the SDK Collect() to finish one // period of aggregation. Checkpoint() is called in a // single-threaded context. - Checkpoint(context.Context, Identifier) + Checkpoint(context.Context, *Descriptor) // Merge combines state from two aggregators into one. Merge(Aggregator, *Descriptor) } -// Identifier is the unit of export, pairing a metric -// instrument and set of labels. -type Identifier interface { - // Descriptor() describes the metric instrument. - Descriptor() *Descriptor - - // Labels() describe the labsels corresponding the - // aggregation being performed. - Labels() []core.KeyValue - - // EncodedLabels are a unique string-encoded form of Labels() - // suitable for use as a map key. - EncodedLabels() string -} - // Exporter handles presentation of the checkpoint of aggregate // metrics. This is the final stage of a metrics export pipeline, // where metric data are formatted for a specific system. @@ -105,6 +94,12 @@ type LabelEncoder interface { EncodeLabels([]core.KeyValue) string } +// Producer allows a Exporter to access a checkpoint of +// aggregated metrics one at a time. +type Producer interface { + Foreach(func(Aggregator, ProducedRecord)) +} + // ProducedRecord type ProducedRecord struct { Descriptor *Descriptor @@ -113,12 +108,6 @@ type ProducedRecord struct { EncodedLabels string } -// Producer allows a Exporter to access a checkpoint of -// aggregated metrics one at a time. -type Producer interface { - Foreach(func(Aggregator, ProducedRecord)) -} - // Kind describes the kind of instrument. 
type MetricKind int8 diff --git a/sdk/metric/aggregator/array/array.go b/sdk/metric/aggregator/array/array.go index 96505b782bf..7481563c36d 100644 --- a/sdk/metric/aggregator/array/array.go +++ b/sdk/metric/aggregator/array/array.go @@ -68,12 +68,11 @@ func (c *Aggregator) Quantile(q float64) (core.Number, error) { return c.checkpoint.Quantile(q) } -func (c *Aggregator) Checkpoint(ctx context.Context, ident export.Identifier) { +func (c *Aggregator) Checkpoint(ctx context.Context, desc *export.Descriptor) { c.lock.Lock() c.checkpoint, c.current = c.current, nil c.lock.Unlock() - desc := ident.Descriptor() kind := desc.NumberKind() c.sort(kind) @@ -85,8 +84,7 @@ func (c *Aggregator) Checkpoint(ctx context.Context, ident export.Identifier) { } } -func (c *Aggregator) Update(_ context.Context, number core.Number, ident export.Identifier) { - desc := ident.Descriptor() +func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) { kind := desc.NumberKind() if kind == core.Float64NumberKind && math.IsNaN(number.AsFloat64()) { diff --git a/sdk/metric/aggregator/array/array_test.go b/sdk/metric/aggregator/array/array_test.go index ef814943b63..8833143ffeb 100644 --- a/sdk/metric/aggregator/array/array_test.go +++ b/sdk/metric/aggregator/array/array_test.go @@ -36,7 +36,7 @@ type updateTest struct { func (ut *updateTest) run(t *testing.T, profile test.Profile) { ctx := context.Background() - record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !ut.absolute) + descriptor := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !ut.absolute) agg := New() @@ -45,16 +45,16 @@ func (ut *updateTest) run(t *testing.T, profile test.Profile) { for i := 0; i < ut.count; i++ { x := profile.Random(+1) all.Append(x) - agg.Update(ctx, x, record) + agg.Update(ctx, x, descriptor) if !ut.absolute { y := profile.Random(-1) all.Append(y) - agg.Update(ctx, y, record) + agg.Update(ctx, y, descriptor) } } - agg.Checkpoint(ctx, 
record) + agg.Checkpoint(ctx, descriptor) all.Sort() @@ -106,7 +106,7 @@ type mergeTest struct { func (mt *mergeTest) run(t *testing.T, profile test.Profile) { ctx := context.Background() - record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !mt.absolute) + descriptor := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !mt.absolute) agg1 := New() agg2 := New() @@ -116,27 +116,27 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { for i := 0; i < mt.count; i++ { x1 := profile.Random(+1) all.Append(x1) - agg1.Update(ctx, x1, record) + agg1.Update(ctx, x1, descriptor) x2 := profile.Random(+1) all.Append(x2) - agg2.Update(ctx, x2, record) + agg2.Update(ctx, x2, descriptor) if !mt.absolute { y1 := profile.Random(-1) all.Append(y1) - agg1.Update(ctx, y1, record) + agg1.Update(ctx, y1, descriptor) y2 := profile.Random(-1) all.Append(y2) - agg2.Update(ctx, y2, record) + agg2.Update(ctx, y2, descriptor) } } - agg1.Checkpoint(ctx, record) - agg2.Checkpoint(ctx, record) + agg1.Checkpoint(ctx, descriptor) + agg2.Checkpoint(ctx, descriptor) - agg1.Merge(agg2, record.Descriptor()) + agg1.Merge(agg2, descriptor) all.Sort() @@ -198,14 +198,14 @@ func TestArrayErrors(t *testing.T) { ctx := context.Background() - record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false) + descriptor := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false) - agg.Update(ctx, core.Number(0), record) + agg.Update(ctx, core.Number(0), descriptor) if profile.NumberKind == core.Float64NumberKind { - agg.Update(ctx, core.NewFloat64Number(math.NaN()), record) + agg.Update(ctx, core.NewFloat64Number(math.NaN()), descriptor) } - agg.Checkpoint(ctx, record) + agg.Checkpoint(ctx, descriptor) require.Equal(t, int64(1), agg.Count(), "NaN value was not counted") @@ -226,7 +226,7 @@ func TestArrayErrors(t *testing.T) { func TestArrayFloat64(t *testing.T) { for _, absolute := range []bool{false, true} { 
t.Run(fmt.Sprint("Absolute=", absolute), func(t *testing.T) { - record := test.NewAggregatorTest(export.MeasureKind, core.Float64NumberKind, !absolute) + descriptor := test.NewAggregatorTest(export.MeasureKind, core.Float64NumberKind, !absolute) fpsf := func(sign int) []float64 { // Check behavior of a bunch of odd floating @@ -263,17 +263,17 @@ func TestArrayFloat64(t *testing.T) { for _, f := range fpsf(1) { all.Append(core.NewFloat64Number(f)) - agg.Update(ctx, core.NewFloat64Number(f), record) + agg.Update(ctx, core.NewFloat64Number(f), descriptor) } if !absolute { for _, f := range fpsf(-1) { all.Append(core.NewFloat64Number(f)) - agg.Update(ctx, core.NewFloat64Number(f), record) + agg.Update(ctx, core.NewFloat64Number(f), descriptor) } } - agg.Checkpoint(ctx, record) + agg.Checkpoint(ctx, descriptor) all.Sort() diff --git a/sdk/metric/aggregator/counter/counter.go b/sdk/metric/aggregator/counter/counter.go index 553b01cf724..410748e1462 100644 --- a/sdk/metric/aggregator/counter/counter.go +++ b/sdk/metric/aggregator/counter/counter.go @@ -44,13 +44,12 @@ func (c *Aggregator) Sum() core.Number { } // Checkpoint checkpoints the current value (atomically) and exports it. -func (c *Aggregator) Checkpoint(ctx context.Context, _ export.Identifier) { +func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) { c.checkpoint = c.current.SwapNumberAtomic(core.Number(0)) } // Update modifies the current value (atomically) for later export. 
-func (c *Aggregator) Update(_ context.Context, number core.Number, ident export.Identifier) { - desc := ident.Descriptor() +func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) { kind := desc.NumberKind() if !desc.Alternate() && number.IsNegative(kind) { // TODO warn diff --git a/sdk/metric/aggregator/counter/counter_test.go b/sdk/metric/aggregator/counter/counter_test.go index 1c028036127..d1a1b74e938 100644 --- a/sdk/metric/aggregator/counter/counter_test.go +++ b/sdk/metric/aggregator/counter/counter_test.go @@ -33,16 +33,16 @@ func TestCounterMonotonic(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { agg := New() - record := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false) + descriptor := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false) sum := core.Number(0) for i := 0; i < count; i++ { x := profile.Random(+1) sum.AddNumber(profile.NumberKind, x) - agg.Update(ctx, x, record) + agg.Update(ctx, x, descriptor) } - agg.Checkpoint(ctx, record) + agg.Checkpoint(ctx, descriptor) require.Equal(t, sum, agg.Sum(), "Same sum - monotonic") }) @@ -54,15 +54,15 @@ func TestCounterMonotonicNegative(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { agg := New() - record := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false) + descriptor := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false) for i := 0; i < count; i++ { - agg.Update(ctx, profile.Random(-1), record) + agg.Update(ctx, profile.Random(-1), descriptor) } sum := profile.Random(+1) - agg.Update(ctx, sum, record) - agg.Checkpoint(ctx, record) + agg.Update(ctx, sum, descriptor) + agg.Checkpoint(ctx, descriptor) require.Equal(t, sum, agg.Sum(), "Same sum - monotonic") }) @@ -74,7 +74,7 @@ func TestCounterNonMonotonic(t *testing.T) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) { agg := New() - record := 
test.NewAggregatorTest(export.CounterKind, profile.NumberKind, true) + descriptor := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, true) sum := core.Number(0) for i := 0; i < count; i++ { @@ -82,11 +82,11 @@ func TestCounterNonMonotonic(t *testing.T) { y := profile.Random(-1) sum.AddNumber(profile.NumberKind, x) sum.AddNumber(profile.NumberKind, y) - agg.Update(ctx, x, record) - agg.Update(ctx, y, record) + agg.Update(ctx, x, descriptor) + agg.Update(ctx, y, descriptor) } - agg.Checkpoint(ctx, record) + agg.Checkpoint(ctx, descriptor) require.Equal(t, sum, agg.Sum(), "Same sum - monotonic") }) @@ -99,22 +99,22 @@ func TestCounterMerge(t *testing.T) { agg1 := New() agg2 := New() - record := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false) + descriptor := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false) sum := core.Number(0) for i := 0; i < count; i++ { x := profile.Random(+1) sum.AddNumber(profile.NumberKind, x) - agg1.Update(ctx, x, record) - agg2.Update(ctx, x, record) + agg1.Update(ctx, x, descriptor) + agg2.Update(ctx, x, descriptor) } - agg1.Checkpoint(ctx, record) - agg2.Checkpoint(ctx, record) + agg1.Checkpoint(ctx, descriptor) + agg2.Checkpoint(ctx, descriptor) - agg1.Merge(agg2, record.Descriptor()) + agg1.Merge(agg2, descriptor) - sum.AddNumber(record.Descriptor().NumberKind(), sum) + sum.AddNumber(descriptor.NumberKind(), sum) require.Equal(t, sum, agg1.Sum(), "Same sum - monotonic") }) diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index 083bc45893e..0ca70a0957f 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -92,7 +92,7 @@ func (c *Aggregator) toNumber(f float64) core.Number { } // Checkpoint checkpoints the current value (atomically) and exports it. 
-func (c *Aggregator) Checkpoint(ctx context.Context, _ export.Identifier) { +func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) { replace := sdk.NewDDSketch(c.cfg) c.lock.Lock() @@ -102,8 +102,7 @@ func (c *Aggregator) Checkpoint(ctx context.Context, _ export.Identifier) { } // Update modifies the current value (atomically) for later export. -func (c *Aggregator) Update(_ context.Context, number core.Number, ident export.Identifier) { - desc := ident.Descriptor() +func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) { kind := desc.NumberKind() if !desc.Alternate() && number.IsNegative(kind) { diff --git a/sdk/metric/aggregator/ddsketch/ddsketch_test.go b/sdk/metric/aggregator/ddsketch/ddsketch_test.go index c1c777abe7c..fd7f2f63965 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch_test.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch_test.go @@ -34,23 +34,23 @@ type updateTest struct { func (ut *updateTest) run(t *testing.T, profile test.Profile) { ctx := context.Background() - record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !ut.absolute) - agg := New(NewDefaultConfig(), record.Descriptor()) + descriptor := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !ut.absolute) + agg := New(NewDefaultConfig(), descriptor) all := test.NewNumbers(profile.NumberKind) for i := 0; i < count; i++ { x := profile.Random(+1) all.Append(x) - agg.Update(ctx, x, record) + agg.Update(ctx, x, descriptor) if !ut.absolute { y := profile.Random(-1) all.Append(y) - agg.Update(ctx, y, record) + agg.Update(ctx, y, descriptor) } } - agg.Checkpoint(ctx, record) + agg.Checkpoint(ctx, descriptor) all.Sort() @@ -96,40 +96,40 @@ type mergeTest struct { func (mt *mergeTest) run(t *testing.T, profile test.Profile) { ctx := context.Background() - record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !mt.absolute) + descriptor := test.NewAggregatorTest(export.MeasureKind, 
profile.NumberKind, !mt.absolute) - agg1 := New(NewDefaultConfig(), record.Descriptor()) - agg2 := New(NewDefaultConfig(), record.Descriptor()) + agg1 := New(NewDefaultConfig(), descriptor) + agg2 := New(NewDefaultConfig(), descriptor) all := test.NewNumbers(profile.NumberKind) for i := 0; i < count; i++ { x := profile.Random(+1) all.Append(x) - agg1.Update(ctx, x, record) + agg1.Update(ctx, x, descriptor) if !mt.absolute { y := profile.Random(-1) all.Append(y) - agg1.Update(ctx, y, record) + agg1.Update(ctx, y, descriptor) } } for i := 0; i < count; i++ { x := profile.Random(+1) all.Append(x) - agg2.Update(ctx, x, record) + agg2.Update(ctx, x, descriptor) if !mt.absolute { y := profile.Random(-1) all.Append(y) - agg2.Update(ctx, y, record) + agg2.Update(ctx, y, descriptor) } } - agg1.Checkpoint(ctx, record) - agg2.Checkpoint(ctx, record) + agg1.Checkpoint(ctx, descriptor) + agg2.Checkpoint(ctx, descriptor) - agg1.Merge(agg2, record.Descriptor()) + agg1.Merge(agg2, descriptor) all.Sort() diff --git a/sdk/metric/aggregator/gauge/gauge.go b/sdk/metric/aggregator/gauge/gauge.go index ed0224493c0..adf60b6b579 100644 --- a/sdk/metric/aggregator/gauge/gauge.go +++ b/sdk/metric/aggregator/gauge/gauge.go @@ -79,13 +79,12 @@ func (g *Aggregator) Timestamp() time.Time { } // Checkpoint checkpoints the current value (atomically) and exports it. -func (g *Aggregator) Checkpoint(ctx context.Context, _ export.Identifier) { +func (g *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) { g.checkpoint = atomic.LoadPointer(&g.current) } // Update modifies the current value (atomically) for later export. 
-func (g *Aggregator) Update(_ context.Context, number core.Number, ident export.Identifier) { - desc := ident.Descriptor() +func (g *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) { if !desc.Alternate() { g.updateNonMonotonic(number) } else { diff --git a/sdk/metric/aggregator/gauge/gauge_test.go b/sdk/metric/aggregator/gauge/gauge_test.go index 49e51da7fa0..24b9290aad2 100644 --- a/sdk/metric/aggregator/gauge/gauge_test.go +++ b/sdk/metric/aggregator/gauge/gauge_test.go @@ -102,23 +102,23 @@ func TestGaugeNormalMerge(t *testing.T) { agg1 := New() agg2 := New() - record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, false) + descriptor := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, false) first1 := profile.Random(+1) first2 := profile.Random(+1) first1.AddNumber(profile.NumberKind, first2) - agg1.Update(ctx, first1, record) - agg2.Update(ctx, first2, record) + agg1.Update(ctx, first1, descriptor) + agg2.Update(ctx, first2, descriptor) - agg1.Checkpoint(ctx, record) - agg2.Checkpoint(ctx, record) + agg1.Checkpoint(ctx, descriptor) + agg2.Checkpoint(ctx, descriptor) t1 := agg1.Timestamp() t2 := agg2.Timestamp() require.True(t, t1.Before(t2)) - agg1.Merge(agg2, record.Descriptor()) + agg1.Merge(agg2, descriptor) require.Equal(t, t2, agg1.Timestamp(), "Merged timestamp - non-monotonic") require.Equal(t, first2, agg1.LastValue(), "Merged value - non-monotonic") @@ -132,19 +132,19 @@ func TestGaugeMonotonicMerge(t *testing.T) { agg1 := New() agg2 := New() - record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true) + descriptor := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true) first1 := profile.Random(+1) - agg1.Update(ctx, first1, record) + agg1.Update(ctx, first1, descriptor) first2 := profile.Random(+1) first2.AddNumber(profile.NumberKind, first1) - agg2.Update(ctx, first2, record) + agg2.Update(ctx, first2, descriptor) - agg1.Checkpoint(ctx, record) - 
agg2.Checkpoint(ctx, record) + agg1.Checkpoint(ctx, descriptor) + agg2.Checkpoint(ctx, descriptor) - agg1.Merge(agg2, record.Descriptor()) + agg1.Merge(agg2, descriptor) require.Equal(t, first2, agg1.LastValue(), "Merged value - monotonic") require.Equal(t, agg2.Timestamp(), agg1.Timestamp(), "Merged timestamp - monotonic") diff --git a/sdk/metric/aggregator/maxsumcount/msc.go b/sdk/metric/aggregator/maxsumcount/msc.go index 41932b2b302..2cc194296da 100644 --- a/sdk/metric/aggregator/maxsumcount/msc.go +++ b/sdk/metric/aggregator/maxsumcount/msc.go @@ -59,7 +59,7 @@ func (c *Aggregator) Max() (core.Number, error) { } // Checkpoint checkpoints the current value (atomically) and exports it. -func (c *Aggregator) Checkpoint(ctx context.Context, _ export.Identifier) { +func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) { // N.B. There is no atomic operation that can update all three // values at once without a memory allocation. // @@ -76,8 +76,7 @@ func (c *Aggregator) Checkpoint(ctx context.Context, _ export.Identifier) { } // Update modifies the current value (atomically) for later export. 
-func (c *Aggregator) Update(_ context.Context, number core.Number, ident export.Identifier) { - desc := ident.Descriptor() +func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) { kind := desc.NumberKind() if !desc.Alternate() && number.IsNegative(kind) { diff --git a/sdk/metric/aggregator/maxsumcount/msc_test.go b/sdk/metric/aggregator/maxsumcount/msc_test.go index cd26bfb8346..105ca675de0 100644 --- a/sdk/metric/aggregator/maxsumcount/msc_test.go +++ b/sdk/metric/aggregator/maxsumcount/msc_test.go @@ -66,7 +66,7 @@ func TestMaxSumCountMerge(t *testing.T) { ctx := context.Background() test.RunProfiles(t, func(t *testing.T, profile test.Profile) { - record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false) + descriptor := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false) agg1 := New() agg2 := New() @@ -76,18 +76,18 @@ func TestMaxSumCountMerge(t *testing.T) { for i := 0; i < count; i++ { x := profile.Random(+1) all.Append(x) - agg1.Update(ctx, x, record) + agg1.Update(ctx, x, descriptor) } for i := 0; i < count; i++ { x := profile.Random(+1) all.Append(x) - agg2.Update(ctx, x, record) + agg2.Update(ctx, x, descriptor) } - agg1.Checkpoint(ctx, record) - agg2.Checkpoint(ctx, record) + agg1.Checkpoint(ctx, descriptor) + agg2.Checkpoint(ctx, descriptor) - agg1.Merge(agg2, record.Descriptor()) + agg1.Merge(agg2, descriptor) all.Sort() diff --git a/sdk/metric/aggregator/test/test.go b/sdk/metric/aggregator/test/test.go index 32ababb7a8e..84b447a43f4 100644 --- a/sdk/metric/aggregator/test/test.go +++ b/sdk/metric/aggregator/test/test.go @@ -23,8 +23,6 @@ import ( export "go.opentelemetry.io/otel/sdk/export/metric" ) -var _ export.Identifier = &metricRecord{} - const Magnitude = 1000 type Profile struct { @@ -50,25 +48,9 @@ func newProfiles() []Profile { } } -type metricRecord struct { - descriptor *export.Descriptor -} - -func NewAggregatorTest(mkind export.MetricKind, nkind core.NumberKind, 
alternate bool) export.Identifier { +func NewAggregatorTest(mkind export.MetricKind, nkind core.NumberKind, alternate bool) *export.Descriptor { desc := export.NewDescriptor("test.name", mkind, nil, "", "", nkind, alternate) - return &metricRecord{descriptor: desc} -} - -func (t *metricRecord) Descriptor() *export.Descriptor { - return t.descriptor -} - -func (t *metricRecord) Labels() []core.KeyValue { - return nil -} - -func (t *metricRecord) EncodedLabels() string { - return "" + return desc } func RunProfiles(t *testing.T, f func(*testing.T, Profile)) { diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys.go b/sdk/metric/batcher/defaultkeys/defaultkeys.go index 586edce1a42..a0bf3407b5d 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys.go @@ -58,12 +58,11 @@ func New(selector export.AggregationSelector, lencoder export.LabelEncoder, stat } } -func (b *Batcher) AggregatorFor(ident export.Identifier) export.Aggregator { - return b.selector.AggregatorFor(ident) +func (b *Batcher) AggregatorFor(descriptor *export.Descriptor) export.Aggregator { + return b.selector.AggregatorFor(descriptor) } -func (b *Batcher) Process(_ context.Context, ident export.Identifier, agg export.Aggregator) { - desc := ident.Descriptor() +func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, agg export.Aggregator) { keys := desc.Keys() // Cache the mapping from Descriptor->Key->Index @@ -89,7 +88,7 @@ func (b *Batcher) Process(_ context.Context, ident export.Identifier, agg export // Note also the possibility to speed this computation of // "encoded" via "canon" in the form of a (Descriptor, // LabelSet)->(Labels, Encoded) cache. 
- for _, kv := range ident.Labels() { + for _, kv := range labels { pos, ok := ki[kv.Key] if !ok { continue diff --git a/sdk/metric/batcher/ungrouped/ungrouped.go b/sdk/metric/batcher/ungrouped/ungrouped.go index 210b50e667e..8fc3f543fdd 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped.go +++ b/sdk/metric/batcher/ungrouped/ungrouped.go @@ -52,21 +52,20 @@ func New(selector export.AggregationSelector, stateful bool) *Batcher { } } -func (b *Batcher) AggregatorFor(ident export.Identifier) export.Aggregator { - return b.selector.AggregatorFor(ident) +func (b *Batcher) AggregatorFor(descriptor *export.Descriptor) export.Aggregator { + return b.selector.AggregatorFor(descriptor) } -func (b *Batcher) Process(_ context.Context, ident export.Identifier, agg export.Aggregator) { - desc := ident.Descriptor() +func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, encodedLabels string, agg export.Aggregator) { key := batchKey{ descriptor: desc, - encoded: ident.EncodedLabels(), + encoded: encodedLabels, } value, ok := b.batchMap[key] if !ok { b.batchMap[key] = batchValue{ aggregator: agg, - labels: ident.Labels(), + labels: labels, } } else { value.aggregator.Merge(agg, desc) diff --git a/sdk/metric/benchmark_test.go b/sdk/metric/benchmark_test.go index 0c538625d3f..14f2a5c22a5 100644 --- a/sdk/metric/benchmark_test.go +++ b/sdk/metric/benchmark_test.go @@ -45,8 +45,7 @@ func newFixture(b *testing.B) *benchFixture { return bf } -func (bf *benchFixture) AggregatorFor(ident export.Identifier) export.Aggregator { - descriptor := ident.Descriptor() +func (bf *benchFixture) AggregatorFor(descriptor *export.Descriptor) export.Aggregator { switch descriptor.MetricKind() { case export.CounterKind: return counter.New() @@ -64,7 +63,7 @@ func (bf *benchFixture) AggregatorFor(ident export.Identifier) export.Aggregator return nil } -func (bf *benchFixture) Process(context.Context, export.Identifier, export.Aggregator) { +func (bf *benchFixture) 
Process(context.Context, *export.Descriptor, []core.KeyValue, string, export.Aggregator) { } func (bf *benchFixture) ReadCheckpoint() export.Producer { diff --git a/sdk/metric/monotone_test.go b/sdk/metric/monotone_test.go index b80c08c6763..0142a0a4e58 100644 --- a/sdk/metric/monotone_test.go +++ b/sdk/metric/monotone_test.go @@ -37,7 +37,7 @@ type monotoneBatcher struct { currentTime *time.Time } -func (m *monotoneBatcher) AggregatorFor(export.Identifier) export.Aggregator { +func (m *monotoneBatcher) AggregatorFor(*export.Descriptor) export.Aggregator { return gauge.New() } @@ -45,11 +45,11 @@ func (m *monotoneBatcher) ReadCheckpoint() export.Producer { return nil } -func (m *monotoneBatcher) Process(_ context.Context, ident export.Identifier, agg export.Aggregator) { - require.Equal(m.t, "my.gauge.name", ident.Descriptor().Name()) - require.Equal(m.t, 1, len(ident.Labels())) - require.Equal(m.t, "a", string(ident.Labels()[0].Key)) - require.Equal(m.t, "b", ident.Labels()[0].Value.Emit()) +func (m *monotoneBatcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, agg export.Aggregator) { + require.Equal(m.t, "my.gauge.name", desc.Name()) + require.Equal(m.t, 1, len(labels)) + require.Equal(m.t, "a", string(labels[0].Key)) + require.Equal(m.t, "b", labels[0].Value.Emit()) gauge := agg.(*gauge.Aggregator) val := gauge.LastValue() diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 5cbe3bbed46..a0245907202 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -144,7 +144,6 @@ var ( _ api.LabelSet = &labels{} _ api.InstrumentImpl = &instrument{} _ api.HandleImpl = &record{} - _ export.Identifier = &record{} // hazardRecord is used as a pointer value that indicates the // value is not included in any list. 
(`nil` would be @@ -181,7 +180,8 @@ func (i *instrument) acquireHandle(ls *labels) *record { atomic.AddInt64(&rec.refcount, 1) return rec } - rec.recorder = i.meter.batcher.AggregatorFor(rec) + // TODO: Fix the race here + rec.recorder = i.meter.batcher.AggregatorFor(rec.descriptor) i.meter.addPrimary(rec) return rec @@ -392,8 +392,8 @@ func (m *SDK) Collect(ctx context.Context) { func (m *SDK) checkpoint(ctx context.Context, r *record) { if r.recorder != nil { - r.recorder.Checkpoint(ctx, r) - m.batcher.Process(ctx, r, r.recorder) + r.recorder.Checkpoint(ctx, r.descriptor) + m.batcher.Process(ctx, r.descriptor, r.labels.sorted, r.labels.encoded, r.recorder) } } @@ -419,7 +419,7 @@ func (l *labels) Meter() api.Meter { func (r *record) RecordOne(ctx context.Context, number core.Number) { if r.recorder != nil { - r.recorder.Update(ctx, number, r) + r.recorder.Update(ctx, number, r.descriptor) } } @@ -455,15 +455,3 @@ func (r *record) mapkey() mapkey { encoded: r.labels.encoded, } } - -func (r *record) Descriptor() *export.Descriptor { - return r.descriptor -} - -func (r *record) Labels() []core.KeyValue { - return r.labels.sorted -} - -func (r *record) EncodedLabels() string { - return r.labels.encoded -} diff --git a/sdk/metric/selector/simple/simple.go b/sdk/metric/selector/simple/simple.go index e11f1d0817b..4dd5d0526a3 100644 --- a/sdk/metric/selector/simple/simple.go +++ b/sdk/metric/selector/simple/simple.go @@ -29,8 +29,8 @@ func New() export.AggregationSelector { return selector{} } -func (s selector) AggregatorFor(ident export.Identifier) export.Aggregator { - switch ident.Descriptor().MetricKind() { +func (s selector) AggregatorFor(descriptor *export.Descriptor) export.Aggregator { + switch descriptor.MetricKind() { case export.GaugeKind: return gauge.New() case export.MeasureKind: diff --git a/sdk/metric/stress_test.go b/sdk/metric/stress_test.go index 5e96caf1543..82a45f57717 100644 --- a/sdk/metric/stress_test.go +++ b/sdk/metric/stress_test.go @@ -227,8 
+227,8 @@ func (f *testFixture) preCollect() { f.dupCheck = map[testKey]int{} } -func (f *testFixture) AggregatorFor(ident export.Identifier) export.Aggregator { - switch ident.Descriptor().MetricKind() { +func (f *testFixture) AggregatorFor(descriptor *export.Descriptor) export.Aggregator { + switch descriptor.MetricKind() { case export.CounterKind: return counter.New() case export.GaugeKind: @@ -242,10 +242,9 @@ func (f *testFixture) ReadCheckpoint() export.Producer { return nil } -func (f *testFixture) Process(ctx context.Context, ident export.Identifier, agg export.Aggregator) { - desc := ident.Descriptor() +func (f *testFixture) Process(ctx context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, agg export.Aggregator) { key := testKey{ - labels: canonicalizeLabels(ident.Labels()), + labels: canonicalizeLabels(labels), descriptor: desc, } if f.dupCheck[key] == 0 { From 048c8d9dd9bbe907d1fb8f73d5b33c22ad384501 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 6 Nov 2019 00:03:02 -0800 Subject: [PATCH 26/73] Checkpoint --- sdk/metric/doc.go | 70 ++++++++++++++++++++++++++++++++++++----------- 1 file changed, 54 insertions(+), 16 deletions(-) diff --git a/sdk/metric/doc.go b/sdk/metric/doc.go index a9424f377db..08e72b4db83 100644 --- a/sdk/metric/doc.go +++ b/sdk/metric/doc.go @@ -89,29 +89,67 @@ aggregator of the same type. Export Pipeline -While the SDK serves to maintain a current set of records and coordinate -collection, the behavior of a metrics export pipeline is configured -through the export types in go.opentelemetry.io/otel/sdk/export/metric. +While the SDK serves to maintain a current set of records and +coordinate collection, the behavior of a metrics export pipeline is +configured through the export types in +go.opentelemetry.io/otel/sdk/export/metric. It is important to keep +in mind the context these interfaces are called from. 
There are two +contexts: instrumentation context, in which a user-level goroutine +enters the SDK, resulting in a new record, and collection context, +where a system-level thread performs a collection pass through the +SDK. + +Descriptor is a struct that describes the metric instrument to the +export pipeline, containing the name, recommended aggregation keys, +units, description, metric kind (counter, gauge, or measure), number +kind (int64 or float64), and whether the instrument has alternate +semantics or not (i.e., monotonic=false counter, monotonic=true gauge, +absolute=false measure). A Descriptor accompanies metric data as it +passes through the export pipeline. + +The AggregationSelector interface supports choosing the method of +aggregation to apply to a particular instrument. Given the +Descriptor, its AggregatorFor method returns an implementation of +Aggregator. If this interface returns nil, the metric will be +disabled. The aggregator should be matched to the capabilities of the +exporter. Selecting the aggregator for counter and gauge instruments +is relatively straightforward, but for measure instruments there are +numerous choices with different cost and quality tradeoffs. + +Aggregator is an interface which implements a concrete strategy for +aggregating metric updates. Several Aggregator implementations are +provided by the SDK. Aggregators may be lock-free or use locking, +depending on their structure and semantics. Aggregators implement an +Update method, called in instrumentation context, to receive a single +metric event. Aggregators implement a Checkpoint method, called in +collection context, to save a checkpoint of the current state. +Aggregators implement a Merge method, also called in collection +context, that combines state from two aggregators into one. Each SDK +record has an associated aggregator. 
+ +Batcher -AggregationSelector LabelEncoder -They are briefly summarized here: +Producer -Aggregator +ProducedRecord +Exporter -AggregationSelector: decides which aggregator to use -Batcher: determine the aggregation dimensions, group (and de-dup) records -Descriptor: summarizes an instrument and its metadata -Record: interface to the SDK-internal record -LabelEncoder: defines a unique mapping from label set to encoded string -Producer: interface to the batcher's checkpoint -ProducedRecord: result of the batcher's grouping -Exporter: output produced records to their final destination +Controller + +metric.MeterProvider + + +TODO: think about name for Producer/ProducedRecord +TODO: factor this sort of test out + + if !desc.Alternate() && number.IsNegative(kind) { + // TODO warn + return + } -One final type, a Controller, implements the metric.MeterProvider -interface and is responsible for initiating collection. */ package metric // import "go.opentelemetry.io/otel/sdk/metric" From 657c064848484a47be68541780a38ac3c4e2ceaf Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 6 Nov 2019 04:46:58 -0800 Subject: [PATCH 27/73] Propagate errors to the SDK, remove a bunch of 'TODO warn' --- sdk/export/metric/metric.go | 6 ++-- sdk/metric/aggregator/array/array.go | 22 +++---------- sdk/metric/aggregator/array/array_test.go | 20 +++++------ sdk/metric/aggregator/counter/counter.go | 18 ++++------ sdk/metric/aggregator/counter/counter_test.go | 14 ++++---- sdk/metric/aggregator/ddsketch/ddsketch.go | 18 ++++------ .../aggregator/ddsketch/ddsketch_test.go | 12 +++---- sdk/metric/aggregator/errors.go | 33 +++++++++++++++++-- sdk/metric/aggregator/gauge/gauge.go | 26 +++++++-------- sdk/metric/aggregator/gauge/gauge_test.go | 16 ++++----- sdk/metric/aggregator/maxsumcount/msc.go | 15 ++++----- sdk/metric/aggregator/maxsumcount/msc_test.go | 6 ++-- sdk/metric/aggregator/test/test.go | 9 +++++ sdk/metric/batcher/defaultkeys/defaultkeys.go | 6 ++-- 
sdk/metric/batcher/ungrouped/ungrouped.go | 6 ++-- sdk/metric/benchmark_test.go | 3 +- sdk/metric/monotone_test.go | 3 +- sdk/metric/sdk.go | 26 +++++++++++---- sdk/metric/stress_test.go | 3 +- 19 files changed, 145 insertions(+), 117 deletions(-) diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 122b140e381..0b7aa3cbae9 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -41,7 +41,7 @@ type Batcher interface { descriptor *Descriptor, labels []core.KeyValue, encodedLabels string, - aggregator Aggregator) + aggregator Aggregator) error // ReadCheckpoint is the interface used by exporters to access // aggregate checkpoints after collection. @@ -68,7 +68,7 @@ type Aggregator interface { // Update receives a new measured value and incorporates it // into the aggregation. Update() calls may arrive // concurrently. - Update(context.Context, core.Number, *Descriptor) + Update(context.Context, core.Number, *Descriptor) error // Checkpoint is called during the SDK Collect() to finish one // period of aggregation. Checkpoint() is called in a @@ -76,7 +76,7 @@ type Aggregator interface { Checkpoint(context.Context, *Descriptor) // Merge combines state from two aggregators into one. - Merge(Aggregator, *Descriptor) + Merge(Aggregator, *Descriptor) error } // Exporter handles presentation of the checkpoint of aggregate diff --git a/sdk/metric/aggregator/array/array.go b/sdk/metric/aggregator/array/array.go index 7481563c36d..ab5d3e0c3a7 100644 --- a/sdk/metric/aggregator/array/array.go +++ b/sdk/metric/aggregator/array/array.go @@ -84,34 +84,22 @@ func (c *Aggregator) Checkpoint(ctx context.Context, desc *export.Descriptor) { } } -func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) { - kind := desc.NumberKind() - - if kind == core.Float64NumberKind && math.IsNaN(number.AsFloat64()) { - // TODO warn - // NOTE: add this to the specification. 
- return - } - - if !desc.Alternate() && number.IsNegative(kind) { - // TODO warn - return - } - +func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) error { c.lock.Lock() c.current = append(c.current, number) c.lock.Unlock() + return nil } -func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) { +func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { - // TODO warn - return + return aggregator.ErrInconsistentType } c.ckptSum.AddNumber(desc.NumberKind(), o.ckptSum) c.checkpoint = combine(c.checkpoint, o.checkpoint, desc.NumberKind()) + return nil } func (c *Aggregator) sort(kind core.NumberKind) { diff --git a/sdk/metric/aggregator/array/array_test.go b/sdk/metric/aggregator/array/array_test.go index 8833143ffeb..351af2ec346 100644 --- a/sdk/metric/aggregator/array/array_test.go +++ b/sdk/metric/aggregator/array/array_test.go @@ -45,12 +45,12 @@ func (ut *updateTest) run(t *testing.T, profile test.Profile) { for i := 0; i < ut.count; i++ { x := profile.Random(+1) all.Append(x) - agg.Update(ctx, x, descriptor) + test.CheckedUpdate(ctx, agg, x, descriptor) if !ut.absolute { y := profile.Random(-1) all.Append(y) - agg.Update(ctx, y, descriptor) + test.CheckedUpdate(ctx, agg, y, descriptor) } } @@ -116,20 +116,20 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { for i := 0; i < mt.count; i++ { x1 := profile.Random(+1) all.Append(x1) - agg1.Update(ctx, x1, descriptor) + test.CheckedUpdate(ctx, agg1, x1, descriptor) x2 := profile.Random(+1) all.Append(x2) - agg2.Update(ctx, x2, descriptor) + test.CheckedUpdate(ctx, agg2, x2, descriptor) if !mt.absolute { y1 := profile.Random(-1) all.Append(y1) - agg1.Update(ctx, y1, descriptor) + test.CheckedUpdate(ctx, agg1, y1, descriptor) y2 := profile.Random(-1) all.Append(y2) - agg2.Update(ctx, y2, descriptor) + test.CheckedUpdate(ctx, agg2, y2, descriptor) } } @@ -200,10 +200,10 @@ func 
TestArrayErrors(t *testing.T) { descriptor := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false) - agg.Update(ctx, core.Number(0), descriptor) + test.CheckedUpdate(ctx, agg, core.Number(0), descriptor) if profile.NumberKind == core.Float64NumberKind { - agg.Update(ctx, core.NewFloat64Number(math.NaN()), descriptor) + test.CheckedUpdate(ctx, agg, core.NewFloat64Number(math.NaN()), descriptor) } agg.Checkpoint(ctx, descriptor) @@ -263,13 +263,13 @@ func TestArrayFloat64(t *testing.T) { for _, f := range fpsf(1) { all.Append(core.NewFloat64Number(f)) - agg.Update(ctx, core.NewFloat64Number(f), descriptor) + test.CheckedUpdate(ctx, agg, core.NewFloat64Number(f), descriptor) } if !absolute { for _, f := range fpsf(-1) { all.Append(core.NewFloat64Number(f)) - agg.Update(ctx, core.NewFloat64Number(f), descriptor) + test.CheckedUpdate(ctx, agg, core.NewFloat64Number(f), descriptor) } } diff --git a/sdk/metric/aggregator/counter/counter.go b/sdk/metric/aggregator/counter/counter.go index 410748e1462..f457e2a8278 100644 --- a/sdk/metric/aggregator/counter/counter.go +++ b/sdk/metric/aggregator/counter/counter.go @@ -19,6 +19,7 @@ import ( "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator" ) // Aggregator aggregates counter events. @@ -49,21 +50,16 @@ func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) { } // Update modifies the current value (atomically) for later export. 
-func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) { - kind := desc.NumberKind() - if !desc.Alternate() && number.IsNegative(kind) { - // TODO warn - return - } - - c.current.AddNumberAtomic(kind, number) +func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) error { + c.current.AddNumberAtomic(desc.NumberKind(), number) + return nil } -func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) { +func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { - // TODO warn - return + return aggregator.ErrInconsistentType } c.checkpoint.AddNumber(desc.NumberKind(), o.checkpoint) + return nil } diff --git a/sdk/metric/aggregator/counter/counter_test.go b/sdk/metric/aggregator/counter/counter_test.go index d1a1b74e938..6d0dd189a6e 100644 --- a/sdk/metric/aggregator/counter/counter_test.go +++ b/sdk/metric/aggregator/counter/counter_test.go @@ -39,7 +39,7 @@ func TestCounterMonotonic(t *testing.T) { for i := 0; i < count; i++ { x := profile.Random(+1) sum.AddNumber(profile.NumberKind, x) - agg.Update(ctx, x, descriptor) + test.CheckedUpdate(ctx, agg, x, descriptor) } agg.Checkpoint(ctx, descriptor) @@ -57,11 +57,11 @@ func TestCounterMonotonicNegative(t *testing.T) { descriptor := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false) for i := 0; i < count; i++ { - agg.Update(ctx, profile.Random(-1), descriptor) + test.CheckedUpdate(ctx, agg, profile.Random(-1), descriptor) } sum := profile.Random(+1) - agg.Update(ctx, sum, descriptor) + test.CheckedUpdate(ctx, agg, sum, descriptor) agg.Checkpoint(ctx, descriptor) require.Equal(t, sum, agg.Sum(), "Same sum - monotonic") @@ -82,8 +82,8 @@ func TestCounterNonMonotonic(t *testing.T) { y := profile.Random(-1) sum.AddNumber(profile.NumberKind, x) sum.AddNumber(profile.NumberKind, y) - agg.Update(ctx, x, descriptor) - agg.Update(ctx, y, descriptor) + 
test.CheckedUpdate(ctx, agg, x, descriptor) + test.CheckedUpdate(ctx, agg, y, descriptor) } agg.Checkpoint(ctx, descriptor) @@ -105,8 +105,8 @@ func TestCounterMerge(t *testing.T) { for i := 0; i < count; i++ { x := profile.Random(+1) sum.AddNumber(profile.NumberKind, x) - agg1.Update(ctx, x, descriptor) - agg2.Update(ctx, x, descriptor) + test.CheckedUpdate(ctx, agg1, x, descriptor) + test.CheckedUpdate(ctx, agg2, x, descriptor) } agg1.Checkpoint(ctx, descriptor) diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index 0ca70a0957f..31296415279 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -102,25 +102,19 @@ func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) { } // Update modifies the current value (atomically) for later export. -func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) { - kind := desc.NumberKind() - - if !desc.Alternate() && number.IsNegative(kind) { - // TODO warn - return - } - +func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) error { c.lock.Lock() defer c.lock.Unlock() - c.current.Add(number.CoerceToFloat64(kind)) + c.current.Add(number.CoerceToFloat64(desc.NumberKind())) + return nil } -func (c *Aggregator) Merge(oa export.Aggregator, d *export.Descriptor) { +func (c *Aggregator) Merge(oa export.Aggregator, d *export.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { - // TODO warn - return + return aggregator.ErrInconsistentType } c.checkpoint.Merge(o.checkpoint) + return nil } diff --git a/sdk/metric/aggregator/ddsketch/ddsketch_test.go b/sdk/metric/aggregator/ddsketch/ddsketch_test.go index fd7f2f63965..3364e0d69c6 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch_test.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch_test.go @@ -41,12 +41,12 @@ func (ut *updateTest) run(t *testing.T, profile test.Profile) { for i := 0; 
i < count; i++ { x := profile.Random(+1) all.Append(x) - agg.Update(ctx, x, descriptor) + test.CheckedUpdate(ctx, agg, x, descriptor) if !ut.absolute { y := profile.Random(-1) all.Append(y) - agg.Update(ctx, y, descriptor) + test.CheckedUpdate(ctx, agg, y, descriptor) } } @@ -105,24 +105,24 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { for i := 0; i < count; i++ { x := profile.Random(+1) all.Append(x) - agg1.Update(ctx, x, descriptor) + test.CheckedUpdate(ctx, agg1, x, descriptor) if !mt.absolute { y := profile.Random(-1) all.Append(y) - agg1.Update(ctx, y, descriptor) + test.CheckedUpdate(ctx, agg1, y, descriptor) } } for i := 0; i < count; i++ { x := profile.Random(+1) all.Append(x) - agg2.Update(ctx, x, descriptor) + test.CheckedUpdate(ctx, agg2, x, descriptor) if !mt.absolute { y := profile.Random(-1) all.Append(y) - agg2.Update(ctx, y, descriptor) + test.CheckedUpdate(ctx, agg2, y, descriptor) } } diff --git a/sdk/metric/aggregator/errors.go b/sdk/metric/aggregator/errors.go index 5ecffda666b..dd24c5631b2 100644 --- a/sdk/metric/aggregator/errors.go +++ b/sdk/metric/aggregator/errors.go @@ -14,9 +14,36 @@ package aggregator -import "fmt" +import ( + "fmt" + "math" + + "go.opentelemetry.io/otel/api/core" + export "go.opentelemetry.io/otel/sdk/export/metric" +) var ( - ErrEmptyDataSet = fmt.Errorf("The result is not defined on an empty data set") - ErrInvalidQuantile = fmt.Errorf("The requested quantile is out of range") + ErrEmptyDataSet = fmt.Errorf("The result is not defined on an empty data set") + ErrInvalidQuantile = fmt.Errorf("The requested quantile is out of range") + ErrNegativeInput = fmt.Errorf("Negative value is out of range for this instrumentr") + ErrNaNInput = fmt.Errorf("NaN value is an invalid input") + ErrNonMonotoneInput = fmt.Errorf("The new value is not monotone") + ErrInconsistentType = fmt.Errorf("Cannot merge different aggregator types") ) + +func RangeTest(number core.Number, descriptor *export.Descriptor) error { + 
numberKind := descriptor.NumberKind() + + if numberKind == core.Float64NumberKind && math.IsNaN(number.AsFloat64()) { + // NOTE: add this to the specification. + return ErrNaNInput + } + + switch descriptor.MetricKind() { + case export.CounterKind, export.MeasureKind: + if !descriptor.Alternate() && number.IsNegative(numberKind) { + return ErrNegativeInput + } + } + return nil +} diff --git a/sdk/metric/aggregator/gauge/gauge.go b/sdk/metric/aggregator/gauge/gauge.go index adf60b6b579..5f9e3c117ec 100644 --- a/sdk/metric/aggregator/gauge/gauge.go +++ b/sdk/metric/aggregator/gauge/gauge.go @@ -22,6 +22,7 @@ import ( "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator" ) // Note: This aggregator enforces the behavior of monotonic gauges to @@ -84,12 +85,12 @@ func (g *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) { } // Update modifies the current value (atomically) for later export. -func (g *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) { +func (g *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) error { if !desc.Alternate() { g.updateNonMonotonic(number) - } else { - g.updateMonotonic(number, desc) + return nil } + return g.updateMonotonic(number, desc) } func (g *Aggregator) updateNonMonotonic(number core.Number) { @@ -100,7 +101,7 @@ func (g *Aggregator) updateNonMonotonic(number core.Number) { atomic.StorePointer(&g.current, unsafe.Pointer(ngd)) } -func (g *Aggregator) updateMonotonic(number core.Number, desc *export.Descriptor) { +func (g *Aggregator) updateMonotonic(number core.Number, desc *export.Descriptor) error { ngd := &gaugeData{ timestamp: time.Now(), value: number, @@ -111,21 +112,19 @@ func (g *Aggregator) updateMonotonic(number core.Number, desc *export.Descriptor gd := (*gaugeData)(atomic.LoadPointer(&g.current)) if gd.value.CompareNumber(kind, number) > 0 { - // TODO warn 
- return + return aggregator.ErrNonMonotoneInput } if atomic.CompareAndSwapPointer(&g.current, unsafe.Pointer(gd), unsafe.Pointer(ngd)) { - return + return nil } } } -func (g *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) { +func (g *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { - // TODO warn - return + return aggregator.ErrInconsistentType } ggd := (*gaugeData)(atomic.LoadPointer(&g.checkpoint)) @@ -136,18 +135,19 @@ func (g *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) { cmp := ggd.value.CompareNumber(desc.NumberKind(), ogd.value) if cmp > 0 { - return + return nil } if cmp < 0 { g.checkpoint = unsafe.Pointer(ogd) - return + return nil } } // Non-monotonic gauge or equal values if ggd.timestamp.After(ogd.timestamp) { - return + return nil } g.checkpoint = unsafe.Pointer(ogd) + return nil } diff --git a/sdk/metric/aggregator/gauge/gauge_test.go b/sdk/metric/aggregator/gauge/gauge_test.go index 24b9290aad2..7815896bd97 100644 --- a/sdk/metric/aggregator/gauge/gauge_test.go +++ b/sdk/metric/aggregator/gauge/gauge_test.go @@ -42,7 +42,7 @@ func TestGaugeNonMonotonic(t *testing.T) { for i := 0; i < count; i++ { x := profile.Random(rand.Intn(1)*2 - 1) last = x - agg.Update(ctx, x, record) + test.CheckedUpdate(ctx, agg, x, record) } agg.Checkpoint(ctx, record) @@ -64,7 +64,7 @@ func TestGaugeMonotonic(t *testing.T) { for i := 0; i < count; i++ { x := profile.Random(+1) last.AddNumber(profile.NumberKind, x) - agg.Update(ctx, last, record) + test.CheckedUpdate(ctx, agg, last, record) } agg.Checkpoint(ctx, record) @@ -82,11 +82,11 @@ func TestGaugeMonotonicDescending(t *testing.T) { record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true) first := profile.Random(+1) - agg.Update(ctx, first, record) + test.CheckedUpdate(ctx, agg, first, record) for i := 0; i < count; i++ { x := profile.Random(-1) - agg.Update(ctx, x, record) + test.CheckedUpdate(ctx, 
agg, x, record) } agg.Checkpoint(ctx, record) @@ -108,8 +108,8 @@ func TestGaugeNormalMerge(t *testing.T) { first2 := profile.Random(+1) first1.AddNumber(profile.NumberKind, first2) - agg1.Update(ctx, first1, descriptor) - agg2.Update(ctx, first2, descriptor) + test.CheckedUpdate(ctx, agg1, first1, descriptor) + test.CheckedUpdate(ctx, agg2, first2, descriptor) agg1.Checkpoint(ctx, descriptor) agg2.Checkpoint(ctx, descriptor) @@ -135,11 +135,11 @@ func TestGaugeMonotonicMerge(t *testing.T) { descriptor := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true) first1 := profile.Random(+1) - agg1.Update(ctx, first1, descriptor) + test.CheckedUpdate(ctx, agg1, first1, descriptor) first2 := profile.Random(+1) first2.AddNumber(profile.NumberKind, first1) - agg2.Update(ctx, first2, descriptor) + test.CheckedUpdate(ctx, agg2, first2, descriptor) agg1.Checkpoint(ctx, descriptor) agg2.Checkpoint(ctx, descriptor) diff --git a/sdk/metric/aggregator/maxsumcount/msc.go b/sdk/metric/aggregator/maxsumcount/msc.go index 2cc194296da..ba90d6f8a12 100644 --- a/sdk/metric/aggregator/maxsumcount/msc.go +++ b/sdk/metric/aggregator/maxsumcount/msc.go @@ -19,6 +19,7 @@ import ( "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator" ) type ( @@ -76,14 +77,9 @@ func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) { } // Update modifies the current value (atomically) for later export. -func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) { +func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) error { kind := desc.NumberKind() - if !desc.Alternate() && number.IsNegative(kind) { - // TODO warn - return - } - c.current.count.AddUint64Atomic(1) c.current.sum.AddNumberAtomic(kind, number) @@ -97,13 +93,13 @@ func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export. 
break } } + return nil } -func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) { +func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { - // TODO warn - return + return aggregator.ErrInconsistentType } c.checkpoint.sum.AddNumber(desc.NumberKind(), o.checkpoint.sum) @@ -112,4 +108,5 @@ func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) { if c.checkpoint.max.CompareNumber(desc.NumberKind(), o.checkpoint.max) < 0 { c.checkpoint.max.SetNumber(o.checkpoint.max) } + return nil } diff --git a/sdk/metric/aggregator/maxsumcount/msc_test.go b/sdk/metric/aggregator/maxsumcount/msc_test.go index 105ca675de0..77d560cdb1a 100644 --- a/sdk/metric/aggregator/maxsumcount/msc_test.go +++ b/sdk/metric/aggregator/maxsumcount/msc_test.go @@ -39,7 +39,7 @@ func TestMaxSumCountAbsolute(t *testing.T) { for i := 0; i < count; i++ { x := profile.Random(+1) all.Append(x) - agg.Update(ctx, x, record) + test.CheckedUpdate(ctx, agg, x, record) } agg.Checkpoint(ctx, record) @@ -76,12 +76,12 @@ func TestMaxSumCountMerge(t *testing.T) { for i := 0; i < count; i++ { x := profile.Random(+1) all.Append(x) - agg1.Update(ctx, x, descriptor) + test.CheckedUpdate(ctx, agg1, x, descriptor) } for i := 0; i < count; i++ { x := profile.Random(+1) all.Append(x) - agg2.Update(ctx, x, descriptor) + test.CheckedUpdate(ctx, agg2, x, descriptor) } agg1.Checkpoint(ctx, descriptor) diff --git a/sdk/metric/aggregator/test/test.go b/sdk/metric/aggregator/test/test.go index 84b447a43f4..68db76f8263 100644 --- a/sdk/metric/aggregator/test/test.go +++ b/sdk/metric/aggregator/test/test.go @@ -15,12 +15,14 @@ package test import ( + "context" "math/rand" "sort" "testing" "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator" ) const Magnitude = 1000 @@ -121,3 +123,10 @@ func (n *Numbers) Median() core.Number { // specified 
quantile. return n.numbers[len(n.numbers)/2] } + +// Performs the same range test the SDK does on behalf of the aggregator. +func CheckedUpdate(ctx context.Context, agg export.Aggregator, number core.Number, descriptor *export.Descriptor) { + if err := aggregator.RangeTest(number, descriptor); err == nil { + agg.Update(ctx, number, descriptor) + } +} diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys.go b/sdk/metric/batcher/defaultkeys/defaultkeys.go index a0bf3407b5d..f36a1b48cf8 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys.go @@ -62,7 +62,7 @@ func (b *Batcher) AggregatorFor(descriptor *export.Descriptor) export.Aggregator return b.selector.AggregatorFor(descriptor) } -func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, agg export.Aggregator) { +func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, agg export.Aggregator) error { keys := desc.Keys() // Cache the mapping from Descriptor->Key->Index @@ -107,9 +107,9 @@ func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []c labels: canon, descriptor: desc, } - } else { - rag.aggregator.Merge(agg, desc) + return nil } + return rag.aggregator.Merge(agg, desc) } func (b *Batcher) ReadCheckpoint() export.Producer { diff --git a/sdk/metric/batcher/ungrouped/ungrouped.go b/sdk/metric/batcher/ungrouped/ungrouped.go index 8fc3f543fdd..995cfc3eb79 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped.go +++ b/sdk/metric/batcher/ungrouped/ungrouped.go @@ -56,7 +56,7 @@ func (b *Batcher) AggregatorFor(descriptor *export.Descriptor) export.Aggregator return b.selector.AggregatorFor(descriptor) } -func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, encodedLabels string, agg export.Aggregator) { +func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, 
encodedLabels string, agg export.Aggregator) error { key := batchKey{ descriptor: desc, encoded: encodedLabels, @@ -67,9 +67,9 @@ func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []c aggregator: agg, labels: labels, } - } else { - value.aggregator.Merge(agg, desc) + return nil } + return value.aggregator.Merge(agg, desc) } func (b *Batcher) ReadCheckpoint() export.Producer { diff --git a/sdk/metric/benchmark_test.go b/sdk/metric/benchmark_test.go index 14f2a5c22a5..35622773ff0 100644 --- a/sdk/metric/benchmark_test.go +++ b/sdk/metric/benchmark_test.go @@ -63,7 +63,8 @@ func (bf *benchFixture) AggregatorFor(descriptor *export.Descriptor) export.Aggr return nil } -func (bf *benchFixture) Process(context.Context, *export.Descriptor, []core.KeyValue, string, export.Aggregator) { +func (bf *benchFixture) Process(context.Context, *export.Descriptor, []core.KeyValue, string, export.Aggregator) error { + return nil } func (bf *benchFixture) ReadCheckpoint() export.Producer { diff --git a/sdk/metric/monotone_test.go b/sdk/metric/monotone_test.go index 0142a0a4e58..2c18cf8b3ca 100644 --- a/sdk/metric/monotone_test.go +++ b/sdk/metric/monotone_test.go @@ -45,7 +45,7 @@ func (m *monotoneBatcher) ReadCheckpoint() export.Producer { return nil } -func (m *monotoneBatcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, agg export.Aggregator) { +func (m *monotoneBatcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, agg export.Aggregator) error { require.Equal(m.t, "my.gauge.name", desc.Name()) require.Equal(m.t, 1, len(labels)) require.Equal(m.t, "a", string(labels[0].Key)) @@ -58,6 +58,7 @@ func (m *monotoneBatcher) Process(_ context.Context, desc *export.Descriptor, la m.currentValue = &val m.currentTime = &ts m.collections++ + return nil } func TestMonotoneGauge(t *testing.T) { diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index a0245907202..c7a35b34174 100644 --- 
a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -25,6 +25,7 @@ import ( "go.opentelemetry.io/otel/api/metric" api "go.opentelemetry.io/otel/api/metric" export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator" ) type ( @@ -180,7 +181,6 @@ func (i *instrument) acquireHandle(ls *labels) *record { atomic.AddInt64(&rec.refcount, 1) return rec } - // TODO: Fix the race here rec.recorder = i.meter.batcher.AggregatorFor(rec.descriptor) i.meter.addPrimary(rec) @@ -391,9 +391,14 @@ func (m *SDK) Collect(ctx context.Context) { } func (m *SDK) checkpoint(ctx context.Context, r *record) { - if r.recorder != nil { - r.recorder.Checkpoint(ctx, r.descriptor) - m.batcher.Process(ctx, r.descriptor, r.labels.sorted, r.labels.encoded, r.recorder) + if r.recorder == nil { + return + } + r.recorder.Checkpoint(ctx, r.descriptor) + err := m.batcher.Process(ctx, r.descriptor, r.labels.sorted, r.labels.encoded, r.recorder) + + if err != nil { + // TODO warn } } @@ -418,8 +423,17 @@ func (l *labels) Meter() api.Meter { } func (r *record) RecordOne(ctx context.Context, number core.Number) { - if r.recorder != nil { - r.recorder.Update(ctx, number, r.descriptor) + if r.recorder == nil { + // The instrument is disabled according to the AggregationSelector. 
+ return + } + if err := aggregator.RangeTest(number, r.descriptor); err != nil { + // TODO warn + return + } + if err := r.recorder.Update(ctx, number, r.descriptor); err != nil { + // TODO warn + return } } diff --git a/sdk/metric/stress_test.go b/sdk/metric/stress_test.go index 82a45f57717..a9bb1cb05a7 100644 --- a/sdk/metric/stress_test.go +++ b/sdk/metric/stress_test.go @@ -242,7 +242,7 @@ func (f *testFixture) ReadCheckpoint() export.Producer { return nil } -func (f *testFixture) Process(ctx context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, agg export.Aggregator) { +func (f *testFixture) Process(ctx context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, agg export.Aggregator) error { key := testKey{ labels: canonicalizeLabels(labels), descriptor: desc, @@ -264,6 +264,7 @@ func (f *testFixture) Process(ctx context.Context, desc *export.Descriptor, labe default: panic("Not used in this test") } + return nil } func stressTest(t *testing.T, impl testImpl) { From 0d78cfa5c65526bd08e8f1e09dfb64a815d94c16 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 6 Nov 2019 11:23:54 -0800 Subject: [PATCH 28/73] Checkpoint --- exporter/metric/stdout/stdout.go | 9 ++-- sdk/export/metric/metric.go | 52 ++++++++++++++++--- sdk/metric/batcher/defaultkeys/defaultkeys.go | 17 +++--- sdk/metric/batcher/ungrouped/ungrouped.go | 16 +++--- sdk/metric/benchmark_test.go | 2 +- sdk/metric/doc.go | 6 --- sdk/metric/monotone_test.go | 2 +- sdk/metric/sdk.go | 2 +- sdk/metric/stress_test.go | 2 +- 9 files changed, 71 insertions(+), 37 deletions(-) diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index f1451cd04bc..2d05eb82172 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -76,9 +76,10 @@ func New(options Options) *Exporter { func (e *Exporter) Export(_ context.Context, producer export.Producer) { var batch expoBatch - producer.Foreach(func(agg export.Aggregator, record 
export.ProducedRecord) { - desc := record.Descriptor - labels := record.Labels // HERE TODO + producer.Foreach(func(record export.Record) { + desc := record.Descriptor() + labels := record.Labels() + agg := record.Aggregator() var expose expoLine if sum, ok := agg.(aggregator.Sum); ok { @@ -107,7 +108,7 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) { if len(labels) > 0 { sb.WriteRune('{') - sb.WriteString(record.EncodedLabels) + sb.WriteString(record.EncodedLabels()) sb.WriteRune('}') } diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 0b7aa3cbae9..973f6b33bc3 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -41,6 +41,7 @@ type Batcher interface { descriptor *Descriptor, labels []core.KeyValue, encodedLabels string, + labelEncoder LabelEncoder, aggregator Aggregator) error // ReadCheckpoint is the interface used by exporters to access @@ -97,15 +98,52 @@ type LabelEncoder interface { // Producer allows a Exporter to access a checkpoint of // aggregated metrics one at a time. type Producer interface { - Foreach(func(Aggregator, ProducedRecord)) + Foreach(func(Record)) +} + +// Record contains the exported data for a single metric instrument +// and label set. +type Record struct { + aggregator Aggregator + descriptor *Descriptor + labels []core.KeyValue + encoder LabelEncoder + encodedLabels string +} + +// NewRecord allows Batcher implementations to construct export records. 
+func NewRecord(aggregator Aggregator, + descriptor *Descriptor, + labels []core.KeyValue, + encoder LabelEncoder, + encodedLabels string) Record { + return Record{ + aggregator: aggregator, + descriptor: descriptor, + labels: labels, + encoder: encoder, + encodedLabels: encodedLabels, + } +} + +func (r *Record) Aggregator() Aggregator { + return r.aggregator +} + +func (r *Record) Descriptor() *Descriptor { + return r.descriptor +} + +func (r *Record) Labels() []core.KeyValue { + return r.labels +} + +func (r *Record) LabelEncoder() LabelEncoder { + return r.encoder } -// ProducedRecord -type ProducedRecord struct { - Descriptor *Descriptor - Labels []core.KeyValue - Encoder LabelEncoder - EncodedLabels string +func (r *Record) EncodedLabels() string { + return r.encodedLabels } // Kind describes the kind of instrument. diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys.go b/sdk/metric/batcher/defaultkeys/defaultkeys.go index f36a1b48cf8..fceb8dfdc2d 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys.go @@ -62,7 +62,7 @@ func (b *Batcher) AggregatorFor(descriptor *export.Descriptor) export.Aggregator return b.selector.AggregatorFor(descriptor) } -func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, agg export.Aggregator) error { +func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, _ export.LabelEncoder, agg export.Aggregator) error { keys := desc.Keys() // Cache the mapping from Descriptor->Key->Index @@ -123,14 +123,13 @@ func (b *Batcher) ReadCheckpoint() export.Producer { } } -func (p *producer) Foreach(f func(export.Aggregator, export.ProducedRecord)) { +func (p *producer) Foreach(f func(export.Record)) { for encoded, entry := range p.aggMap { - pr := export.ProducedRecord{ - Descriptor: entry.descriptor, - Labels: entry.labels, - Encoder: p.lencoder, - EncodedLabels: encoded, - } - 
f(entry.aggregator, pr) + f(export.NewRecord(entry.aggregator, + entry.descriptor, + entry.labels, + p.lencoder, + encoded, + )) } } diff --git a/sdk/metric/batcher/ungrouped/ungrouped.go b/sdk/metric/batcher/ungrouped/ungrouped.go index 995cfc3eb79..d2bd8e688c8 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped.go +++ b/sdk/metric/batcher/ungrouped/ungrouped.go @@ -36,6 +36,7 @@ type ( batchValue struct { aggregator export.Aggregator labels []core.KeyValue + lencoder export.LabelEncoder } batchMap map[batchKey]batchValue @@ -56,7 +57,7 @@ func (b *Batcher) AggregatorFor(descriptor *export.Descriptor) export.Aggregator return b.selector.AggregatorFor(descriptor) } -func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, encodedLabels string, agg export.Aggregator) error { +func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, encodedLabels string, labelEncoder export.LabelEncoder, agg export.Aggregator) error { key := batchKey{ descriptor: desc, encoded: encodedLabels, @@ -66,6 +67,7 @@ func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []c b.batchMap[key] = batchValue{ aggregator: agg, labels: labels, + lencoder: labelEncoder, } return nil } @@ -80,12 +82,12 @@ func (b *Batcher) ReadCheckpoint() export.Producer { return checkpoint } -func (c batchMap) Foreach(f func(export.Aggregator, export.ProducedRecord)) { +func (c batchMap) Foreach(f func(export.Record)) { for key, value := range c { - pr := export.ProducedRecord{ - Descriptor: key.descriptor, - Labels: value.labels, - } - f(value.aggregator, pr) + f(export.NewRecord(value.aggregator, + key.descriptor, + value.labels, + value.lencoder, + key.encoded)) } } diff --git a/sdk/metric/benchmark_test.go b/sdk/metric/benchmark_test.go index 35622773ff0..3cbd5c5c698 100644 --- a/sdk/metric/benchmark_test.go +++ b/sdk/metric/benchmark_test.go @@ -63,7 +63,7 @@ func (bf *benchFixture) AggregatorFor(descriptor 
*export.Descriptor) export.Aggr return nil } -func (bf *benchFixture) Process(context.Context, *export.Descriptor, []core.KeyValue, string, export.Aggregator) error { +func (bf *benchFixture) Process(context.Context, *export.Descriptor, []core.KeyValue, string, export.LabelEncoder, export.Aggregator) error { return nil } diff --git a/sdk/metric/doc.go b/sdk/metric/doc.go index 08e72b4db83..d83dbc58484 100644 --- a/sdk/metric/doc.go +++ b/sdk/metric/doc.go @@ -143,12 +143,6 @@ metric.MeterProvider TODO: think about name for Producer/ProducedRecord -TODO: factor this sort of test out - - if !desc.Alternate() && number.IsNegative(kind) { - // TODO warn - return - } */ diff --git a/sdk/metric/monotone_test.go b/sdk/metric/monotone_test.go index 2c18cf8b3ca..a4eb62a4bae 100644 --- a/sdk/metric/monotone_test.go +++ b/sdk/metric/monotone_test.go @@ -45,7 +45,7 @@ func (m *monotoneBatcher) ReadCheckpoint() export.Producer { return nil } -func (m *monotoneBatcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, agg export.Aggregator) error { +func (m *monotoneBatcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, _ export.LabelEncoder, agg export.Aggregator) error { require.Equal(m.t, "my.gauge.name", desc.Name()) require.Equal(m.t, 1, len(labels)) require.Equal(m.t, "a", string(labels[0].Key)) diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index c7a35b34174..0adf7d8aa77 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -395,7 +395,7 @@ func (m *SDK) checkpoint(ctx context.Context, r *record) { return } r.recorder.Checkpoint(ctx, r.descriptor) - err := m.batcher.Process(ctx, r.descriptor, r.labels.sorted, r.labels.encoded, r.recorder) + err := m.batcher.Process(ctx, r.descriptor, r.labels.sorted, r.labels.encoded, m.lencoder, r.recorder) if err != nil { // TODO warn diff --git a/sdk/metric/stress_test.go b/sdk/metric/stress_test.go index a9bb1cb05a7..50d2fc31d6e 100644 --- 
a/sdk/metric/stress_test.go +++ b/sdk/metric/stress_test.go @@ -242,7 +242,7 @@ func (f *testFixture) ReadCheckpoint() export.Producer { return nil } -func (f *testFixture) Process(ctx context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, agg export.Aggregator) error { +func (f *testFixture) Process(ctx context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, _ export.LabelEncoder, agg export.Aggregator) error { key := testKey{ labels: canonicalizeLabels(labels), descriptor: desc, From f81aa3445e5bf171901d1cbf8e975fe673a45f11 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 6 Nov 2019 13:25:35 -0800 Subject: [PATCH 29/73] Introduce export.Labels --- exporter/metric/stdout/stdout.go | 4 +- sdk/export/metric/metric.go | 66 +++++++++++-------- sdk/metric/batcher/defaultkeys/defaultkeys.go | 19 +++--- sdk/metric/batcher/ungrouped/ungrouped.go | 15 ++--- sdk/metric/benchmark_test.go | 2 +- sdk/metric/monotone_test.go | 8 +-- sdk/metric/sdk.go | 3 +- sdk/metric/stress_test.go | 4 +- 8 files changed, 65 insertions(+), 56 deletions(-) diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index 2d05eb82172..1212cbe826f 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -106,9 +106,9 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) { sb.WriteString(desc.Name()) - if len(labels) > 0 { + if labels.Len() > 0 { sb.WriteRune('{') - sb.WriteString(record.EncodedLabels()) + sb.WriteString(labels.Encoded()) sb.WriteRune('}') } diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 973f6b33bc3..508fc5dd099 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -39,9 +39,7 @@ type Batcher interface { // by aggregation. 
Process(ctx context.Context, descriptor *Descriptor, - labels []core.KeyValue, - encodedLabels string, - labelEncoder LabelEncoder, + labels Labels, aggregator Aggregator) error // ReadCheckpoint is the interface used by exporters to access @@ -104,25 +102,47 @@ type Producer interface { // Record contains the exported data for a single metric instrument // and label set. type Record struct { - aggregator Aggregator - descriptor *Descriptor - labels []core.KeyValue - encoder LabelEncoder - encodedLabels string + descriptor *Descriptor + labels Labels + aggregator Aggregator +} + +type Labels struct { + ordered []core.KeyValue + encoder LabelEncoder + encoded string +} + +func NewLabels(ordered []core.KeyValue, encoder LabelEncoder, encoded string) Labels { + return Labels{ + ordered: ordered, + encoder: encoder, + encoded: encoded, + } +} + +func (l *Labels) Ordered() []core.KeyValue { + return l.ordered +} + +func (l *Labels) Encoder() LabelEncoder { + return l.encoder +} + +func (l *Labels) Encoded() string { + return l.encoded +} + +func (l *Labels) Len() int { + return len(l.ordered) } // NewRecord allows Batcher implementations to construct export records. -func NewRecord(aggregator Aggregator, - descriptor *Descriptor, - labels []core.KeyValue, - encoder LabelEncoder, - encodedLabels string) Record { +func NewRecord(descriptor *Descriptor, labels Labels, aggregator Aggregator) Record { return Record{ - aggregator: aggregator, - descriptor: descriptor, - labels: labels, - encoder: encoder, - encodedLabels: encodedLabels, + descriptor: descriptor, + labels: labels, + aggregator: aggregator, } } @@ -134,18 +154,10 @@ func (r *Record) Descriptor() *Descriptor { return r.descriptor } -func (r *Record) Labels() []core.KeyValue { +func (r *Record) Labels() Labels { return r.labels } -func (r *Record) LabelEncoder() LabelEncoder { - return r.encoder -} - -func (r *Record) EncodedLabels() string { - return r.encodedLabels -} - // Kind describes the kind of instrument. 
type MetricKind int8 diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys.go b/sdk/metric/batcher/defaultkeys/defaultkeys.go index fceb8dfdc2d..219c499378a 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys.go @@ -31,9 +31,9 @@ type ( } aggEntry struct { - aggregator export.Aggregator descriptor *export.Descriptor - labels []core.KeyValue + labels export.Labels + aggregator export.Aggregator } dkiMap map[*export.Descriptor]map[core.Key]int @@ -62,7 +62,7 @@ func (b *Batcher) AggregatorFor(descriptor *export.Descriptor) export.Aggregator return b.selector.AggregatorFor(descriptor) } -func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, _ export.LabelEncoder, agg export.Aggregator) error { +func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { keys := desc.Keys() // Cache the mapping from Descriptor->Key->Index @@ -88,7 +88,7 @@ func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []c // Note also the possibility to speed this computation of // "encoded" via "canon" in the form of a (Descriptor, // LabelSet)->(Labels, Encoded) cache. 
- for _, kv := range labels { + for _, kv := range labels.Ordered() { pos, ok := ki[kv.Key] if !ok { continue @@ -103,9 +103,9 @@ func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []c rag, ok := b.agg[encoded] if !ok { b.agg[encoded] = aggEntry{ - aggregator: agg, - labels: canon, descriptor: desc, + labels: export.NewLabels(canon, b.lencoder, encoded), + aggregator: agg, } return nil } @@ -124,12 +124,11 @@ func (b *Batcher) ReadCheckpoint() export.Producer { } func (p *producer) Foreach(f func(export.Record)) { - for encoded, entry := range p.aggMap { - f(export.NewRecord(entry.aggregator, + for _, entry := range p.aggMap { + f(export.NewRecord( entry.descriptor, entry.labels, - p.lencoder, - encoded, + entry.aggregator, )) } } diff --git a/sdk/metric/batcher/ungrouped/ungrouped.go b/sdk/metric/batcher/ungrouped/ungrouped.go index d2bd8e688c8..49eb0233a22 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped.go +++ b/sdk/metric/batcher/ungrouped/ungrouped.go @@ -17,7 +17,6 @@ package ungrouped // import "go.opentelemetry.io/otel/sdk/metric/batcher/ungroup import ( "context" - "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" ) @@ -35,8 +34,7 @@ type ( batchValue struct { aggregator export.Aggregator - labels []core.KeyValue - lencoder export.LabelEncoder + labels export.Labels } batchMap map[batchKey]batchValue @@ -57,17 +55,16 @@ func (b *Batcher) AggregatorFor(descriptor *export.Descriptor) export.Aggregator return b.selector.AggregatorFor(descriptor) } -func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, encodedLabels string, labelEncoder export.LabelEncoder, agg export.Aggregator) error { +func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { key := batchKey{ descriptor: desc, - encoded: encodedLabels, + encoded: labels.Encoded(), } value, ok := b.batchMap[key] if !ok { b.batchMap[key] = 
batchValue{ aggregator: agg, labels: labels, - lencoder: labelEncoder, } return nil } @@ -84,10 +81,10 @@ func (b *Batcher) ReadCheckpoint() export.Producer { func (c batchMap) Foreach(f func(export.Record)) { for key, value := range c { - f(export.NewRecord(value.aggregator, + f(export.NewRecord( key.descriptor, value.labels, - value.lencoder, - key.encoded)) + value.aggregator, + )) } } diff --git a/sdk/metric/benchmark_test.go b/sdk/metric/benchmark_test.go index 3cbd5c5c698..eff6e2829ba 100644 --- a/sdk/metric/benchmark_test.go +++ b/sdk/metric/benchmark_test.go @@ -63,7 +63,7 @@ func (bf *benchFixture) AggregatorFor(descriptor *export.Descriptor) export.Aggr return nil } -func (bf *benchFixture) Process(context.Context, *export.Descriptor, []core.KeyValue, string, export.LabelEncoder, export.Aggregator) error { +func (bf *benchFixture) Process(context.Context, *export.Descriptor, export.Labels, export.Aggregator) error { return nil } diff --git a/sdk/metric/monotone_test.go b/sdk/metric/monotone_test.go index a4eb62a4bae..5f607ae4db1 100644 --- a/sdk/metric/monotone_test.go +++ b/sdk/metric/monotone_test.go @@ -45,11 +45,11 @@ func (m *monotoneBatcher) ReadCheckpoint() export.Producer { return nil } -func (m *monotoneBatcher) Process(_ context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, _ export.LabelEncoder, agg export.Aggregator) error { +func (m *monotoneBatcher) Process(_ context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { require.Equal(m.t, "my.gauge.name", desc.Name()) - require.Equal(m.t, 1, len(labels)) - require.Equal(m.t, "a", string(labels[0].Key)) - require.Equal(m.t, "b", labels[0].Value.Emit()) + require.Equal(m.t, 1, labels.Len()) + require.Equal(m.t, "a", string(labels.Ordered()[0].Key)) + require.Equal(m.t, "b", labels.Ordered()[0].Value.Emit()) gauge := agg.(*gauge.Aggregator) val := gauge.LastValue() diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 
0adf7d8aa77..0ebc038442f 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -395,7 +395,8 @@ func (m *SDK) checkpoint(ctx context.Context, r *record) { return } r.recorder.Checkpoint(ctx, r.descriptor) - err := m.batcher.Process(ctx, r.descriptor, r.labels.sorted, r.labels.encoded, m.lencoder, r.recorder) + labels := export.NewLabels(r.labels.sorted, m.lencoder, r.labels.encoded) + err := m.batcher.Process(ctx, r.descriptor, labels, r.recorder) if err != nil { // TODO warn diff --git a/sdk/metric/stress_test.go b/sdk/metric/stress_test.go index 50d2fc31d6e..6590bd42ca7 100644 --- a/sdk/metric/stress_test.go +++ b/sdk/metric/stress_test.go @@ -242,9 +242,9 @@ func (f *testFixture) ReadCheckpoint() export.Producer { return nil } -func (f *testFixture) Process(ctx context.Context, desc *export.Descriptor, labels []core.KeyValue, _ string, _ export.LabelEncoder, agg export.Aggregator) error { +func (f *testFixture) Process(ctx context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { key := testKey{ - labels: canonicalizeLabels(labels), + labels: canonicalizeLabels(labels.Ordered()), descriptor: desc, } if f.dupCheck[key] == 0 { From 94580ae572a522a4f490e159059252b2268d60b5 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 6 Nov 2019 16:19:14 -0800 Subject: [PATCH 30/73] Comments in export/metric.go --- sdk/export/metric/metric.go | 218 ++++++++++++++---- sdk/metric/batcher/defaultkeys/defaultkeys.go | 2 +- sdk/metric/sdk.go | 2 +- 3 files changed, 177 insertions(+), 45 deletions(-) diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 508fc5dd099..de194a42209 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -21,60 +21,115 @@ import ( "go.opentelemetry.io/otel/api/unit" ) -// Batcher is responsible for deciding which kind of aggregation -// to use and gathering exported results from the SDK. 
The standard SDK -// supports binding only one of these interfaces, i.e., a single exporter. +// Batcher is responsible for deciding which kind of aggregation to +// use (via AggregationSelector), gathering exported results from the +// SDK during collection, and deciding over which dimensions to group +// the exported data. // -// Multiple-exporters could be implemented by implementing this interface -// for a group of Batcher. +// The SDK supports binding only one of these interfaces, as it has +// the sole responsibility of determining which Aggregator to use for +// each record. +// +// The embedded AggregationSelector interface is called (concurrently) +// in instrumentation context to select the appropriate Aggregator for +// an instrument. +// +// The `Process` method is called during collection in a +// single-threaded context from the SDK, after the aggregator is +// checkpointed, allowing the batcher to build the set of metrics +// currently being exported. +// +// The `ReadCheckpoint` method is called during collection in a +// single-threaded context from the Exporter, giving the exporter +// access to a producer for iterating over the complete checkpoint. type Batcher interface { // AggregationSelector is responsible for selecting the - // concrete type of aggregation used for a metric in the SDK. + // concrete type of Aggregator used for a metric in the SDK. + // + // This may be a static decision based on fields of the + // Descriptor, or it could use an external configuration + // source to customize the treatment of each metric + // instrument. + // + // The result from AggregatorSelector.AggregatorFor should be + // deterministic given a metric instrument and label set, + // since occasionally the SDK will have multiple Aggregators + // for the same metric, due to tolerated race conditions. AggregationSelector - // Process receives pairs of records and aggregators - // during the SDK Collect(). 
Exporter implementations - // must access the specific aggregator to receive the - // exporter data, since the format of the data varies - // by aggregation. + // Process is called by the SDK once per internal record, + // passing the descriptor, the corresponding labels, and the + // checkpointed aggregator. The Batcher should be prepared to + // process duplicate (Descriptor, Labels) pairs during this + // pass due to race conditions, but this will usually be the + // ordinary course of events, as Aggregators are merged to + // reduce their dimensionality (i.e., group-by). + // + // The Context argument originates from the controller that + // orchestrates collection. Process(ctx context.Context, descriptor *Descriptor, labels Labels, aggregator Aggregator) error - // ReadCheckpoint is the interface used by exporters to access - // aggregate checkpoints after collection. + // ReadCheckpoint is the interface used by the controller to + // access the fully aggregated checkpoint after collection. + // + // The returned Producer is passed to the Exporter. ReadCheckpoint() Producer } -// AggregationSelector supports selecting the kind of aggregator -// to use at runtime for a specific metric instrument. +// AggregationSelector supports selecting the kind of Aggregator to +// use at runtime for a specific metric instrument. type AggregationSelector interface { - // AggregatorFor should return the kind of aggregator suited - // to the requested export. Returning `nil` indicates to - // ignore this metric instrument. Although it is not - // required, this should return a consistent type to avoid - // confusion in later stages of the metrics export process. + // AggregatorFor returns the kind of aggregator suited to the + // requested export. Returning `nil` indicates to ignore this + // metric instrument. 
This must return a consistent type to + // avoid confusion in later stages of the metrics export + // process, i.e., when Merging multiple aggregators for a + // specific instrument. // - // Note: This is context-free because the handle should not be - // bound to the incoming context. This call should not block. + // Note: This is context-free because the aggregator should + // not relate to the incoming context. This call should not + // block. AggregatorFor(*Descriptor) Aggregator } -// Aggregator implements a specific aggregation behavior, e.g., -// a counter, a gauge, a histogram. +// Aggregator implements a specific aggregation behavior, e.g., a +// behavior to track a sequence of updates to a counter, a gauge, or a +// measure instrument. For the most part, counter and gauge semantics +// are fixed and the provided implementations should be used. Measure +// metrics offer a wide range of potential tradeoffs and several +// implementations are provided. +// +// Note that any Aggregator may be attached to any instrument--this is +// the result of the OpenTelemetry API/SDK separation. It is possible +// to attach a counter aggregator to a measure instrument (to compute +// a simple sum) or a gauge instrument to a measure instrument (to +// compute the last value). type Aggregator interface { // Update receives a new measured value and incorporates it // into the aggregation. Update() calls may arrive - // concurrently. + // concurrently as the SDK does not provide synchronization. + // + // Descriptor.NumberKind() should be consulted to determine + // whether the provided number is an int64 or float64. + // + // The Context argument comes from user-level code and could be + // inspected for distributed or span context. Update(context.Context, core.Number, *Descriptor) error - // Checkpoint is called during the SDK Collect() to finish one + // Checkpoint is called in collection context to finish one // period of aggregation. 
Checkpoint() is called in a - // single-threaded context. + // single-threaded context, no locking is required. + // + // The Context argument originates from the controller that + // orchestrates collection. Checkpoint(context.Context, *Descriptor) - // Merge combines state from two aggregators into one. + // Merge combines state from the argument aggregator into this + // one. Merge() is called in a single-threaded context, no + // locking is required. Merge(Aggregator, *Descriptor) error } @@ -82,20 +137,47 @@ type Aggregator interface { // metrics. This is the final stage of a metrics export pipeline, // where metric data are formatted for a specific system. type Exporter interface { + // Export is called immediately after completing a collection + // pass in the SDK. + // + // The Context comes from the controller that initiated + // collection. + // + // The Producer interface refers to the Batcher that just + // completed collection. Export(context.Context, Producer) } -// LabelEncoder enables an optimization for export pipelines that -// use text to encode their label sets. This interface allows configuring -// the encoder used in the SDK and/or the Batcher so that by the -// time the exporter is called, the same encoding may be used. +// LabelEncoder enables an optimization for export pipelines that use +// text to encode their label sets. +// +// This interface allows configuring the encoder used in the SDK +// and/or the Batcher so that by the time the exporter is called, the +// same encoding may be used. +// +// If none is provided, a default will be used. type LabelEncoder interface { + // EncodeLabels is called (concurrently) in instrumentation + // context. It should return a unique representation of the + // labels suitable for the SDK to use as a map key. + // + // The exported Labels object retains a reference to its + // LabelEncoder to determine which encoding was used. 
+ // + // The expectation is that Exporters with a pre-determined to + // syntax for serialized label sets should implement + // LabelEncoder, thus avoiding duplicate computation in the + // export path. EncodeLabels([]core.KeyValue) string } -// Producer allows a Exporter to access a checkpoint of -// aggregated metrics one at a time. +// Producer allows a controller to access a complete checkpoint of +// aggregated metrics from the Batcher. This is passed to the +// Exporter which may then use Foreach to iterate over the collection +// of aggregated metrics. type Producer interface { + // Foreach iterates over all metrics that were updated during + // the last collection period. Foreach(func(Record)) } @@ -107,37 +189,51 @@ type Record struct { aggregator Aggregator } +// Labels stores complete information about a computed label set, +// including the labels in an appropriate order (as defined by the +// Batcher). If the batcher does not re-order labels, they are +// presented in sorted order by the SDK. type Labels struct { ordered []core.KeyValue - encoder LabelEncoder encoded string + encoder LabelEncoder } -func NewLabels(ordered []core.KeyValue, encoder LabelEncoder, encoded string) Labels { +// NewLabels builds a Labels object, consisting of an ordered set of +// labels, a unique encoded representation, and the encoder that +// produced it. +func NewLabels(ordered []core.KeyValue, encoded string, encoder LabelEncoder) Labels { return Labels{ ordered: ordered, - encoder: encoder, encoded: encoded, + encoder: encoder, } } +// Ordered returns the labels in a specified order, according to the +// Batcher. func (l *Labels) Ordered() []core.KeyValue { return l.ordered } -func (l *Labels) Encoder() LabelEncoder { - return l.encoder -} - +// Encoded is a pre-encoded form of the ordered labels. func (l *Labels) Encoded() string { return l.encoded } +// Encoder is the encoder that computed the Encoded() representation. 
+func (l *Labels) Encoder() LabelEncoder { + return l.encoder +} + +// Len returns the number of labels. func (l *Labels) Len() int { return len(l.ordered) } -// NewRecord allows Batcher implementations to construct export records. +// NewRecord allows Batcher implementations to construct export +// records. The Descriptor, Labels, and Aggregator represent +// aggregate metric events recieved over a single collection period. func NewRecord(descriptor *Descriptor, labels Labels, aggregator Aggregator) Record { return Record{ descriptor: descriptor, @@ -146,14 +242,19 @@ func NewRecord(descriptor *Descriptor, labels Labels, aggregator Aggregator) Rec } } +// Aggregator returns the checkpointed aggregator. It is safe to +// access the checkpointed state without locking. func (r *Record) Aggregator() Aggregator { return r.aggregator } +// Descriptor describes the metric instrument being exported. func (r *Record) Descriptor() *Descriptor { return r.descriptor } +// Labels describes the labels associated with the instrument and the +// aggregated data. func (r *Record) Labels() Labels { return r.labels } @@ -162,12 +263,21 @@ func (r *Record) Labels() Labels { type MetricKind int8 const ( + // Counter kind indicates a counter instrument. CounterKind MetricKind = iota + + // Gauge kind indicates a gauge instrument. GaugeKind + + // Measure kind indicates a measure instrument. MeasureKind ) // Descriptor describes a metric instrument to the exporter. +// +// Descriptors are created once per instrument and a pointer to the +// descriptor may be used to uniquely identfy the instrument in an +// exporter. type Descriptor struct { name string metricKind MetricKind @@ -179,7 +289,11 @@ type Descriptor struct { } // NewDescriptor builds a new descriptor, for use by `Meter` -// implementations. +// implementations in constructing new metric instruments. 
+// +// Descriptors are created once per instrument and a pointer to the +// descriptor may be used to uniquely identfy the instrument in an +// exporter. func NewDescriptor( name string, metricKind MetricKind, @@ -200,30 +314,48 @@ func NewDescriptor( } } +// Name returns the metric instrument's name. func (d *Descriptor) Name() string { return d.name } +// MetricKind returns the kind of instrument: counter, gauge, or +// measure. func (d *Descriptor) MetricKind() MetricKind { return d.metricKind } +// Keys returns the recommended keys included in the metric +// definition. These keys may be used by a Batcher as a default set +// of grouping keys for the metric instrument. func (d *Descriptor) Keys() []core.Key { return d.keys } +// Description provides a human-readable description of the metric +// instrument. func (d *Descriptor) Description() string { return d.description } +// Unit describes the units of the metric instrument. Unitless +// metrics return the empty string. func (d *Descriptor) Unit() unit.Unit { return d.unit } +// NumberKind returns whether this instrument is declared over int64 +// or a float64 values. func (d *Descriptor) NumberKind() core.NumberKind { return d.numberKind } +// Alternate returns true when the non-default behavior of the +// instrument was selected. 
It returns true if: +// +// - A counter instrument is non-monotonic +// - A gauge instrument is monotonic +// - A measure instrument is non-absolute func (d *Descriptor) Alternate() bool { return d.alternate } diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys.go b/sdk/metric/batcher/defaultkeys/defaultkeys.go index 219c499378a..c35c2781346 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys.go @@ -104,7 +104,7 @@ func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels exp if !ok { b.agg[encoded] = aggEntry{ descriptor: desc, - labels: export.NewLabels(canon, b.lencoder, encoded), + labels: export.NewLabels(canon, encoded, b.lencoder), aggregator: agg, } return nil diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 0ebc038442f..726136eddc5 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -395,7 +395,7 @@ func (m *SDK) checkpoint(ctx context.Context, r *record) { return } r.recorder.Checkpoint(ctx, r.descriptor) - labels := export.NewLabels(r.labels.sorted, m.lencoder, r.labels.encoded) + labels := export.NewLabels(r.labels.sorted, r.labels.encoded, m.lencoder) err := m.batcher.Process(ctx, r.descriptor, labels, r.recorder) if err != nil { From 2dee9261cdf61fb80921b54493b6360be8618db9 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 6 Nov 2019 16:21:04 -0800 Subject: [PATCH 31/73] Comment --- sdk/metric/doc.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/sdk/metric/doc.go b/sdk/metric/doc.go index d83dbc58484..d909203f5e4 100644 --- a/sdk/metric/doc.go +++ b/sdk/metric/doc.go @@ -127,7 +127,7 @@ Aggregators implement a Merge method, also called in collection context, that combines state from two aggregators into one. Each SDK record has an associated aggregator. 
-Batcher +Batcher is an interface which implements LabelEncoder @@ -141,9 +141,5 @@ Controller metric.MeterProvider - -TODO: think about name for Producer/ProducedRecord - - */ package metric // import "go.opentelemetry.io/otel/sdk/metric" From a7623d82698f17bdef5c36fe7bffd8a1fac10831 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 6 Nov 2019 16:23:26 -0800 Subject: [PATCH 32/73] More merge --- sdk/metric/sdk.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 71d71145612..6791e3e17a8 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -178,10 +178,9 @@ func (i *instrument) acquireHandle(ls *labels) *record { refcount: 1, collectedEpoch: -1, modifiedEpoch: 0, + recorder: i.meter.batcher.AggregatorFor(i.descriptor), } - rec.recorder = i.meter.batcher.AggregatorFor(rec.descriptor) - // Load/Store: there's a memory allocation to place `mk` into // an interface here. if actual, loaded := i.meter.current.LoadOrStore(mk, rec); loaded { From 8fcfe95f7b07d8d71e0c32d5fbde516e45237042 Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 6 Nov 2019 16:35:57 -0800 Subject: [PATCH 33/73] More doc --- sdk/metric/doc.go | 50 +++++++++++++++++++++++++++++++++++------------ 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/sdk/metric/doc.go b/sdk/metric/doc.go index d909203f5e4..9394d0fca00 100644 --- a/sdk/metric/doc.go +++ b/sdk/metric/doc.go @@ -127,19 +127,43 @@ Aggregators implement a Merge method, also called in collection context, that combines state from two aggregators into one. Each SDK record has an associated aggregator. -Batcher is an interface which implements - -LabelEncoder - -Producer - -ProducedRecord - -Exporter - -Controller - -metric.MeterProvider +Batcher is an interface which sits between the SDK and an exporter. +The Batcher embeds an AggregationSelector, used by the SDK to assign +new Aggregators. 
The Batcher supports a Process() API for submitting +checkpointed aggregators to the batcher, and a ReadCheckpoint() API +for producing a complete checkpoint for the exporter. Two default +Batcher implementations are provided, the "defaultkeys" Batcher groups +aggregate metrics by their recommended Descriptor.Keys(), the +"ungrouped" Batcher aggregates metrics at full dimensionality. + +LabelEncoder is an optional optimization that allows an exporter to +provide the serialization logic for labels. This allows avoiding +duplicate serialization of labels, once as a unique key in the SDK (or +Batcher) and once in the exporter. + +Producer is an interface between the Batcher and the controller. +After completing a collection pass, the Batcher.ReadCheckpoint() +method returns a Producer, which the Exporter uses to iterate over all +the updated metrics. + +Record is a struct containing the state of an individual exported +metric. This is the result of one collection interface for one +instrument and one label set. + +Labels is a struct containing an ordered set of labels, the +corresponding unique encoding, and the encoder that produced it. + +Exporter is the final stage of an export pipeline. It is called with +a Producer capable of enumerating all the updated metrics. + +Controller is not an export interface per se, but it orchestrates the +export pipeline. For example, a "push" controller will establish a +periodic timer to regularly collect and export metrics. A "pull" +controller will await a pull request before initiating metric +collection. Either way, the job of the controller is to call the SDK +Collect() method, then read the checkpoint, then invoke the exporter. +Controllers are expected to implement the public metric.MeterProvider +API, meaning they can be installed as the global Meter provider. 
*/ package metric // import "go.opentelemetry.io/otel/sdk/metric" From df3b3afd13770c2859801b9ee3587b77f7b99aaf Mon Sep 17 00:00:00 2001 From: jmacd Date: Wed, 6 Nov 2019 16:44:46 -0800 Subject: [PATCH 34/73] Complete example --- example/basic/main.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/example/basic/main.go b/example/basic/main.go index 9fbd12abaca..d325053961a 100644 --- a/example/basic/main.go +++ b/example/basic/main.go @@ -56,19 +56,19 @@ func initTracer() { global.SetTraceProvider(tp) } -func initMeter() { +func initMeter() *push.Controller { selector := simple.New() - exporter := metricstdout.New(metricstdout.Options{PrettyPrint: true}) + exporter := metricstdout.New(metricstdout.Options{PrettyPrint: false}) batcher := defaultkeys.New(selector, metricsdk.DefaultLabelEncoder(), true) pusher := push.New(batcher, exporter, time.Second) pusher.Start() - defer pusher.Stop() global.SetMeterProvider(pusher) + return pusher } func main() { - initMeter() + defer initMeter().Stop() initTracer() // Note: Have to get the meter and tracer after the global is From 4121362b549b1febd8eb8fa74bd52e8b99f0da3e Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 7 Nov 2019 09:53:43 -0800 Subject: [PATCH 35/73] Lint fixes --- go.sum | 1 - sdk/export/metric/metric.go | 2 +- sdk/metric/aggregator/array/array_test.go | 25 +++++++++---------- sdk/metric/aggregator/counter/counter_test.go | 16 ++++++------ .../aggregator/ddsketch/ddsketch_test.go | 14 +++++------ sdk/metric/aggregator/gauge/gauge_test.go | 25 +++++++++++-------- sdk/metric/aggregator/maxsumcount/msc_test.go | 8 +++--- sdk/metric/aggregator/test/test.go | 13 ++++++++-- sdk/metric/sdk.go | 1 + 9 files changed, 59 insertions(+), 46 deletions(-) diff --git a/go.sum b/go.sum index bc0be47acd3..cadf1b265e7 100644 --- a/go.sum +++ b/go.sum @@ -277,7 +277,6 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV github.com/xiang90/probing 
v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opentelemetry.io v0.1.0 h1:EANZoRCOP+A3faIlw/iN6YEWoYb1vleZRKm1EvH8T48= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index de194a42209..b79ebe79ad0 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -233,7 +233,7 @@ func (l *Labels) Len() int { // NewRecord allows Batcher implementations to construct export // records. The Descriptor, Labels, and Aggregator represent -// aggregate metric events recieved over a single collection period. +// aggregate metric events received over a single collection period. 
func NewRecord(descriptor *Descriptor, labels Labels, aggregator Aggregator) Record { return Record{ descriptor: descriptor, diff --git a/sdk/metric/aggregator/array/array_test.go b/sdk/metric/aggregator/array/array_test.go index 351af2ec346..99ab8e99a8d 100644 --- a/sdk/metric/aggregator/array/array_test.go +++ b/sdk/metric/aggregator/array/array_test.go @@ -34,8 +34,6 @@ type updateTest struct { } func (ut *updateTest) run(t *testing.T, profile test.Profile) { - ctx := context.Background() - descriptor := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !ut.absolute) agg := New() @@ -45,15 +43,16 @@ func (ut *updateTest) run(t *testing.T, profile test.Profile) { for i := 0; i < ut.count; i++ { x := profile.Random(+1) all.Append(x) - test.CheckedUpdate(ctx, agg, x, descriptor) + test.CheckedUpdate(t, agg, x, descriptor) if !ut.absolute { y := profile.Random(-1) all.Append(y) - test.CheckedUpdate(ctx, agg, y, descriptor) + test.CheckedUpdate(t, agg, y, descriptor) } } + ctx := context.Background() agg.Checkpoint(ctx, descriptor) all.Sort() @@ -116,27 +115,27 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { for i := 0; i < mt.count; i++ { x1 := profile.Random(+1) all.Append(x1) - test.CheckedUpdate(ctx, agg1, x1, descriptor) + test.CheckedUpdate(t, agg1, x1, descriptor) x2 := profile.Random(+1) all.Append(x2) - test.CheckedUpdate(ctx, agg2, x2, descriptor) + test.CheckedUpdate(t, agg2, x2, descriptor) if !mt.absolute { y1 := profile.Random(-1) all.Append(y1) - test.CheckedUpdate(ctx, agg1, y1, descriptor) + test.CheckedUpdate(t, agg1, y1, descriptor) y2 := profile.Random(-1) all.Append(y2) - test.CheckedUpdate(ctx, agg2, y2, descriptor) + test.CheckedUpdate(t, agg2, y2, descriptor) } } agg1.Checkpoint(ctx, descriptor) agg2.Checkpoint(ctx, descriptor) - agg1.Merge(agg2, descriptor) + test.CheckedMerge(t, agg1, agg2, descriptor) all.Sort() @@ -200,10 +199,10 @@ func TestArrayErrors(t *testing.T) { descriptor := 
test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false) - test.CheckedUpdate(ctx, agg, core.Number(0), descriptor) + test.CheckedUpdate(t, agg, core.Number(0), descriptor) if profile.NumberKind == core.Float64NumberKind { - test.CheckedUpdate(ctx, agg, core.NewFloat64Number(math.NaN()), descriptor) + test.CheckedUpdate(t, agg, core.NewFloat64Number(math.NaN()), descriptor) } agg.Checkpoint(ctx, descriptor) @@ -263,13 +262,13 @@ func TestArrayFloat64(t *testing.T) { for _, f := range fpsf(1) { all.Append(core.NewFloat64Number(f)) - test.CheckedUpdate(ctx, agg, core.NewFloat64Number(f), descriptor) + test.CheckedUpdate(t, agg, core.NewFloat64Number(f), descriptor) } if !absolute { for _, f := range fpsf(-1) { all.Append(core.NewFloat64Number(f)) - test.CheckedUpdate(ctx, agg, core.NewFloat64Number(f), descriptor) + test.CheckedUpdate(t, agg, core.NewFloat64Number(f), descriptor) } } diff --git a/sdk/metric/aggregator/counter/counter_test.go b/sdk/metric/aggregator/counter/counter_test.go index 6d0dd189a6e..a876bcb879c 100644 --- a/sdk/metric/aggregator/counter/counter_test.go +++ b/sdk/metric/aggregator/counter/counter_test.go @@ -39,7 +39,7 @@ func TestCounterMonotonic(t *testing.T) { for i := 0; i < count; i++ { x := profile.Random(+1) sum.AddNumber(profile.NumberKind, x) - test.CheckedUpdate(ctx, agg, x, descriptor) + test.CheckedUpdate(t, agg, x, descriptor) } agg.Checkpoint(ctx, descriptor) @@ -57,11 +57,11 @@ func TestCounterMonotonicNegative(t *testing.T) { descriptor := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false) for i := 0; i < count; i++ { - test.CheckedUpdate(ctx, agg, profile.Random(-1), descriptor) + test.CheckedUpdate(t, agg, profile.Random(-1), descriptor) } sum := profile.Random(+1) - test.CheckedUpdate(ctx, agg, sum, descriptor) + test.CheckedUpdate(t, agg, sum, descriptor) agg.Checkpoint(ctx, descriptor) require.Equal(t, sum, agg.Sum(), "Same sum - monotonic") @@ -82,8 +82,8 @@ func TestCounterNonMonotonic(t 
*testing.T) { y := profile.Random(-1) sum.AddNumber(profile.NumberKind, x) sum.AddNumber(profile.NumberKind, y) - test.CheckedUpdate(ctx, agg, x, descriptor) - test.CheckedUpdate(ctx, agg, y, descriptor) + test.CheckedUpdate(t, agg, x, descriptor) + test.CheckedUpdate(t, agg, y, descriptor) } agg.Checkpoint(ctx, descriptor) @@ -105,14 +105,14 @@ func TestCounterMerge(t *testing.T) { for i := 0; i < count; i++ { x := profile.Random(+1) sum.AddNumber(profile.NumberKind, x) - test.CheckedUpdate(ctx, agg1, x, descriptor) - test.CheckedUpdate(ctx, agg2, x, descriptor) + test.CheckedUpdate(t, agg1, x, descriptor) + test.CheckedUpdate(t, agg2, x, descriptor) } agg1.Checkpoint(ctx, descriptor) agg2.Checkpoint(ctx, descriptor) - agg1.Merge(agg2, descriptor) + test.CheckedMerge(t, agg1, agg2, descriptor) sum.AddNumber(descriptor.NumberKind(), sum) diff --git a/sdk/metric/aggregator/ddsketch/ddsketch_test.go b/sdk/metric/aggregator/ddsketch/ddsketch_test.go index 3364e0d69c6..53911aa44c9 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch_test.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch_test.go @@ -41,12 +41,12 @@ func (ut *updateTest) run(t *testing.T, profile test.Profile) { for i := 0; i < count; i++ { x := profile.Random(+1) all.Append(x) - test.CheckedUpdate(ctx, agg, x, descriptor) + test.CheckedUpdate(t, agg, x, descriptor) if !ut.absolute { y := profile.Random(-1) all.Append(y) - test.CheckedUpdate(ctx, agg, y, descriptor) + test.CheckedUpdate(t, agg, y, descriptor) } } @@ -105,31 +105,31 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { for i := 0; i < count; i++ { x := profile.Random(+1) all.Append(x) - test.CheckedUpdate(ctx, agg1, x, descriptor) + test.CheckedUpdate(t, agg1, x, descriptor) if !mt.absolute { y := profile.Random(-1) all.Append(y) - test.CheckedUpdate(ctx, agg1, y, descriptor) + test.CheckedUpdate(t, agg1, y, descriptor) } } for i := 0; i < count; i++ { x := profile.Random(+1) all.Append(x) - test.CheckedUpdate(ctx, agg2, x, 
descriptor) + test.CheckedUpdate(t, agg2, x, descriptor) if !mt.absolute { y := profile.Random(-1) all.Append(y) - test.CheckedUpdate(ctx, agg2, y, descriptor) + test.CheckedUpdate(t, agg2, y, descriptor) } } agg1.Checkpoint(ctx, descriptor) agg2.Checkpoint(ctx, descriptor) - agg1.Merge(agg2, descriptor) + test.CheckedMerge(t, agg1, agg2, descriptor) all.Sort() diff --git a/sdk/metric/aggregator/gauge/gauge_test.go b/sdk/metric/aggregator/gauge/gauge_test.go index 7815896bd97..f4482a67dee 100644 --- a/sdk/metric/aggregator/gauge/gauge_test.go +++ b/sdk/metric/aggregator/gauge/gauge_test.go @@ -23,6 +23,7 @@ import ( "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator" "go.opentelemetry.io/otel/sdk/metric/aggregator/test" ) @@ -42,7 +43,7 @@ func TestGaugeNonMonotonic(t *testing.T) { for i := 0; i < count; i++ { x := profile.Random(rand.Intn(1)*2 - 1) last = x - test.CheckedUpdate(ctx, agg, x, record) + test.CheckedUpdate(t, agg, x, record) } agg.Checkpoint(ctx, record) @@ -64,7 +65,7 @@ func TestGaugeMonotonic(t *testing.T) { for i := 0; i < count; i++ { x := profile.Random(+1) last.AddNumber(profile.NumberKind, x) - test.CheckedUpdate(ctx, agg, last, record) + test.CheckedUpdate(t, agg, last, record) } agg.Checkpoint(ctx, record) @@ -82,11 +83,15 @@ func TestGaugeMonotonicDescending(t *testing.T) { record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true) first := profile.Random(+1) - test.CheckedUpdate(ctx, agg, first, record) + test.CheckedUpdate(t, agg, first, record) for i := 0; i < count; i++ { x := profile.Random(-1) - test.CheckedUpdate(ctx, agg, x, record) + + err := agg.Update(ctx, x, record) + if err != aggregator.ErrNonMonotoneInput { + t.Error("Expected ErrNonMonotoneInput", err) + } } agg.Checkpoint(ctx, record) @@ -108,8 +113,8 @@ func TestGaugeNormalMerge(t *testing.T) { first2 := profile.Random(+1) first1.AddNumber(profile.NumberKind, first2) - 
test.CheckedUpdate(ctx, agg1, first1, descriptor) - test.CheckedUpdate(ctx, agg2, first2, descriptor) + test.CheckedUpdate(t, agg1, first1, descriptor) + test.CheckedUpdate(t, agg2, first2, descriptor) agg1.Checkpoint(ctx, descriptor) agg2.Checkpoint(ctx, descriptor) @@ -118,7 +123,7 @@ func TestGaugeNormalMerge(t *testing.T) { t2 := agg2.Timestamp() require.True(t, t1.Before(t2)) - agg1.Merge(agg2, descriptor) + test.CheckedMerge(t, agg1, agg2, descriptor) require.Equal(t, t2, agg1.Timestamp(), "Merged timestamp - non-monotonic") require.Equal(t, first2, agg1.LastValue(), "Merged value - non-monotonic") @@ -135,16 +140,16 @@ func TestGaugeMonotonicMerge(t *testing.T) { descriptor := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true) first1 := profile.Random(+1) - test.CheckedUpdate(ctx, agg1, first1, descriptor) + test.CheckedUpdate(t, agg1, first1, descriptor) first2 := profile.Random(+1) first2.AddNumber(profile.NumberKind, first1) - test.CheckedUpdate(ctx, agg2, first2, descriptor) + test.CheckedUpdate(t, agg2, first2, descriptor) agg1.Checkpoint(ctx, descriptor) agg2.Checkpoint(ctx, descriptor) - agg1.Merge(agg2, descriptor) + test.CheckedMerge(t, agg1, agg2, descriptor) require.Equal(t, first2, agg1.LastValue(), "Merged value - monotonic") require.Equal(t, agg2.Timestamp(), agg1.Timestamp(), "Merged timestamp - monotonic") diff --git a/sdk/metric/aggregator/maxsumcount/msc_test.go b/sdk/metric/aggregator/maxsumcount/msc_test.go index 77d560cdb1a..47680677faf 100644 --- a/sdk/metric/aggregator/maxsumcount/msc_test.go +++ b/sdk/metric/aggregator/maxsumcount/msc_test.go @@ -39,7 +39,7 @@ func TestMaxSumCountAbsolute(t *testing.T) { for i := 0; i < count; i++ { x := profile.Random(+1) all.Append(x) - test.CheckedUpdate(ctx, agg, x, record) + test.CheckedUpdate(t, agg, x, record) } agg.Checkpoint(ctx, record) @@ -76,18 +76,18 @@ func TestMaxSumCountMerge(t *testing.T) { for i := 0; i < count; i++ { x := profile.Random(+1) all.Append(x) - 
test.CheckedUpdate(ctx, agg1, x, descriptor) + test.CheckedUpdate(t, agg1, x, descriptor) } for i := 0; i < count; i++ { x := profile.Random(+1) all.Append(x) - test.CheckedUpdate(ctx, agg2, x, descriptor) + test.CheckedUpdate(t, agg2, x, descriptor) } agg1.Checkpoint(ctx, descriptor) agg2.Checkpoint(ctx, descriptor) - agg1.Merge(agg2, descriptor) + test.CheckedMerge(t, agg1, agg2, descriptor) all.Sort() diff --git a/sdk/metric/aggregator/test/test.go b/sdk/metric/aggregator/test/test.go index 68db76f8263..914d68db205 100644 --- a/sdk/metric/aggregator/test/test.go +++ b/sdk/metric/aggregator/test/test.go @@ -125,8 +125,17 @@ func (n *Numbers) Median() core.Number { } // Performs the same range test the SDK does on behalf of the aggregator. -func CheckedUpdate(ctx context.Context, agg export.Aggregator, number core.Number, descriptor *export.Descriptor) { +func CheckedUpdate(t *testing.T, agg export.Aggregator, number core.Number, descriptor *export.Descriptor) { + ctx := context.Background() if err := aggregator.RangeTest(number, descriptor); err == nil { - agg.Update(ctx, number, descriptor) + if err := agg.Update(ctx, number, descriptor); err != nil { + t.Error("Unexpected Update failure", err) + } + } +} + +func CheckedMerge(t *testing.T, aggInto, aggFrom export.Aggregator, descriptor *export.Descriptor) { + if err := aggInto.Merge(aggFrom, descriptor); err != nil { + t.Error("Unexpected Merge failure", err) } } diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 6791e3e17a8..c83cbc7cb9c 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -407,6 +407,7 @@ func (m *SDK) checkpoint(ctx context.Context, r *record) { if err != nil { // TODO warn + _ = err } } From 41d3f7ba966ac5f2519206193ee5aa268c6c5dab Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 7 Nov 2019 11:20:50 -0800 Subject: [PATCH 36/73] Add a testable example --- exporter/metric/stdout/stdout.go | 17 +++++++-- sdk/metric/example_test.go | 60 ++++++++++++++++++++++++++++++++ 2 files changed, 
74 insertions(+), 3 deletions(-) create mode 100644 sdk/metric/example_test.go diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index 1212cbe826f..174fe6b5cb6 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -39,6 +39,10 @@ type Options struct { // making it print "pretty". Default is false. PrettyPrint bool + // DoNotPrintTime suppresses timestamp printing. This is + // useful to create testable examples or if the are being + DoNotPrintTime bool + // Quantiles are the desired aggregation quantiles for measure // metric data, used when the configured aggregator supports // quantiles. @@ -50,7 +54,7 @@ type Options struct { } type expoBatch struct { - Timestamp time.Time `json:"time,omitempty"` + Timestamp *time.Time `json:"time,omitempty"` Updates []expoLine `json:"updates,omitempty"` } @@ -60,7 +64,9 @@ type expoLine struct { Sum interface{} `json:"sum,omitempty"` Count interface{} `json:"count,omitempty"` LastValue interface{} `json:"last,omitempty"` - Timestamp time.Time `json:"time,omitempty"` + + // Note: this is a pointer because omitempty doesn't work when time.IsZero() + Timestamp *time.Time `json:"time,omitempty"` } var _ export.Exporter = &Exporter{} @@ -76,6 +82,10 @@ func New(options Options) *Exporter { func (e *Exporter) Export(_ context.Context, producer export.Producer) { var batch expoBatch + if !e.options.DoNotPrintTime { + ts := time.Now() + batch.Timestamp = &ts + } producer.Foreach(func(record export.Record) { desc := record.Descriptor() labels := record.Labels() @@ -86,8 +96,9 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) { expose.Sum = sum.Sum().Emit(desc.NumberKind()) } else if lv, ok := agg.(aggregator.LastValue); ok { + ts := lv.Timestamp() expose.LastValue = lv.LastValue().Emit(desc.NumberKind()) - expose.Timestamp = lv.Timestamp() + expose.Timestamp = &ts } else if msc, ok := agg.(aggregator.MaxSumCount); ok { expose.Max = 
msc.Max().Emit(desc.NumberKind()) diff --git a/sdk/metric/example_test.go b/sdk/metric/example_test.go new file mode 100644 index 00000000000..bab9d46ea32 --- /dev/null +++ b/sdk/metric/example_test.go @@ -0,0 +1,60 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric_test + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/api/key" + "go.opentelemetry.io/otel/api/metric" + "go.opentelemetry.io/otel/exporter/metric/stdout" + sdk "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/batcher/defaultkeys" + "go.opentelemetry.io/otel/sdk/metric/controller/push" + "go.opentelemetry.io/otel/sdk/metric/selector/simple" +) + +func ExampleSetupMeter() { + selector := simple.New() + exporter := stdout.New(stdout.Options{ + PrettyPrint: true, + DoNotPrintTime: true, // This makes the output deterministic + }) + batcher := defaultkeys.New(selector, sdk.DefaultLabelEncoder(), true) + pusher := push.New(batcher, exporter, time.Second) + pusher.Start() + defer pusher.Stop() + + ctx := context.Background() + + key := key.New("key") + meter := pusher.GetMeter("example") + + counter := meter.NewInt64Counter("a.counter", metric.WithKeys(key)) + labels := meter.Labels(key.String("value")) + + counter.Add(ctx, 100, labels) + + // Output: + // { + // "updates": [ + // { + // "name": "a.counter{key=value}", + // "sum": "100" + // } + // ] + // } +} From 
72872fa37227d858dbb58723ffb380f9a4be3a73 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 7 Nov 2019 11:35:05 -0800 Subject: [PATCH 37/73] Lint --- sdk/metric/example_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/metric/example_test.go b/sdk/metric/example_test.go index bab9d46ea32..e13c5421ce6 100644 --- a/sdk/metric/example_test.go +++ b/sdk/metric/example_test.go @@ -27,7 +27,7 @@ import ( "go.opentelemetry.io/otel/sdk/metric/selector/simple" ) -func ExampleSetupMeter() { +func ExampleNew() { selector := simple.New() exporter := stdout.New(stdout.Options{ PrettyPrint: true, From 0160c3d0cf1b9a897028cf521e94444ef2f6d843 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 7 Nov 2019 23:45:38 -0800 Subject: [PATCH 38/73] Let Export return an error --- exporter/metric/stdout/stdout.go | 6 +++--- sdk/export/metric/metric.go | 2 +- sdk/metric/controller/push/push.go | 7 ++++++- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index 174fe6b5cb6..5208c221dc8 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -80,7 +80,7 @@ func New(options Options) *Exporter { } } -func (e *Exporter) Export(_ context.Context, producer export.Producer) { +func (e *Exporter) Export(_ context.Context, producer export.Producer) error { var batch expoBatch if !e.options.DoNotPrintTime { ts := time.Now() @@ -137,9 +137,9 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) { } if err != nil { - fmt.Fprintf(e.options.File, "JSON encode error: %v\n", err) - return + return err } fmt.Fprintln(e.options.File, string(data)) + return nil } diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index b79ebe79ad0..17720878359 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -145,7 +145,7 @@ type Exporter interface { // // The Producer interface refers to the Batcher that just // completed 
collection. - Export(context.Context, Producer) + Export(context.Context, Producer) error } // LabelEncoder enables an optimization for export pipelines that use diff --git a/sdk/metric/controller/push/push.go b/sdk/metric/controller/push/push.go index 234361b888b..e2e6bc030f5 100644 --- a/sdk/metric/controller/push/push.go +++ b/sdk/metric/controller/push/push.go @@ -86,5 +86,10 @@ func (c *Controller) run() { func (c *Controller) tick() { ctx := context.Background() c.sdk.Collect(ctx) - c.exporter.Export(ctx, c.batcher.ReadCheckpoint()) + err := c.exporter.Export(ctx, c.batcher.ReadCheckpoint()) + + if err != nil { + // TODO: report this error + _ = err + } } From 77e4c3f28270a797b70076ef427bbdee37f9893f Mon Sep 17 00:00:00 2001 From: jmacd Date: Fri, 8 Nov 2019 14:43:44 -0800 Subject: [PATCH 39/73] add a basic stdout exporter test --- example/basic/main.go | 9 ++- exporter/metric/stdout/stdout.go | 85 +++++++++++++++++++++------ exporter/metric/stdout/stdout_test.go | 79 +++++++++++++++++++++++++ exporter/metric/test/test.go | 30 ++++++++++ sdk/metric/aggregator/api.go | 4 +- sdk/metric/example_test.go | 6 +- 6 files changed, 188 insertions(+), 25 deletions(-) create mode 100644 exporter/metric/stdout/stdout_test.go create mode 100644 exporter/metric/test/test.go diff --git a/example/basic/main.go b/example/basic/main.go index d325053961a..4e9914175ba 100644 --- a/example/basic/main.go +++ b/example/basic/main.go @@ -45,20 +45,23 @@ func initTracer() { var err error exp, err := tracestdout.NewExporter(tracestdout.Options{PrettyPrint: false}) if err != nil { - log.Panicf("failed to initialize stdout exporter %v\n", err) + log.Panicf("failed to initialize trace stdout exporter %v", err) return } tp, err := sdktrace.NewProvider(sdktrace.WithSyncer(exp), sdktrace.WithConfig(sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()})) if err != nil { - log.Panicf("failed to initialize trace provider %v\n", err) + log.Panicf("failed to initialize trace provider %v", 
err) } global.SetTraceProvider(tp) } func initMeter() *push.Controller { selector := simple.New() - exporter := metricstdout.New(metricstdout.Options{PrettyPrint: false}) + exporter, err := metricstdout.New(metricstdout.Options{PrettyPrint: false}) + if err != nil { + log.Panicf("failed to initialize metric stdout exporter %v", err) + } batcher := defaultkeys.New(selector, metricsdk.DefaultLabelEncoder(), true) pusher := push.New(batcher, exporter, time.Second) pusher.Start() diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index 5208c221dc8..ea7a5ad2080 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -18,7 +18,10 @@ import ( "context" "encoding/json" "fmt" + "io" + "math" "os" + "strconv" "strings" "time" @@ -33,7 +36,7 @@ type Exporter struct { // Options are the options to be used when initializing a stdout export. type Options struct { // File is the destination. If not set, os.Stdout is used. - File *os.File + File io.Writer // PrettyPrint will pretty the json representation of the span, // making it print "pretty". Default is false. 
@@ -65,23 +68,40 @@ type expoLine struct { Count interface{} `json:"count,omitempty"` LastValue interface{} `json:"last,omitempty"` + Quantiles interface{} `json:"quantiles,omitempty"` + // Note: this is a pointer because omitempty doesn't work when time.IsZero() Timestamp *time.Time `json:"time,omitempty"` } +type expoQuantile struct { + Q string `json:"q"` + V string `json:"v"` +} + var _ export.Exporter = &Exporter{} -func New(options Options) *Exporter { +func New(options Options) (*Exporter, error) { if options.File == nil { options.File = os.Stdout } + if options.Quantiles == nil { + options.Quantiles = []float64{0.5, 0.9, 0.99} + } else { + for _, q := range options.Quantiles { + if q < 0 || q > 1 { + return nil, aggregator.ErrInvalidQuantile + } + } + } return &Exporter{ options: options, - } + }, nil } func (e *Exporter) Export(_ context.Context, producer export.Producer) error { var batch expoBatch + var errors []error if !e.options.DoNotPrintTime { ts := time.Now() batch.Timestamp = &ts @@ -90,27 +110,46 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) error { desc := record.Descriptor() labels := record.Labels() agg := record.Aggregator() + kind := desc.NumberKind() var expose expoLine if sum, ok := agg.(aggregator.Sum); ok { - expose.Sum = sum.Sum().Emit(desc.NumberKind()) + expose.Sum = sum.Sum().Emit(kind) } else if lv, ok := agg.(aggregator.LastValue); ok { ts := lv.Timestamp() - expose.LastValue = lv.LastValue().Emit(desc.NumberKind()) + expose.LastValue = lv.LastValue().Emit(kind) expose.Timestamp = &ts } else if msc, ok := agg.(aggregator.MaxSumCount); ok { - expose.Max = msc.Max().Emit(desc.NumberKind()) - expose.Sum = msc.Sum().Emit(desc.NumberKind()) - expose.Count = msc.Count().Emit(desc.NumberKind()) - - } else if dist, ok := agg.(aggregator.Distribution); ok { - expose.Max = dist.Max().Emit(desc.NumberKind()) - expose.Sum = dist.Sum().Emit(desc.NumberKind()) - expose.Count = dist.Count().Emit(desc.NumberKind()) - - 
// TODO print one configured quantile per line + expose.Sum = msc.Sum().Emit(kind) + expose.Count = msc.Count().Emit(kind) + + if max, err := msc.Max(); err != nil { + errors = append(errors, err) + expose.Max = math.NaN() + } else { + expose.Max = max.Emit(kind) + } + + if dist, ok := agg.(aggregator.Distribution); ok && len(e.options.Quantiles) != 0 { + summary := make([]expoQuantile, len(e.options.Quantiles)) + expose.Quantiles = summary + + for i, q := range e.options.Quantiles { + var vstr string + if value, err := dist.Quantile(q); err != nil { + errors = append(errors, err) + vstr = fmt.Sprint(math.NaN()) + } else { + vstr = value.Emit(kind) + } + summary[i] = expoQuantile{ + Q: strconv.FormatFloat(q, 'f', -1, 64), + V: vstr, + } + } + } } var sb strings.Builder @@ -136,10 +175,18 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) error { data, err = json.Marshal(batch) } - if err != nil { - return err + if err == nil { + fmt.Fprintln(e.options.File, string(data)) + } else { + errors = append(errors, err) } - fmt.Fprintln(e.options.File, string(data)) - return nil + switch len(errors) { + case 0: + return nil + case 1: + return fmt.Errorf("Stdout exporter: %w", errors[0]) + default: + return fmt.Errorf("Stdout exporter: %v", errors) + } } diff --git a/exporter/metric/stdout/stdout_test.go b/exporter/metric/stdout/stdout_test.go new file mode 100644 index 00000000000..89b3365be4d --- /dev/null +++ b/exporter/metric/stdout/stdout_test.go @@ -0,0 +1,79 @@ +package stdout_test + +import ( + "bytes" + "context" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/api/core" + "go.opentelemetry.io/otel/api/key" + "go.opentelemetry.io/otel/exporter/metric/stdout" + "go.opentelemetry.io/otel/exporter/metric/test" + export "go.opentelemetry.io/otel/sdk/export/metric" + sdk "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator" + 
"go.opentelemetry.io/otel/sdk/metric/aggregator/counter" + aggtest "go.opentelemetry.io/otel/sdk/metric/aggregator/test" +) + +type testFixture struct { + t *testing.T + ctx context.Context + exporter *stdout.Exporter + output *bytes.Buffer +} + +func newFixture(t *testing.T, options stdout.Options) testFixture { + buf := &bytes.Buffer{} + options.File = buf + options.DoNotPrintTime = true + exp, err := stdout.New(options) + if err != nil { + t.Fatal("Error building fixture: ", err) + } + return testFixture{ + t: t, + ctx: context.Background(), + exporter: exp, + output: buf, + } +} + +func (fix testFixture) Output() string { + return strings.TrimSpace(fix.output.String()) +} + +func (fix testFixture) Export(producer export.Producer) { + err := fix.exporter.Export(fix.ctx, producer) + if err != nil { + fix.t.Error("export failed: ", err) + } +} + +func TestStdoutInvalidQuantile(t *testing.T) { + _, err := stdout.New(stdout.Options{ + Quantiles: []float64{1.1, 0.9}, + }) + require.Error(t, err, "Invalid quantile error expected") + require.Equal(t, aggregator.ErrInvalidQuantile, err) +} + +func TestStdoutCounterFormat(t *testing.T) { + fix := newFixture(t, stdout.Options{}) + + producer := test.NewProducer(sdk.DefaultLabelEncoder()) + + desc := export.NewDescriptor("test.name", export.CounterKind, nil, "", "", core.Int64NumberKind, false) + cagg := counter.New() + aggtest.CheckedUpdate(fix.t, cagg, core.NewInt64Number(123), desc) + cagg.Checkpoint(fix.ctx, desc) + + producer.Add(desc, cagg, key.String("A", "B"), key.String("C", "D")) + + fix.Export(producer) + + require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","sum":"123"}]}`, fix.Output()) +} diff --git a/exporter/metric/test/test.go b/exporter/metric/test/test.go new file mode 100644 index 00000000000..2a4aec517f3 --- /dev/null +++ b/exporter/metric/test/test.go @@ -0,0 +1,30 @@ +package test + +import ( + "go.opentelemetry.io/otel/api/core" + export "go.opentelemetry.io/otel/sdk/export/metric" +) + +type 
Producer struct { + encoder export.LabelEncoder + updates []export.Record +} + +func NewProducer(encoder export.LabelEncoder) *Producer { + return &Producer{ + encoder: encoder, + } +} + +func (p *Producer) Add(desc *export.Descriptor, agg export.Aggregator, labels ...core.KeyValue) { + encoded := p.encoder.EncodeLabels(labels) + elabels := export.NewLabels(labels, encoded, p.encoder) + + p.updates = append(p.updates, export.NewRecord(desc, elabels, agg)) +} + +func (p *Producer) Foreach(f func(export.Record)) { + for _, r := range p.updates { + f(r) + } +} diff --git a/sdk/metric/aggregator/api.go b/sdk/metric/aggregator/api.go index 5f9125b8ead..60660cfeb6e 100644 --- a/sdk/metric/aggregator/api.go +++ b/sdk/metric/aggregator/api.go @@ -33,11 +33,11 @@ type ( } Max interface { - Max() core.Number + Max() (core.Number, error) } Quantile interface { - Quantile() core.Number + Quantile(float64) (core.Number, error) } LastValue interface { diff --git a/sdk/metric/example_test.go b/sdk/metric/example_test.go index e13c5421ce6..ccce7543ad7 100644 --- a/sdk/metric/example_test.go +++ b/sdk/metric/example_test.go @@ -16,6 +16,7 @@ package metric_test import ( "context" + "fmt" "time" "go.opentelemetry.io/otel/api/key" @@ -29,10 +30,13 @@ import ( func ExampleNew() { selector := simple.New() - exporter := stdout.New(stdout.Options{ + exporter, err := stdout.New(stdout.Options{ PrettyPrint: true, DoNotPrintTime: true, // This makes the output deterministic }) + if err != nil { + panic(fmt.Sprintln("Could not initialize stdout exporter:", err)) + } batcher := defaultkeys.New(selector, sdk.DefaultLabelEncoder(), true) pusher := push.New(batcher, exporter, time.Second) pusher.Start() From dc23ae19f3e86bab5f7df833ff37ff2cdd5e6fe2 Mon Sep 17 00:00:00 2001 From: jmacd Date: Fri, 8 Nov 2019 16:55:58 -0800 Subject: [PATCH 40/73] Add measure test; fix aggregator APIs --- exporter/metric/stdout/stdout.go | 21 +++++----- exporter/metric/stdout/stdout_test.go | 47 
++++++++++++++++++++++ sdk/metric/aggregator/api.go | 2 +- sdk/metric/aggregator/array/array.go | 2 + sdk/metric/aggregator/counter/counter.go | 1 + sdk/metric/aggregator/ddsketch/ddsketch.go | 2 + sdk/metric/aggregator/gauge/gauge.go | 1 + sdk/metric/aggregator/maxsumcount/msc.go | 1 + 8 files changed, 66 insertions(+), 11 deletions(-) diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index ea7a5ad2080..30a35ff9830 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -113,17 +113,9 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) error { kind := desc.NumberKind() var expose expoLine - if sum, ok := agg.(aggregator.Sum); ok { - expose.Sum = sum.Sum().Emit(kind) - - } else if lv, ok := agg.(aggregator.LastValue); ok { - ts := lv.Timestamp() - expose.LastValue = lv.LastValue().Emit(kind) - expose.Timestamp = &ts - - } else if msc, ok := agg.(aggregator.MaxSumCount); ok { + if msc, ok := agg.(aggregator.MaxSumCount); ok { expose.Sum = msc.Sum().Emit(kind) - expose.Count = msc.Count().Emit(kind) + expose.Count = msc.Count() if max, err := msc.Max(); err != nil { errors = append(errors, err) @@ -142,6 +134,8 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) error { errors = append(errors, err) vstr = fmt.Sprint(math.NaN()) } else { + // TODO: Update core float formatting to use -1 + // precision. The trailing zeros here are distracting. 
vstr = value.Emit(kind) } summary[i] = expoQuantile{ @@ -150,6 +144,13 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) error { } } } + } else if sum, ok := agg.(aggregator.Sum); ok { + expose.Sum = sum.Sum().Emit(kind) + + } else if lv, ok := agg.(aggregator.LastValue); ok { + ts := lv.Timestamp() + expose.LastValue = lv.LastValue().Emit(kind) + expose.Timestamp = &ts } var sb strings.Builder diff --git a/exporter/metric/stdout/stdout_test.go b/exporter/metric/stdout/stdout_test.go index 89b3365be4d..91ea5ec13c3 100644 --- a/exporter/metric/stdout/stdout_test.go +++ b/exporter/metric/stdout/stdout_test.go @@ -15,6 +15,7 @@ import ( export "go.opentelemetry.io/otel/sdk/export/metric" sdk "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/aggregator" + "go.opentelemetry.io/otel/sdk/metric/aggregator/array" "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" aggtest "go.opentelemetry.io/otel/sdk/metric/aggregator/test" ) @@ -77,3 +78,49 @@ func TestStdoutCounterFormat(t *testing.T) { require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","sum":"123"}]}`, fix.Output()) } + +func TestStdoutMeasureFormat(t *testing.T) { + fix := newFixture(t, stdout.Options{ + PrettyPrint: true, + }) + + producer := test.NewProducer(sdk.DefaultLabelEncoder()) + + desc := export.NewDescriptor("test.name", export.MeasureKind, nil, "", "", core.Float64NumberKind, false) + magg := array.New() + + for i := 0; i < 1000; i++ { + aggtest.CheckedUpdate(fix.t, magg, core.NewFloat64Number(float64(i)), desc) + } + + magg.Checkpoint(fix.ctx, desc) + + producer.Add(desc, magg, key.String("A", "B"), key.String("C", "D")) + + fix.Export(producer) + + require.Equal(t, `{ + "updates": [ + { + "name": "test.name{A=B,C=D}", + "max": "999.000000", + "sum": "499500.000000", + "count": 1000, + "quantiles": [ + { + "q": "0.5", + "v": "500.000000" + }, + { + "q": "0.9", + "v": "900.000000" + }, + { + "q": "0.99", + "v": "990.000000" + } + ] + } + ] +}`, 
fix.Output()) +} diff --git a/sdk/metric/aggregator/api.go b/sdk/metric/aggregator/api.go index 60660cfeb6e..0f3855637da 100644 --- a/sdk/metric/aggregator/api.go +++ b/sdk/metric/aggregator/api.go @@ -29,7 +29,7 @@ type ( } Count interface { - Count() core.Number + Count() int64 } Max interface { diff --git a/sdk/metric/aggregator/array/array.go b/sdk/metric/aggregator/array/array.go index ab5d3e0c3a7..0ad4fa29df7 100644 --- a/sdk/metric/aggregator/array/array.go +++ b/sdk/metric/aggregator/array/array.go @@ -38,6 +38,8 @@ type ( ) var _ export.Aggregator = &Aggregator{} +var _ aggregator.MaxSumCount = &Aggregator{} +var _ aggregator.Distribution = &Aggregator{} func New() *Aggregator { return &Aggregator{} diff --git a/sdk/metric/aggregator/counter/counter.go b/sdk/metric/aggregator/counter/counter.go index f457e2a8278..91c04e20c99 100644 --- a/sdk/metric/aggregator/counter/counter.go +++ b/sdk/metric/aggregator/counter/counter.go @@ -32,6 +32,7 @@ type Aggregator struct { } var _ export.Aggregator = &Aggregator{} +var _ aggregator.Sum = &Aggregator{} // New returns a new counter aggregator. This aggregator computes an // atomic sum. diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index 31296415279..4db2f5b809c 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -36,6 +36,8 @@ type Aggregator struct { } var _ export.Aggregator = &Aggregator{} +var _ aggregator.MaxSumCount = &Aggregator{} +var _ aggregator.Distribution = &Aggregator{} // New returns a new DDSketch aggregator. 
func New(cfg *sdk.Config, desc *export.Descriptor) *Aggregator { diff --git a/sdk/metric/aggregator/gauge/gauge.go b/sdk/metric/aggregator/gauge/gauge.go index 5f9e3c117ec..73ac2b78d2a 100644 --- a/sdk/metric/aggregator/gauge/gauge.go +++ b/sdk/metric/aggregator/gauge/gauge.go @@ -56,6 +56,7 @@ type ( ) var _ export.Aggregator = &Aggregator{} +var _ aggregator.LastValue = &Aggregator{} // An unset gauge has zero timestamp and zero value. var unsetGauge = &gaugeData{} diff --git a/sdk/metric/aggregator/maxsumcount/msc.go b/sdk/metric/aggregator/maxsumcount/msc.go index ba90d6f8a12..fee3caf312d 100644 --- a/sdk/metric/aggregator/maxsumcount/msc.go +++ b/sdk/metric/aggregator/maxsumcount/msc.go @@ -38,6 +38,7 @@ type ( ) var _ export.Aggregator = &Aggregator{} +var _ aggregator.MaxSumCount = &Aggregator{} // New returns a new measure aggregator for computing max, sum, and count. func New() *Aggregator { From dfbc88148ea28c63e8613e9a29f51bae02c00282 Mon Sep 17 00:00:00 2001 From: jmacd Date: Fri, 8 Nov 2019 21:18:30 -0800 Subject: [PATCH 41/73] Use JSON numbers, not strings --- api/core/number.go | 16 ++++ exporter/metric/stdout/stdout.go | 28 +++---- exporter/metric/stdout/stdout_test.go | 112 +++++++++++++++++++++++--- 3 files changed, 132 insertions(+), 24 deletions(-) diff --git a/api/core/number.go b/api/core/number.go index c68fa6b51d7..9881fb6f0b8 100644 --- a/api/core/number.go +++ b/api/core/number.go @@ -18,6 +18,7 @@ package core import ( "fmt" + "math" "sync/atomic" ) @@ -569,6 +570,21 @@ func (n Number) Emit(kind NumberKind) string { } } +// AsInterface returns the number as an interface{}, typically used +// for NumberKind-correct JSON conversion. 
+func (n Number) AsInterface(kind NumberKind) interface{} { + switch kind { + case Int64NumberKind: + return n.AsInt64() + case Float64NumberKind: + return n.AsFloat64() + case Uint64NumberKind: + return n.AsUint64() + default: + return math.NaN() + } +} + // - private stuff func (n Number) compareWithZero(kind NumberKind) int { diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index 30a35ff9830..51a42680be7 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -21,7 +21,6 @@ import ( "io" "math" "os" - "strconv" "strings" "time" @@ -75,8 +74,8 @@ type expoLine struct { } type expoQuantile struct { - Q string `json:"q"` - V string `json:"v"` + Q interface{} `json:"q"` + V interface{} `json:"v"` } var _ export.Exporter = &Exporter{} @@ -114,14 +113,14 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) error { var expose expoLine if msc, ok := agg.(aggregator.MaxSumCount); ok { - expose.Sum = msc.Sum().Emit(kind) + expose.Sum = msc.Sum().AsInterface(kind) expose.Count = msc.Count() if max, err := msc.Max(); err != nil { errors = append(errors, err) expose.Max = math.NaN() } else { - expose.Max = max.Emit(kind) + expose.Max = max.AsInterface(kind) } if dist, ok := agg.(aggregator.Distribution); ok && len(e.options.Quantiles) != 0 { @@ -129,28 +128,29 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) error { expose.Quantiles = summary for i, q := range e.options.Quantiles { - var vstr string + var vstr interface{} if value, err := dist.Quantile(q); err != nil { errors = append(errors, err) - vstr = fmt.Sprint(math.NaN()) + vstr = math.NaN() } else { - // TODO: Update core float formatting to use -1 - // precision. The trailing zeros here are distracting. 
- vstr = value.Emit(kind) + vstr = value.AsInterface(kind) } summary[i] = expoQuantile{ - Q: strconv.FormatFloat(q, 'f', -1, 64), + Q: q, V: vstr, } } } } else if sum, ok := agg.(aggregator.Sum); ok { - expose.Sum = sum.Sum().Emit(kind) + expose.Sum = sum.Sum().AsInterface(kind) } else if lv, ok := agg.(aggregator.LastValue); ok { ts := lv.Timestamp() - expose.LastValue = lv.LastValue().Emit(kind) - expose.Timestamp = &ts + expose.LastValue = lv.LastValue().AsInterface(kind) + + if !e.options.DoNotPrintTime { + expose.Timestamp = &ts + } } var sb strings.Builder diff --git a/exporter/metric/stdout/stdout_test.go b/exporter/metric/stdout/stdout_test.go index 91ea5ec13c3..474dfc7fd38 100644 --- a/exporter/metric/stdout/stdout_test.go +++ b/exporter/metric/stdout/stdout_test.go @@ -3,8 +3,10 @@ package stdout_test import ( "bytes" "context" + "encoding/json" "strings" "testing" + "time" "github.com/stretchr/testify/require" @@ -17,6 +19,8 @@ import ( "go.opentelemetry.io/otel/sdk/metric/aggregator" "go.opentelemetry.io/otel/sdk/metric/aggregator/array" "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" + "go.opentelemetry.io/otel/sdk/metric/aggregator/gauge" + "go.opentelemetry.io/otel/sdk/metric/aggregator/maxsumcount" aggtest "go.opentelemetry.io/otel/sdk/metric/aggregator/test" ) @@ -62,6 +66,59 @@ func TestStdoutInvalidQuantile(t *testing.T) { require.Equal(t, aggregator.ErrInvalidQuantile, err) } +func TestStdoutTimestamp(t *testing.T) { + var buf bytes.Buffer + exporter, err := stdout.New(stdout.Options{ + File: &buf, + DoNotPrintTime: false, + }) + if err != nil { + t.Fatal("Invalid options: ", err) + } + + before := time.Now() + + producer := test.NewProducer(sdk.DefaultLabelEncoder()) + + ctx := context.Background() + desc := export.NewDescriptor("test.name", export.GaugeKind, nil, "", "", core.Int64NumberKind, false) + gagg := gauge.New() + aggtest.CheckedUpdate(t, gagg, core.NewInt64Number(321), desc) + gagg.Checkpoint(ctx, desc) + + 
producer.Add(desc, gagg) + + exporter.Export(ctx, producer) + + after := time.Now() + + var printed map[string]interface{} + + if err := json.Unmarshal(buf.Bytes(), &printed); err != nil { + t.Fatal("JSON parse error: ", err) + } + + updateTS := printed["time"].(string) + updateTimestamp, err := time.Parse(time.RFC3339Nano, updateTS) + if err != nil { + t.Fatal("JSON parse error: ", updateTS, ": ", err) + } + + gaugeTS := printed["updates"].([]interface{})[0].(map[string]interface{})["time"].(string) + gaugeTimestamp, err := time.Parse(time.RFC3339Nano, gaugeTS) + if err != nil { + t.Fatal("JSON parse error: ", gaugeTS, ": ", err) + } + + require.True(t, updateTimestamp.After(before)) + require.True(t, updateTimestamp.Before(after)) + + require.True(t, gaugeTimestamp.After(before)) + require.True(t, gaugeTimestamp.Before(after)) + + require.True(t, gaugeTimestamp.Before(updateTimestamp)) +} + func TestStdoutCounterFormat(t *testing.T) { fix := newFixture(t, stdout.Options{}) @@ -76,7 +133,42 @@ func TestStdoutCounterFormat(t *testing.T) { fix.Export(producer) - require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","sum":"123"}]}`, fix.Output()) + require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","sum":123}]}`, fix.Output()) +} + +func TestStdoutGaugeFormat(t *testing.T) { + fix := newFixture(t, stdout.Options{}) + + producer := test.NewProducer(sdk.DefaultLabelEncoder()) + + desc := export.NewDescriptor("test.name", export.GaugeKind, nil, "", "", core.Float64NumberKind, false) + gagg := gauge.New() + aggtest.CheckedUpdate(fix.t, gagg, core.NewFloat64Number(123.456), desc) + gagg.Checkpoint(fix.ctx, desc) + + producer.Add(desc, gagg, key.String("A", "B"), key.String("C", "D")) + + fix.Export(producer) + + require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","last":123.456}]}`, fix.Output()) +} + +func TestStdoutMaxSumCount(t *testing.T) { + fix := newFixture(t, stdout.Options{}) + + producer := test.NewProducer(sdk.DefaultLabelEncoder()) + + desc 
:= export.NewDescriptor("test.name", export.MeasureKind, nil, "", "", core.Float64NumberKind, false) + magg := maxsumcount.New() + aggtest.CheckedUpdate(fix.t, magg, core.NewFloat64Number(123.456), desc) + aggtest.CheckedUpdate(fix.t, magg, core.NewFloat64Number(876.543), desc) + magg.Checkpoint(fix.ctx, desc) + + producer.Add(desc, magg, key.String("A", "B"), key.String("C", "D")) + + fix.Export(producer) + + require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","max":876.543,"sum":999.999,"count":2}]}`, fix.Output()) } func TestStdoutMeasureFormat(t *testing.T) { @@ -90,7 +182,7 @@ func TestStdoutMeasureFormat(t *testing.T) { magg := array.New() for i := 0; i < 1000; i++ { - aggtest.CheckedUpdate(fix.t, magg, core.NewFloat64Number(float64(i)), desc) + aggtest.CheckedUpdate(fix.t, magg, core.NewFloat64Number(float64(i)+0.5), desc) } magg.Checkpoint(fix.ctx, desc) @@ -103,21 +195,21 @@ func TestStdoutMeasureFormat(t *testing.T) { "updates": [ { "name": "test.name{A=B,C=D}", - "max": "999.000000", - "sum": "499500.000000", + "max": 999.5, + "sum": 500000, "count": 1000, "quantiles": [ { - "q": "0.5", - "v": "500.000000" + "q": 0.5, + "v": 500.5 }, { - "q": "0.9", - "v": "900.000000" + "q": 0.9, + "v": 900.5 }, { - "q": "0.99", - "v": "990.000000" + "q": 0.99, + "v": 990.5 } ] } From 9ecdf514e9b04aa70926c64e8cabe1ac4445b944 Mon Sep 17 00:00:00 2001 From: jmacd Date: Fri, 8 Nov 2019 21:47:27 -0800 Subject: [PATCH 42/73] Test stdout exporter error --- api/core/number_test.go | 8 ++++++++ example/http-stackdriver/go.sum | 7 +++++++ example/http/go.sum | 5 +++++ example/namedtracer/go.sum | 7 +++++++ exporter/metric/stdout/stdout.go | 24 ++++++++-------------- exporter/metric/stdout/stdout_test.go | 24 +++++++++++++++++++++- exporter/trace/stackdriver/go.sum | 7 +++++++ sdk/metric/aggregator/ddsketch/ddsketch.go | 3 +++ 8 files changed, 69 insertions(+), 16 deletions(-) diff --git a/api/core/number_test.go b/api/core/number_test.go index f2b5abfc3db..685b48346c5 
100644 --- a/api/core/number_test.go +++ b/api/core/number_test.go @@ -17,6 +17,8 @@ package core import ( "testing" "unsafe" + + "github.com/stretchr/testify/require" ) func TestNumber(t *testing.T) { @@ -157,3 +159,9 @@ func TestNumberZero(t *testing.T) { t.Errorf("Invalid zero representations") } } + +func TestNumberAsInterface(t *testing.T) { + require.Equal(t, 10, NewInt64Number(10).AsInterface(Int64NumberKind).(int64)) + require.Equal(t, 11.11, NewFloat64Number(11.11).AsInterface(Float64NumberKind).(float64)) + require.Equal(t, 100, NewUint64Number(100).AsInterface(Uint64NumberKind).(uint64)) +} diff --git a/example/http-stackdriver/go.sum b/example/http-stackdriver/go.sum index 5ed9e6b7ade..d2f1699701d 100644 --- a/example/http-stackdriver/go.sum +++ b/example/http-stackdriver/go.sum @@ -36,6 +36,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= @@ -135,9 +136,11 @@ github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= 
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= @@ -164,6 +167,7 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml v1.5.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= @@ -205,6 +209,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/timakin/bodyclose 
v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -367,6 +372,7 @@ google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRn gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -375,6 +381,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/example/http/go.sum b/example/http/go.sum index 39eb2dd97d7..4ec8774711e 100644 --- a/example/http/go.sum +++ b/example/http/go.sum @@ -21,6 +21,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc 
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= @@ -141,6 +142,7 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml v1.5.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= @@ -182,6 +184,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/timakin/bodyclose 
v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -283,6 +286,7 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= @@ -294,6 +298,7 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/example/namedtracer/go.sum b/example/namedtracer/go.sum index 789dc1b0023..a8786ca0f4c 100644 --- a/example/namedtracer/go.sum +++ b/example/namedtracer/go.sum @@ -21,6 +21,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc 
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= @@ -107,9 +108,11 @@ github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= @@ -136,6 +139,7 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml v1.5.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= github.com/pkg/errors 
v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= @@ -177,6 +181,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -274,6 +279,7 @@ google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRn gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod 
h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -282,6 +288,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index 51a42680be7..d553ce012e5 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -19,7 +19,6 @@ import ( "encoding/json" "fmt" "io" - "math" "os" "strings" "time" @@ -99,8 +98,10 @@ func New(options Options) (*Exporter, error) { } func (e *Exporter) Export(_ context.Context, producer export.Producer) error { + // N.B. Only return one aggError, if any occur. They're likely + // to be duplicates of the same error. 
+ var aggError error var batch expoBatch - var errors []error if !e.options.DoNotPrintTime { ts := time.Now() batch.Timestamp = &ts @@ -117,8 +118,8 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) error { expose.Count = msc.Count() if max, err := msc.Max(); err != nil { - errors = append(errors, err) - expose.Max = math.NaN() + aggError = err + expose.Max = "NaN" } else { expose.Max = max.AsInterface(kind) } @@ -130,8 +131,8 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) error { for i, q := range e.options.Quantiles { var vstr interface{} if value, err := dist.Quantile(q); err != nil { - errors = append(errors, err) - vstr = math.NaN() + aggError = err + vstr = "NaN" } else { vstr = value.AsInterface(kind) } @@ -179,15 +180,8 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) error { if err == nil { fmt.Fprintln(e.options.File, string(data)) } else { - errors = append(errors, err) + return err } - switch len(errors) { - case 0: - return nil - case 1: - return fmt.Errorf("Stdout exporter: %w", errors[0]) - default: - return fmt.Errorf("Stdout exporter: %v", errors) - } + return aggError } diff --git a/exporter/metric/stdout/stdout_test.go b/exporter/metric/stdout/stdout_test.go index 474dfc7fd38..42dd0bb26fc 100644 --- a/exporter/metric/stdout/stdout_test.go +++ b/exporter/metric/stdout/stdout_test.go @@ -19,6 +19,7 @@ import ( "go.opentelemetry.io/otel/sdk/metric/aggregator" "go.opentelemetry.io/otel/sdk/metric/aggregator/array" "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" + "go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch" "go.opentelemetry.io/otel/sdk/metric/aggregator/gauge" "go.opentelemetry.io/otel/sdk/metric/aggregator/maxsumcount" aggtest "go.opentelemetry.io/otel/sdk/metric/aggregator/test" @@ -88,7 +89,9 @@ func TestStdoutTimestamp(t *testing.T) { producer.Add(desc, gagg) - exporter.Export(ctx, producer) + if err := exporter.Export(ctx, producer); err != nil { + 
t.Fatal("Unexpected export error: ", err) + } after := time.Now() @@ -216,3 +219,22 @@ func TestStdoutMeasureFormat(t *testing.T) { ] }`, fix.Output()) } + +func TestStdoutAggError(t *testing.T) { + fix := newFixture(t, stdout.Options{}) + + producer := test.NewProducer(sdk.DefaultLabelEncoder()) + + desc := export.NewDescriptor("test.name", export.MeasureKind, nil, "", "", core.Float64NumberKind, false) + magg := ddsketch.New(ddsketch.NewDefaultConfig(), desc) + magg.Checkpoint(fix.ctx, desc) + + producer.Add(desc, magg) + + err := fix.exporter.Export(fix.ctx, producer) + + // An error is returned and NaN values are printed. + require.Error(t, err) + require.Equal(t, aggregator.ErrEmptyDataSet, err) + require.Equal(t, `{"updates":[{"name":"test.name","max":"NaN","sum":0,"count":0,"quantiles":[{"q":0.5,"v":"NaN"},{"q":0.9,"v":"NaN"},{"q":0.99,"v":"NaN"}]}]}`, fix.Output()) +} diff --git a/exporter/trace/stackdriver/go.sum b/exporter/trace/stackdriver/go.sum index d4a9495302b..08e99865184 100644 --- a/exporter/trace/stackdriver/go.sum +++ b/exporter/trace/stackdriver/go.sum @@ -35,6 +35,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= @@ -134,9 +135,11 @@ github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo 
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= @@ -163,6 +166,7 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml v1.5.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= @@ -204,6 +208,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -363,6 +368,7 @@ google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRn gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -371,6 +377,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index 4db2f5b809c..4a000ada365 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -79,6 +79,9 @@ func (c *Aggregator) Min() (core.Number, error) { // Quantile returns the estimated quantile of the checkpoint. func (c *Aggregator) Quantile(q float64) (core.Number, error) { + if c.checkpoint.Count() == 0 { + return core.Number(0), aggregator.ErrEmptyDataSet + } f := c.checkpoint.Quantile(q) if math.IsNaN(f) { return core.Number(0), aggregator.ErrInvalidQuantile From 60ab98bbd94987f2fda0fdba19c3ba034fc3c618 Mon Sep 17 00:00:00 2001 From: jmacd Date: Fri, 8 Nov 2019 22:06:30 -0800 Subject: [PATCH 43/73] Add a test for the call to RangeTest --- sdk/metric/correct_test.go | 84 ++++++++++++++++++++++++++++ sdk/metric/example_test.go | 2 +- sdk/metric/selector/simple/simple.go | 2 +- 3 files changed, 86 insertions(+), 2 deletions(-) create mode 100644 sdk/metric/correct_test.go diff --git a/sdk/metric/correct_test.go b/sdk/metric/correct_test.go new file mode 100644 index 00000000000..55322fed6b7 --- /dev/null +++ b/sdk/metric/correct_test.go @@ -0,0 +1,84 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package metric_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/api/metric" + export "go.opentelemetry.io/otel/sdk/export/metric" + sdk "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator/array" + "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" +) + +type correctnessBatcher struct { + t *testing.T + agg export.Aggregator +} + +func (cb *correctnessBatcher) AggregatorFor(*export.Descriptor) export.Aggregator { + return cb.agg +} + +func (cb *correctnessBatcher) ReadCheckpoint() export.Producer { + cb.t.Fatal("Should not be called") + return nil +} + +func (cb *correctnessBatcher) Process(_ context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { + return nil +} + +func TestInputRangeTestCounter(t *testing.T) { + ctx := context.Background() + cagg := counter.New() + batcher := &correctnessBatcher{ + t: t, + agg: cagg, + } + sdk := sdk.New(batcher, sdk.DefaultLabelEncoder()) + counter := sdk.NewInt64Counter("counter.name", metric.WithMonotonic(true)) + + counter.Add(ctx, -1, sdk.Labels()) + sdk.Collect(ctx) + require.Equal(t, int64(0), cagg.Sum().AsInt64()) + + counter.Add(ctx, 1, sdk.Labels()) + sdk.Collect(ctx) + require.Equal(t, int64(1), cagg.Sum().AsInt64()) +} + +func TestInputRangeTestMeasure(t *testing.T) { + ctx := context.Background() + magg := array.New() + batcher := &correctnessBatcher{ + t: t, + agg: magg, + } + sdk := sdk.New(batcher, sdk.DefaultLabelEncoder()) + measure := sdk.NewFloat64Measure("measure.name", metric.WithAbsolute(true)) + + measure.Record(ctx, -1, sdk.Labels()) + sdk.Collect(ctx) + require.Equal(t, int64(0), magg.Count()) + + measure.Record(ctx, 1, sdk.Labels()) + measure.Record(ctx, 2, sdk.Labels()) + sdk.Collect(ctx) + require.Equal(t, int64(2), magg.Count()) +} diff --git a/sdk/metric/example_test.go b/sdk/metric/example_test.go index ccce7543ad7..d905c560871 100644 --- 
a/sdk/metric/example_test.go +++ b/sdk/metric/example_test.go @@ -57,7 +57,7 @@ func ExampleNew() { // "updates": [ // { // "name": "a.counter{key=value}", - // "sum": "100" + // "sum": 100 // } // ] // } diff --git a/sdk/metric/selector/simple/simple.go b/sdk/metric/selector/simple/simple.go index 4dd5d0526a3..2feae03022a 100644 --- a/sdk/metric/selector/simple/simple.go +++ b/sdk/metric/selector/simple/simple.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package simple // import "go.opentelemetry.io/otel/sdk/metric/selector/simpler" +package simple // import "go.opentelemetry.io/otel/sdk/metric/selector/simple" import ( export "go.opentelemetry.io/otel/sdk/export/metric" From bf2f1e612f9328c2936c6b2c6b80a3d84579633b Mon Sep 17 00:00:00 2001 From: jmacd Date: Sat, 9 Nov 2019 12:24:12 -0800 Subject: [PATCH 44/73] Add error handler API to improve correctness test; return errors from RecordOne --- api/metric/noop.go | 6 ++- api/metric/sdkhelpers.go | 5 ++- sdk/metric/aggregator/errors.go | 2 +- sdk/metric/correct_test.go | 66 ++++++++++++++++++++++++++++++++- sdk/metric/sdk.go | 65 ++++++++++++++++++++++---------- 5 files changed, 118 insertions(+), 26 deletions(-) diff --git a/api/metric/noop.go b/api/metric/noop.go index a504a39e535..83260406d89 100644 --- a/api/metric/noop.go +++ b/api/metric/noop.go @@ -22,7 +22,8 @@ func (NoopProvider) GetMeter(name string) Meter { return NoopMeter{} } -func (noopHandle) RecordOne(context.Context, core.Number) { +func (noopHandle) RecordOne(context.Context, core.Number) error { + return nil } func (noopHandle) Release() { @@ -32,7 +33,8 @@ func (noopInstrument) AcquireHandle(LabelSet) HandleImpl { return noopHandle{} } -func (noopInstrument) RecordOne(context.Context, core.Number, LabelSet) { +func (noopInstrument) RecordOne(context.Context, core.Number, LabelSet) error { + return nil } func (noopInstrument) Meter() Meter { diff --git 
a/api/metric/sdkhelpers.go b/api/metric/sdkhelpers.go index dc3c6afcff7..42d653b996a 100644 --- a/api/metric/sdkhelpers.go +++ b/api/metric/sdkhelpers.go @@ -28,14 +28,15 @@ type InstrumentImpl interface { AcquireHandle(labels LabelSet) HandleImpl // RecordOne allows the SDK to observe a single metric event. - RecordOne(ctx context.Context, number core.Number, labels LabelSet) + // The error is returned for the SDK to report to the user. + RecordOne(ctx context.Context, number core.Number, labels LabelSet) error } // HandleImpl is the implementation-level interface to Set/Add/Record // individual metrics with precomputed labels. type HandleImpl interface { // RecordOne allows the SDK to observe a single metric event. - RecordOne(ctx context.Context, number core.Number) + RecordOne(ctx context.Context, number core.Number) error // Release frees the resources associated with this handle. It // does not affect the metric this handle was created through. diff --git a/sdk/metric/aggregator/errors.go b/sdk/metric/aggregator/errors.go index dd24c5631b2..f54fad0d54b 100644 --- a/sdk/metric/aggregator/errors.go +++ b/sdk/metric/aggregator/errors.go @@ -25,7 +25,7 @@ import ( var ( ErrEmptyDataSet = fmt.Errorf("The result is not defined on an empty data set") ErrInvalidQuantile = fmt.Errorf("The requested quantile is out of range") - ErrNegativeInput = fmt.Errorf("Negative value is out of range for this instrumentr") + ErrNegativeInput = fmt.Errorf("Negative value is out of range for this instrument") ErrNaNInput = fmt.Errorf("NaN value is an invalid input") ErrNonMonotoneInput = fmt.Errorf("The new value is not monotone") ErrInconsistentType = fmt.Errorf("Cannot merge different aggregator types") diff --git a/sdk/metric/correct_test.go b/sdk/metric/correct_test.go index 55322fed6b7..4440507636e 100644 --- a/sdk/metric/correct_test.go +++ b/sdk/metric/correct_test.go @@ -16,14 +16,18 @@ package metric_test import ( "context" + "math" "testing" 
"github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/api/metric" export "go.opentelemetry.io/otel/sdk/export/metric" sdk "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator" "go.opentelemetry.io/otel/sdk/metric/aggregator/array" "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" + "go.opentelemetry.io/otel/sdk/metric/aggregator/gauge" ) type correctnessBatcher struct { @@ -52,15 +56,27 @@ func TestInputRangeTestCounter(t *testing.T) { agg: cagg, } sdk := sdk.New(batcher, sdk.DefaultLabelEncoder()) + + var err error + sdk.SetErrorHandler(func(sdkErr error) { + err = sdkErr + }) + counter := sdk.NewInt64Counter("counter.name", metric.WithMonotonic(true)) counter.Add(ctx, -1, sdk.Labels()) + require.Equal(t, aggregator.ErrNegativeInput, err) + err = nil + sdk.Collect(ctx) require.Equal(t, int64(0), cagg.Sum().AsInt64()) counter.Add(ctx, 1, sdk.Labels()) - sdk.Collect(ctx) + checkpointed := sdk.Collect(ctx) + require.Equal(t, int64(1), cagg.Sum().AsInt64()) + require.Equal(t, 1, checkpointed) + require.Nil(t, err) } func TestInputRangeTestMeasure(t *testing.T) { @@ -71,14 +87,60 @@ func TestInputRangeTestMeasure(t *testing.T) { agg: magg, } sdk := sdk.New(batcher, sdk.DefaultLabelEncoder()) + + var err error + sdk.SetErrorHandler(func(sdkErr error) { + err = sdkErr + }) + measure := sdk.NewFloat64Measure("measure.name", metric.WithAbsolute(true)) measure.Record(ctx, -1, sdk.Labels()) + require.Equal(t, aggregator.ErrNegativeInput, err) + err = nil + sdk.Collect(ctx) require.Equal(t, int64(0), magg.Count()) measure.Record(ctx, 1, sdk.Labels()) measure.Record(ctx, 2, sdk.Labels()) - sdk.Collect(ctx) + checkpointed := sdk.Collect(ctx) + require.Equal(t, int64(2), magg.Count()) + require.Equal(t, 1, checkpointed) + require.Nil(t, err) +} + +func TestDisabledInstrument(t *testing.T) { + ctx := context.Background() + batcher := &correctnessBatcher{ + t: t, + agg: nil, + } + sdk := sdk.New(batcher, 
sdk.DefaultLabelEncoder()) + measure := sdk.NewFloat64Measure("measure.name", metric.WithAbsolute(true)) + + measure.Record(ctx, -1, sdk.Labels()) + checkpointed := sdk.Collect(ctx) + + require.Equal(t, 0, checkpointed) +} + +func TestRecordNaN(t *testing.T) { + ctx := context.Background() + batcher := &correctnessBatcher{ + t: t, + agg: gauge.New(), + } + sdk := sdk.New(batcher, sdk.DefaultLabelEncoder()) + + var err error + sdk.SetErrorHandler(func(sdkErr error) { + err = sdkErr + }) + g := sdk.NewFloat64Gauge("gauge.name") + + require.Nil(t, err) + g.Set(ctx, math.NaN(), sdk.Labels()) + require.Error(t, err) } diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index c83cbc7cb9c..0b620f1ce2a 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -16,6 +16,8 @@ package metric import ( "context" + "fmt" + "os" "sort" "sync" "sync/atomic" @@ -61,6 +63,9 @@ type ( // collectLock prevents simultaneous calls to Collect(). collectLock sync.Mutex + + // errorHandler supports delivering errors to the user. + errorHandler ErrorHandler } instrument struct { @@ -127,6 +132,8 @@ type ( next doublePtr } + ErrorHandler func(error) + // singlePointer wraps an unsafe.Pointer and supports basic // load(), store(), clear(), and swapNil() operations. 
singlePtr struct { @@ -157,6 +164,10 @@ func (i *instrument) Meter() api.Meter { return i.meter } +func (m *SDK) SetErrorHandler(f ErrorHandler) { + m.errorHandler = f +} + func (i *instrument) acquireHandle(ls *labels) *record { // Create lookup key for sync.Map (one allocation) mk := mapkey{ @@ -199,11 +210,15 @@ func (i *instrument) AcquireHandle(ls api.LabelSet) api.HandleImpl { return i.acquireHandle(labs) } -func (i *instrument) RecordOne(ctx context.Context, number core.Number, ls api.LabelSet) { +func (i *instrument) RecordOne(ctx context.Context, number core.Number, ls api.LabelSet) error { ourLs := i.meter.labsFor(ls) h := i.acquireHandle(ourLs) defer h.Release() - h.RecordOne(ctx, number) + err := h.RecordOne(ctx, number) + if err != nil { + i.meter.errorHandler(err) + } + return err } // New constructs a new SDK for the given batcher. This SDK supports @@ -217,13 +232,18 @@ func (i *instrument) RecordOne(ctx context.Context, number core.Number, ls api.L // own periodic collection. func New(batcher export.Batcher, lencoder export.LabelEncoder) *SDK { m := &SDK{ - batcher: batcher, - lencoder: lencoder, + batcher: batcher, + lencoder: lencoder, + errorHandler: stderrError, } m.empty.meter = m return m } +func stderrError(err error) { + fmt.Fprintln(os.Stderr, "Metrics SDK error:", err) +} + // Labels returns a LabelSet corresponding to the arguments. Passed // labels are de-duplicated, with last-value-wins semantics. func (m *SDK) Labels(kvs ...core.KeyValue) api.LabelSet { @@ -350,10 +370,14 @@ func (m *SDK) saveFromReclaim(rec *record) { // // During the collection pass, the export.Batcher will receive // one Export() call per current aggregation. -func (m *SDK) Collect(ctx context.Context) { +// +// Returns the number of records that were checkpointed. 
+func (m *SDK) Collect(ctx context.Context) int { m.collectLock.Lock() defer m.collectLock.Unlock() + checkpointed := 0 + var next *record for inuse := m.records.primary.swapNil(); inuse != nil; inuse = next { next = inuse.next.primary.load() @@ -361,14 +385,14 @@ func (m *SDK) Collect(ctx context.Context) { refcount := atomic.LoadInt64(&inuse.refcount) if refcount > 0 { - m.checkpoint(ctx, inuse) + checkpointed += m.checkpoint(ctx, inuse) m.addPrimary(inuse) continue } modified := atomic.LoadInt64(&inuse.modifiedEpoch) collected := atomic.LoadInt64(&inuse.collectedEpoch) - m.checkpoint(ctx, inuse) + checkpointed += m.checkpoint(ctx, inuse) if modified >= collected { atomic.StoreInt64(&inuse.collectedEpoch, m.currentEpoch) @@ -389,32 +413,36 @@ func (m *SDK) Collect(ctx context.Context) { atomic.StoreInt64(&chances.reclaim, 0) if chances.next.primary.load() == hazardRecord { - m.checkpoint(ctx, chances) + checkpointed += m.checkpoint(ctx, chances) m.addPrimary(chances) } } m.currentEpoch++ + return checkpointed } -func (m *SDK) checkpoint(ctx context.Context, r *record) { +func (m *SDK) checkpoint(ctx context.Context, r *record) int { if r.recorder == nil { - return + return 0 } r.recorder.Checkpoint(ctx, r.descriptor) labels := export.NewLabels(r.labels.sorted, r.labels.encoded, m.lencoder) err := m.batcher.Process(ctx, r.descriptor, labels, r.recorder) if err != nil { - // TODO warn - _ = err + m.errorHandler(err) } + return 1 } // RecordBatch enters a batch of metric events. 
func (m *SDK) RecordBatch(ctx context.Context, ls api.LabelSet, measurements ...api.Measurement) { for _, meas := range measurements { - meas.InstrumentImpl().RecordOne(ctx, meas.Number(), ls) + err := meas.InstrumentImpl().RecordOne(ctx, meas.Number(), ls) + if err != nil { + m.errorHandler(err) + } } } @@ -431,19 +459,18 @@ func (l *labels) Meter() api.Meter { return l.meter } -func (r *record) RecordOne(ctx context.Context, number core.Number) { +func (r *record) RecordOne(ctx context.Context, number core.Number) error { if r.recorder == nil { // The instrument is disabled according to the AggregationSelector. - return + return nil } if err := aggregator.RangeTest(number, r.descriptor); err != nil { - // TODO warn - return + return err } if err := r.recorder.Update(ctx, number, r.descriptor); err != nil { - // TODO warn - return + return err } + return nil } func (r *record) Release() { From 419ed4fca166f39691b87367aaf46b3800a3f93f Mon Sep 17 00:00:00 2001 From: jmacd Date: Sat, 9 Nov 2019 12:30:27 -0800 Subject: [PATCH 45/73] Undo the previous -- do not expose errors --- api/metric/noop.go | 6 ++---- api/metric/sdkhelpers.go | 4 ++-- sdk/metric/monotone_test.go | 10 ++++++++++ sdk/metric/sdk.go | 28 +++++++++------------------- 4 files changed, 23 insertions(+), 25 deletions(-) diff --git a/api/metric/noop.go b/api/metric/noop.go index 83260406d89..a504a39e535 100644 --- a/api/metric/noop.go +++ b/api/metric/noop.go @@ -22,8 +22,7 @@ func (NoopProvider) GetMeter(name string) Meter { return NoopMeter{} } -func (noopHandle) RecordOne(context.Context, core.Number) error { - return nil +func (noopHandle) RecordOne(context.Context, core.Number) { } func (noopHandle) Release() { @@ -33,8 +32,7 @@ func (noopInstrument) AcquireHandle(LabelSet) HandleImpl { return noopHandle{} } -func (noopInstrument) RecordOne(context.Context, core.Number, LabelSet) error { - return nil +func (noopInstrument) RecordOne(context.Context, core.Number, LabelSet) { } func (noopInstrument) 
Meter() Meter { diff --git a/api/metric/sdkhelpers.go b/api/metric/sdkhelpers.go index 42d653b996a..37bcff12ff2 100644 --- a/api/metric/sdkhelpers.go +++ b/api/metric/sdkhelpers.go @@ -29,14 +29,14 @@ type InstrumentImpl interface { // RecordOne allows the SDK to observe a single metric event. // The error is returned for the SDK to report to the user. - RecordOne(ctx context.Context, number core.Number, labels LabelSet) error + RecordOne(ctx context.Context, number core.Number, labels LabelSet) } // HandleImpl is the implementation-level interface to Set/Add/Record // individual metrics with precomputed labels. type HandleImpl interface { // RecordOne allows the SDK to observe a single metric event. - RecordOne(ctx context.Context, number core.Number) error + RecordOne(ctx context.Context, number core.Number) // Release frees the resources associated with this handle. It // does not affect the metric this handle was created through. diff --git a/sdk/metric/monotone_test.go b/sdk/metric/monotone_test.go index 5f607ae4db1..d6eee3fc42b 100644 --- a/sdk/metric/monotone_test.go +++ b/sdk/metric/monotone_test.go @@ -26,6 +26,7 @@ import ( "go.opentelemetry.io/otel/api/metric" export "go.opentelemetry.io/otel/sdk/export/metric" sdk "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator" "go.opentelemetry.io/otel/sdk/metric/aggregator/gauge" ) @@ -68,6 +69,8 @@ func TestMonotoneGauge(t *testing.T) { } sdk := sdk.New(batcher, sdk.DefaultLabelEncoder()) + sdk.SetErrorHandler(func(error) { t.Fatal("Unexpected") }) + gauge := sdk.NewInt64Gauge("my.gauge.name", metric.WithMonotonic(true)) handle := gauge.AcquireHandle(sdk.Labels(key.String("a", "b"))) @@ -111,7 +114,14 @@ func TestMonotoneGauge(t *testing.T) { require.Equal(t, 4, batcher.collections) // Try to lower the value to 1, it will fail. 
+ var err error + sdk.SetErrorHandler(func(sdkErr error) { + err = sdkErr + }) handle.Set(ctx, 1) + require.Equal(t, aggregator.ErrNonMonotoneInput, err) + sdk.SetErrorHandler(func(error) { t.Fatal("Unexpected") }) + sdk.Collect(ctx) // The value and timestamp are both unmodified diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 0b620f1ce2a..8b7815c4f11 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -210,15 +210,11 @@ func (i *instrument) AcquireHandle(ls api.LabelSet) api.HandleImpl { return i.acquireHandle(labs) } -func (i *instrument) RecordOne(ctx context.Context, number core.Number, ls api.LabelSet) error { +func (i *instrument) RecordOne(ctx context.Context, number core.Number, ls api.LabelSet) { ourLs := i.meter.labsFor(ls) h := i.acquireHandle(ourLs) defer h.Release() - err := h.RecordOne(ctx, number) - if err != nil { - i.meter.errorHandler(err) - } - return err + h.RecordOne(ctx, number) } // New constructs a new SDK for the given batcher. This SDK supports @@ -439,10 +435,7 @@ func (m *SDK) checkpoint(ctx context.Context, r *record) int { // RecordBatch enters a batch of metric events. func (m *SDK) RecordBatch(ctx context.Context, ls api.LabelSet, measurements ...api.Measurement) { for _, meas := range measurements { - err := meas.InstrumentImpl().RecordOne(ctx, meas.Number(), ls) - if err != nil { - m.errorHandler(err) - } + meas.InstrumentImpl().RecordOne(ctx, meas.Number(), ls) } } @@ -455,22 +448,19 @@ func (m *SDK) GetDescriptor(inst metric.InstrumentImpl) *export.Descriptor { return nil } -func (l *labels) Meter() api.Meter { - return l.meter -} - -func (r *record) RecordOne(ctx context.Context, number core.Number) error { +func (r *record) RecordOne(ctx context.Context, number core.Number) { if r.recorder == nil { // The instrument is disabled according to the AggregationSelector. 
- return nil + return } if err := aggregator.RangeTest(number, r.descriptor); err != nil { - return err + r.labels.meter.errorHandler(err) + return } if err := r.recorder.Update(ctx, number, r.descriptor); err != nil { - return err + r.labels.meter.errorHandler(err) + return } - return nil } func (r *record) Release() { From 095311267047e2738302be55877c6626c7b28431 Mon Sep 17 00:00:00 2001 From: jmacd Date: Sat, 9 Nov 2019 12:50:30 -0800 Subject: [PATCH 46/73] Add simple selector variations, test --- sdk/metric/aggregator/ddsketch/ddsketch.go | 9 ++- sdk/metric/selector/simple/simple.go | 71 ++++++++++++++++++++-- sdk/metric/selector/simple/simple_test.go | 57 +++++++++++++++++ 3 files changed, 128 insertions(+), 9 deletions(-) create mode 100644 sdk/metric/selector/simple/simple_test.go diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index 4a000ada365..5f1d51cc4db 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -26,10 +26,13 @@ import ( "go.opentelemetry.io/otel/sdk/metric/aggregator" ) +// Config is an alias for the underlying DDSketch config object. +type Config = sdk.Config + // Aggregator aggregates measure events. type Aggregator struct { lock sync.Mutex - cfg *sdk.Config + cfg *Config kind core.NumberKind current *sdk.DDSketch checkpoint *sdk.DDSketch @@ -40,7 +43,7 @@ var _ aggregator.MaxSumCount = &Aggregator{} var _ aggregator.Distribution = &Aggregator{} // New returns a new DDSketch aggregator. -func New(cfg *sdk.Config, desc *export.Descriptor) *Aggregator { +func New(cfg *Config, desc *export.Descriptor) *Aggregator { return &Aggregator{ cfg: cfg, kind: desc.NumberKind(), @@ -53,7 +56,7 @@ func New(cfg *sdk.Config, desc *export.Descriptor) *Aggregator { // TODO: The Config constructor should probably set minValue to -Inf // to aggregate metrics with absolute=false. 
This requires providing values // for alpha and maxNumBins -func NewDefaultConfig() *sdk.Config { +func NewDefaultConfig() *Config { return sdk.NewDefaultConfig() } diff --git a/sdk/metric/selector/simple/simple.go b/sdk/metric/selector/simple/simple.go index 2feae03022a..2bf9fa67b2b 100644 --- a/sdk/metric/selector/simple/simple.go +++ b/sdk/metric/selector/simple/simple.go @@ -16,20 +16,57 @@ package simple // import "go.opentelemetry.io/otel/sdk/metric/selector/simple" import ( export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator/array" "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" + "go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch" "go.opentelemetry.io/otel/sdk/metric/aggregator/gauge" "go.opentelemetry.io/otel/sdk/metric/aggregator/maxsumcount" ) -type selector struct{} +type ( + selectorInexpensive struct{} + selectorExact struct{} + selectorSketch struct { + config *ddsketch.Config + } +) + +var ( + _ export.AggregationSelector = selectorInexpensive{} + _ export.AggregationSelector = selectorSketch{} + _ export.AggregationSelector = selectorExact{} +) + +// NewWithInexpensiveMeasure returns a simple aggregation selector +// that uses counter, gauge, and maxsumcount aggregators for the three +// kinds of metric. This selector is faster and uses less memory than +// the others because maxsumcount does not aggregate quantile +// information. +func NewWithInexpensiveMeasure() export.AggregationSelector { + return selectorInexpensive{} +} + +// NewWithSketchMeasure returns a simple aggregation selector that +// uses counter, gauge, and ddsketch aggregators for the three kinds +// of metric. This selector uses more cpu and memory than the +// NewWithInexpensiveMeasure because it uses one DDSketch per distinct +// measure and labelset. 
+func NewWithSketchMeasure(config *ddsketch.Config) export.AggregationSelector { + return selectorSketch{ + config: config, + } +} -// New returns a simple aggregation selector that uses counter, gauge, -// and maxsumcount behavior for the three kinds of metric. -func New() export.AggregationSelector { - return selector{} +// NewWithExactMeasure returns a simple aggregation selector that uses +// counter, gauge, and array behavior for the three kinds of metric. +// This selector uses more memory than the NewWithSketchMeasure +// because it aggregates an array of all values, therefore is able to +// compute exact quantiles. +func NewWithExactMeasure() export.AggregationSelector { + return selectorExact{} } -func (s selector) AggregatorFor(descriptor *export.Descriptor) export.Aggregator { +func (selectorInexpensive) AggregatorFor(descriptor *export.Descriptor) export.Aggregator { switch descriptor.MetricKind() { case export.GaugeKind: return gauge.New() @@ -39,3 +76,25 @@ func (s selector) AggregatorFor(descriptor *export.Descriptor) export.Aggregator return counter.New() } } + +func (s selectorSketch) AggregatorFor(descriptor *export.Descriptor) export.Aggregator { + switch descriptor.MetricKind() { + case export.GaugeKind: + return gauge.New() + case export.MeasureKind: + return ddsketch.New(s.config, descriptor) + default: + return counter.New() + } +} + +func (selectorExact) AggregatorFor(descriptor *export.Descriptor) export.Aggregator { + switch descriptor.MetricKind() { + case export.GaugeKind: + return gauge.New() + case export.MeasureKind: + return array.New() + default: + return counter.New() + } +} diff --git a/sdk/metric/selector/simple/simple_test.go b/sdk/metric/selector/simple/simple_test.go new file mode 100644 index 00000000000..ee9dd62183e --- /dev/null +++ b/sdk/metric/selector/simple/simple_test.go @@ -0,0 +1,57 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use 
this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package simple_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/api/core" + export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator/array" + "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" + "go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch" + "go.opentelemetry.io/otel/sdk/metric/aggregator/gauge" + "go.opentelemetry.io/otel/sdk/metric/aggregator/maxsumcount" + "go.opentelemetry.io/otel/sdk/metric/selector/simple" +) + +var ( + testGaugeDesc = export.NewDescriptor("gauge", export.GaugeKind, nil, "", "", core.Int64NumberKind, false) + testCounterDesc = export.NewDescriptor("counter", export.CounterKind, nil, "", "", core.Int64NumberKind, false) + testMeasureDesc = export.NewDescriptor("measure", export.MeasureKind, nil, "", "", core.Int64NumberKind, false) +) + +func TestInexpensiveMeasure(t *testing.T) { + inex := simple.NewWithInexpensiveMeasure() + require.NotPanics(t, func() { _ = inex.AggregatorFor(testGaugeDesc).(*gauge.Aggregator) }) + require.NotPanics(t, func() { _ = inex.AggregatorFor(testCounterDesc).(*counter.Aggregator) }) + require.NotPanics(t, func() { _ = inex.AggregatorFor(testMeasureDesc).(*maxsumcount.Aggregator) }) +} + +func TestSketchMeasure(t *testing.T) { + sk := simple.NewWithSketchMeasure(ddsketch.NewDefaultConfig()) + require.NotPanics(t, func() { _ = sk.AggregatorFor(testGaugeDesc).(*gauge.Aggregator) }) + require.NotPanics(t, func() { _ = 
sk.AggregatorFor(testCounterDesc).(*counter.Aggregator) }) + require.NotPanics(t, func() { _ = sk.AggregatorFor(testMeasureDesc).(*ddsketch.Aggregator) }) +} + +func TestExactMeasure(t *testing.T) { + ex := simple.NewWithExactMeasure() + require.NotPanics(t, func() { _ = ex.AggregatorFor(testGaugeDesc).(*gauge.Aggregator) }) + require.NotPanics(t, func() { _ = ex.AggregatorFor(testCounterDesc).(*counter.Aggregator) }) + require.NotPanics(t, func() { _ = ex.AggregatorFor(testMeasureDesc).(*array.Aggregator) }) +} From 8034ebe0e102a6a47eab49865b39df996e578913 Mon Sep 17 00:00:00 2001 From: jmacd Date: Sat, 9 Nov 2019 12:56:27 -0800 Subject: [PATCH 47/73] Repair examples --- api/core/number_test.go | 6 +++--- example/basic/main.go | 7 +++++-- sdk/metric/example_test.go | 2 +- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/api/core/number_test.go b/api/core/number_test.go index 685b48346c5..eca8307b234 100644 --- a/api/core/number_test.go +++ b/api/core/number_test.go @@ -161,7 +161,7 @@ func TestNumberZero(t *testing.T) { } func TestNumberAsInterface(t *testing.T) { - require.Equal(t, 10, NewInt64Number(10).AsInterface(Int64NumberKind).(int64)) - require.Equal(t, 11.11, NewFloat64Number(11.11).AsInterface(Float64NumberKind).(float64)) - require.Equal(t, 100, NewUint64Number(100).AsInterface(Uint64NumberKind).(uint64)) + require.Equal(t, int64(10), NewInt64Number(10).AsInterface(Int64NumberKind).(int64)) + require.Equal(t, float64(11.11), NewFloat64Number(11.11).AsInterface(Float64NumberKind).(float64)) + require.Equal(t, uint64(100), NewUint64Number(100).AsInterface(Uint64NumberKind).(uint64)) } diff --git a/example/basic/main.go b/example/basic/main.go index 4e9914175ba..06567750838 100644 --- a/example/basic/main.go +++ b/example/basic/main.go @@ -57,8 +57,11 @@ func initTracer() { } func initMeter() *push.Controller { - selector := simple.New() - exporter, err := metricstdout.New(metricstdout.Options{PrettyPrint: false}) + selector := 
simple.NewWithExactMeasure() + exporter, err := metricstdout.New(metricstdout.Options{ + Quantiles: []float64{0.5, 0.9, 0.99}, + PrettyPrint: false, + }) if err != nil { log.Panicf("failed to initialize metric stdout exporter %v", err) } diff --git a/sdk/metric/example_test.go b/sdk/metric/example_test.go index d905c560871..478aa08824a 100644 --- a/sdk/metric/example_test.go +++ b/sdk/metric/example_test.go @@ -29,7 +29,7 @@ import ( ) func ExampleNew() { - selector := simple.New() + selector := simple.NewWithInexpensiveMeasure() exporter, err := stdout.New(stdout.Options{ PrettyPrint: true, DoNotPrintTime: true, // This makes the output deterministic From a4720484c14999163a41752a939bff660d2c4814 Mon Sep 17 00:00:00 2001 From: jmacd Date: Sat, 9 Nov 2019 14:09:58 -0800 Subject: [PATCH 48/73] Test push controller error handling --- example/basic/go.sum | 2 + example/http-stackdriver/go.sum | 1 + example/http/go.sum | 1 + example/jaeger/go.sum | 1 + example/namedtracer/go.sum | 1 + exporter/metric/test/test.go | 4 + exporter/trace/jaeger/go.sum | 1 + exporter/trace/stackdriver/go.sum | 1 + go.mod | 1 + go.sum | 3 + sdk/metric/controller/push/push.go | 77 +++++++--- sdk/metric/controller/push/push_test.go | 184 ++++++++++++++++++++++++ sdk/metric/sdk.go | 4 +- 13 files changed, 263 insertions(+), 18 deletions(-) create mode 100644 sdk/metric/controller/push/push_test.go diff --git a/example/basic/go.sum b/example/basic/go.sum index aa7064a2684..fd0bbda7334 100644 --- a/example/basic/go.sum +++ b/example/basic/go.sum @@ -8,6 +8,8 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= 
+github.com/benbjohnson/clock v1.0.0 h1:78Jk/r6m4wCi6sndMpty7A//t4dw/RW5fV4ZgDVfX1w= +github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= diff --git a/example/http-stackdriver/go.sum b/example/http-stackdriver/go.sum index d2f1699701d..4ac5cb14fdc 100644 --- a/example/http-stackdriver/go.sum +++ b/example/http-stackdriver/go.sum @@ -22,6 +22,7 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= diff --git a/example/http/go.sum b/example/http/go.sum index 4ec8774711e..5115893c59a 100644 --- a/example/http/go.sum +++ b/example/http/go.sum @@ -7,6 +7,7 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= diff --git a/example/jaeger/go.sum b/example/jaeger/go.sum index 5aa3adca365..3a0bd1cc046 100644 --- a/example/jaeger/go.sum +++ b/example/jaeger/go.sum @@ -11,6 +11,7 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= diff --git a/example/namedtracer/go.sum b/example/namedtracer/go.sum index a8786ca0f4c..b3dfa751c53 100644 --- a/example/namedtracer/go.sum +++ b/example/namedtracer/go.sum @@ -7,6 +7,7 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod 
h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= diff --git a/exporter/metric/test/test.go b/exporter/metric/test/test.go index 2a4aec517f3..14c63d0c198 100644 --- a/exporter/metric/test/test.go +++ b/exporter/metric/test/test.go @@ -16,6 +16,10 @@ func NewProducer(encoder export.LabelEncoder) *Producer { } } +func (p *Producer) Reset() { + p.updates = nil +} + func (p *Producer) Add(desc *export.Descriptor, agg export.Aggregator, labels ...core.KeyValue) { encoded := p.encoder.EncodeLabels(labels) elabels := export.NewLabels(labels, encoded, p.encoder) diff --git a/exporter/trace/jaeger/go.sum b/exporter/trace/jaeger/go.sum index 291082dd3ac..5a0808feb90 100644 --- a/exporter/trace/jaeger/go.sum +++ b/exporter/trace/jaeger/go.sum @@ -11,6 +11,7 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= diff --git a/exporter/trace/stackdriver/go.sum b/exporter/trace/stackdriver/go.sum index 08e99865184..ba7f7654613 
100644 --- a/exporter/trace/stackdriver/go.sum +++ b/exporter/trace/stackdriver/go.sum @@ -21,6 +21,7 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= diff --git a/go.mod b/go.mod index eec6a4215d0..888b3489b1a 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.13 require ( github.com/DataDog/sketches-go v0.0.0-20190923095040-43f19ad77ff7 + github.com/benbjohnson/clock v1.0.0 github.com/client9/misspell v0.3.4 github.com/gogo/protobuf v1.3.1 // indirect github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d // indirect diff --git a/go.sum b/go.sum index cadf1b265e7..28b92022a51 100644 --- a/go.sum +++ b/go.sum @@ -10,6 +10,8 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/benbjohnson/clock v1.0.0 h1:78Jk/r6m4wCi6sndMpty7A//t4dw/RW5fV4ZgDVfX1w= +github.com/benbjohnson/clock v1.0.0/go.mod 
h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bombsimon/wsl v1.2.5 h1:9gTOkIwVtoDZywvX802SDHokeX4kW1cKnV8ZTVAPkRs= @@ -138,6 +140,7 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= diff --git a/sdk/metric/controller/push/push.go b/sdk/metric/controller/push/push.go index e2e6bc030f5..be928cacbf6 100644 --- a/sdk/metric/controller/push/push.go +++ b/sdk/metric/controller/push/push.go @@ -19,6 +19,8 @@ import ( "sync" "time" + "github.com/benbjohnson/clock" + "go.opentelemetry.io/otel/api/metric" export "go.opentelemetry.io/otel/sdk/export/metric" sdk "go.opentelemetry.io/otel/sdk/metric" @@ -26,12 +28,16 @@ import ( // Controller organizes a periodic push of metric data. 
type Controller struct { - sdk *sdk.SDK - batcher export.Batcher - exporter export.Exporter - ticker *time.Ticker - wg sync.WaitGroup - ch chan struct{} + lock sync.Mutex + sdk *sdk.SDK + errorHandler sdk.ErrorHandler + batcher export.Batcher + exporter export.Exporter + wg sync.WaitGroup + ch chan struct{} + period time.Duration + ticker *clock.Ticker + clock clock.Clock } var _ metric.Provider = &Controller{} @@ -47,34 +53,74 @@ func New(batcher export.Batcher, exporter export.Exporter, period time.Duration) } return &Controller{ - sdk: sdk.New(batcher, lencoder), - batcher: batcher, - exporter: exporter, - ticker: time.NewTicker(period), - ch: make(chan struct{}), + sdk: sdk.New(batcher, lencoder), + errorHandler: sdk.DefaultErrorHandler, + batcher: batcher, + exporter: exporter, + ch: make(chan struct{}), + period: period, + clock: clock.New(), } } +// SetClock supports setting a mock clock for testing. This must be +// called before Start(). +func (c *Controller) SetClock(clock clock.Clock) { + c.lock.Lock() + defer c.lock.Unlock() + c.clock = clock +} + +func (c *Controller) SetErrorHandler(errorHandler sdk.ErrorHandler) { + c.lock.Lock() + defer c.lock.Unlock() + c.errorHandler = errorHandler + c.sdk.SetErrorHandler(errorHandler) +} + +// GetMeter returns a named Meter, satisfying the metric.Provider +// interface. func (c *Controller) GetMeter(name string) metric.Meter { return c.sdk } +// Start begins a ticker that periodically collects and exports +// metrics with the configured interval. func (c *Controller) Start() { + c.lock.Lock() + defer c.lock.Unlock() + + if c.ticker != nil { + return + } + + c.ticker = c.clock.Ticker(c.period) c.wg.Add(1) - go c.run() + go c.run(c.ch) } +// Stop waits for the background goroutine to return and then collects +// and exports metrics one last time before returning. 
func (c *Controller) Stop() { + c.lock.Lock() + defer c.lock.Unlock() + + if c.ch == nil { + return + } + close(c.ch) + c.ch = nil c.wg.Wait() + c.ticker.Stop() c.tick() } -func (c *Controller) run() { +func (c *Controller) run(ch chan struct{}) { for { select { - case <-c.ch: + case <-ch: c.wg.Done() return case <-c.ticker.C: @@ -89,7 +135,6 @@ func (c *Controller) tick() { err := c.exporter.Export(ctx, c.batcher.ReadCheckpoint()) if err != nil { - // TODO: report this error - _ = err + c.errorHandler(err) } } diff --git a/sdk/metric/controller/push/push_test.go b/sdk/metric/controller/push/push_test.go new file mode 100644 index 00000000000..513043b72cd --- /dev/null +++ b/sdk/metric/controller/push/push_test.go @@ -0,0 +1,184 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package push_test + +import ( + "context" + "fmt" + "runtime" + "testing" + "time" + + "github.com/benbjohnson/clock" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/exporter/metric/test" + export "go.opentelemetry.io/otel/sdk/export/metric" + sdk "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator" + "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" + "go.opentelemetry.io/otel/sdk/metric/controller/push" +) + +type testBatcher struct { + t *testing.T + producer *test.Producer + checkpoints int +} + +type testExporter struct { + t *testing.T + exports int + records []export.Record + retErr error +} + +type testFixture struct { + producer *test.Producer + batcher *testBatcher + exporter *testExporter +} + +func newFixture(t *testing.T) testFixture { + producer := test.NewProducer(sdk.DefaultLabelEncoder()) + + batcher := &testBatcher{ + t: t, + producer: producer, + } + exporter := &testExporter{ + t: t, + } + return testFixture{ + producer: producer, + batcher: batcher, + exporter: exporter, + } +} + +func (b *testBatcher) AggregatorFor(*export.Descriptor) export.Aggregator { + return counter.New() +} + +func (b *testBatcher) ReadCheckpoint() export.Producer { + b.checkpoints++ + return b.producer +} + +func (b *testBatcher) Process(_ context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { + b.producer.Add(desc, agg, labels.Ordered()...) 
+ return nil +} + +func (e *testExporter) Export(_ context.Context, producer export.Producer) error { + e.exports++ + producer.Foreach(func(r export.Record) { + e.records = append(e.records, r) + }) + return e.retErr +} + +func TestPushDoubleStop(t *testing.T) { + fix := newFixture(t) + p := push.New(fix.batcher, fix.exporter, time.Second) + p.Start() + p.Stop() + p.Stop() +} + +func TestPushDoubleStart(t *testing.T) { + fix := newFixture(t) + p := push.New(fix.batcher, fix.exporter, time.Second) + p.Start() + p.Start() + p.Stop() +} + +func TestPushTicker(t *testing.T) { + fix := newFixture(t) + + p := push.New(fix.batcher, fix.exporter, time.Second) + meter := p.GetMeter("name") + + mock := clock.NewMock() + p.SetClock(mock) + + ctx := context.Background() + + counter := meter.NewInt64Counter("counter") + + p.Start() + + counter.Add(ctx, 3, meter.Labels()) + + require.Equal(t, 0, fix.batcher.checkpoints) + require.Equal(t, 0, fix.exporter.exports) + require.Equal(t, 0, len(fix.exporter.records)) + + mock.Add(time.Second) + runtime.Gosched() + + require.Equal(t, 1, fix.batcher.checkpoints) + require.Equal(t, 1, fix.exporter.exports) + require.Equal(t, 1, len(fix.exporter.records)) + require.Equal(t, "counter", fix.exporter.records[0].Descriptor().Name()) + require.Equal(t, int64(3), fix.exporter.records[0].Aggregator().(aggregator.Sum).Sum().AsInt64()) + + fix.producer.Reset() + fix.exporter.records = nil + + counter.Add(ctx, 7, meter.Labels()) + + mock.Add(time.Second) + runtime.Gosched() + + require.Equal(t, 2, fix.batcher.checkpoints) + require.Equal(t, 2, fix.exporter.exports) + require.Equal(t, 1, len(fix.exporter.records)) + require.Equal(t, "counter", fix.exporter.records[0].Descriptor().Name()) + require.Equal(t, int64(7), fix.exporter.records[0].Aggregator().(aggregator.Sum).Sum().AsInt64()) + + p.Stop() +} + +func TestPushExportError(t *testing.T) { + fix := newFixture(t) + fix.exporter.retErr = fmt.Errorf("Test export error") + + p := 
push.New(fix.batcher, fix.exporter, time.Second) + + var err error + p.SetErrorHandler(func(sdkErr error) { + err = sdkErr + }) + + mock := clock.NewMock() + p.SetClock(mock) + + p.Start() + runtime.Gosched() + + require.Equal(t, 0, fix.exporter.exports) + require.Nil(t, err) + + mock.Add(time.Second) + runtime.Gosched() + + require.Equal(t, 1, fix.exporter.exports) + require.Error(t, err) + require.Equal(t, fix.exporter.retErr, err) + + p.Stop() +} diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 8b7815c4f11..188bbc92a06 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -230,13 +230,13 @@ func New(batcher export.Batcher, lencoder export.LabelEncoder) *SDK { m := &SDK{ batcher: batcher, lencoder: lencoder, - errorHandler: stderrError, + errorHandler: DefaultErrorHandler, } m.empty.meter = m return m } -func stderrError(err error) { +func DefaultErrorHandler(err error) { fmt.Fprintln(os.Stderr, "Metrics SDK error:", err) } From 08a26de558dcf8b71748af7ba2e8f512b8063f9e Mon Sep 17 00:00:00 2001 From: jmacd Date: Sat, 9 Nov 2019 14:28:35 -0800 Subject: [PATCH 49/73] Add SDK label encoder tests --- sdk/metric/correct_test.go | 41 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/sdk/metric/correct_test.go b/sdk/metric/correct_test.go index 4440507636e..3b577228e41 100644 --- a/sdk/metric/correct_test.go +++ b/sdk/metric/correct_test.go @@ -16,11 +16,14 @@ package metric_test import ( "context" + "fmt" "math" "testing" "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/api/core" + "go.opentelemetry.io/otel/api/key" "go.opentelemetry.io/otel/api/metric" export "go.opentelemetry.io/otel/sdk/export/metric" sdk "go.opentelemetry.io/otel/sdk/metric" @@ -31,10 +34,13 @@ import ( ) type correctnessBatcher struct { - t *testing.T - agg export.Aggregator + t *testing.T + agg export.Aggregator + records []export.Record } +type testLabelEncoder struct{} + func (cb *correctnessBatcher) 
AggregatorFor(*export.Descriptor) export.Aggregator { return cb.agg } @@ -45,9 +51,14 @@ func (cb *correctnessBatcher) ReadCheckpoint() export.Producer { } func (cb *correctnessBatcher) Process(_ context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { + cb.records = append(cb.records, export.NewRecord(desc, labels, agg)) return nil } +func (testLabelEncoder) EncodeLabels(labels []core.KeyValue) string { + return fmt.Sprint(labels) +} + func TestInputRangeTestCounter(t *testing.T) { ctx := context.Background() cagg := counter.New() @@ -144,3 +155,29 @@ func TestRecordNaN(t *testing.T) { g.Set(ctx, math.NaN(), sdk.Labels()) require.Error(t, err) } + +func TestSDKLabelEncoder(t *testing.T) { + ctx := context.Background() + cagg := counter.New() + batcher := &correctnessBatcher{ + t: t, + agg: cagg, + } + sdk := sdk.New(batcher, testLabelEncoder{}) + + measure := sdk.NewFloat64Measure("measure") + measure.Record(ctx, 1, sdk.Labels(key.String("A", "B"), key.String("C", "D"))) + + sdk.Collect(ctx) + + require.Equal(t, 1, len(batcher.records)) + + labels := batcher.records[0].Labels() + require.Equal(t, `[{A {8 0 B}} {C {8 0 D}}]`, labels.Encoded()) +} + +func TestDefaultLabelEncoder(t *testing.T) { + encoder := sdk.DefaultLabelEncoder() + encoded := encoder.EncodeLabels([]core.KeyValue{key.String("A", "B"), key.String("C", "D")}) + require.Equal(t, `A=B,C=D`, encoded) +} From c09bd0ef0e0e4aa4bb6f7adb82584ba34f1675ce Mon Sep 17 00:00:00 2001 From: jmacd Date: Sat, 9 Nov 2019 15:14:35 -0800 Subject: [PATCH 50/73] Add a defaultkeys batcher test --- .../batcher/defaultkeys/defaultkeys_test.go | 150 ++++++++++++++++++ 1 file changed, 150 insertions(+) create mode 100644 sdk/metric/batcher/defaultkeys/defaultkeys_test.go diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys_test.go b/sdk/metric/batcher/defaultkeys/defaultkeys_test.go new file mode 100644 index 00000000000..f633ab64355 --- /dev/null +++ 
b/sdk/metric/batcher/defaultkeys/defaultkeys_test.go @@ -0,0 +1,150 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package defaultkeys_test + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/api/core" + "go.opentelemetry.io/otel/api/key" + export "go.opentelemetry.io/otel/sdk/export/metric" + sdk "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" + "go.opentelemetry.io/otel/sdk/metric/aggregator/gauge" + "go.opentelemetry.io/otel/sdk/metric/batcher/defaultkeys" +) + +type ( + testEncoder struct{} + + testOutput map[string]int64 +) + +var ( + // Gauge groups by "G" + // Counter groups by "C" + testGaugeDesc = export.NewDescriptor( + "gauge", export.GaugeKind, []core.Key{key.New("G")}, "", "", core.Int64NumberKind, false) + testCounterDesc = export.NewDescriptor( + "counter", export.CounterKind, []core.Key{key.New("C")}, "", "", core.Int64NumberKind, false) + + // The SDK and the batcher use different encoders in these tests. 
+ sdkEncoder = &testEncoder{} + groupEncoder = sdk.DefaultLabelEncoder() + + // Gauge groups are (labels1), (labels2+labels3) + // Counter groups are (labels1+labels2), (labels3) + labels1 = makeLabels(sdkEncoder, key.String("G", "H"), key.String("C", "D")) + labels2 = makeLabels(sdkEncoder, key.String("C", "D"), key.String("E", "F")) + labels3 = makeLabels(sdkEncoder) +) + +func makeLabels(encoder export.LabelEncoder, labels ...core.KeyValue) export.Labels { + encoded := encoder.EncodeLabels(labels) + return export.NewLabels(labels, encoded, encoder) +} + +func (testEncoder) EncodeLabels(labels []core.KeyValue) string { + return fmt.Sprint(labels) +} + +func gaugeAgg(v int64) export.Aggregator { + ctx := context.Background() + gagg := gauge.New() + _ = gagg.Update(ctx, core.NewInt64Number(v), testGaugeDesc) + gagg.Checkpoint(ctx, testCounterDesc) + return gagg +} + +func counterAgg(v int64) export.Aggregator { + ctx := context.Background() + cagg := counter.New() + _ = cagg.Update(ctx, core.NewInt64Number(v), testCounterDesc) + cagg.Checkpoint(ctx, testCounterDesc) + return cagg +} + +func (o testOutput) addTo(rec export.Record) { + labels := rec.Labels() + key := fmt.Sprint(rec.Descriptor().Name(), "/", labels.Encoded()) + var value int64 + switch t := rec.Aggregator().(type) { + case *counter.Aggregator: + value = t.Sum().AsInt64() + case *gauge.Aggregator: + value = t.LastValue().AsInt64() + } + o[key] = value +} + +func TestGroupingStateless(t *testing.T) { + ctx := context.Background() + b := defaultkeys.New(nil, groupEncoder, false) + + b.Process(ctx, testGaugeDesc, labels1, gaugeAgg(10)) + b.Process(ctx, testGaugeDesc, labels2, gaugeAgg(20)) + b.Process(ctx, testGaugeDesc, labels3, gaugeAgg(30)) + + b.Process(ctx, testCounterDesc, labels1, counterAgg(10)) + b.Process(ctx, testCounterDesc, labels2, counterAgg(20)) + b.Process(ctx, testCounterDesc, labels3, counterAgg(40)) + + processor := b.ReadCheckpoint() + + records := testOutput{} + 
processor.Foreach(records.addTo) + + // Output gauge should have only the "G=H" and "G=" keys. + // Output counter should have only the "C=D" and "C=" keys. + require.EqualValues(t, map[string]int64{ + "counter/C=D": 30, // labels1 + labels2 + "counter/C=": 40, // labels3 + "gauge/G=H": 10, // labels1 + "gauge/G=": 30, // labels3 = last value + }, records) + + // Verify that state was reset + processor = b.ReadCheckpoint() + processor.Foreach(func(rec export.Record) { + t.Fatal("Unexpected call") + }) +} + +func TestGroupingStateful(t *testing.T) { + ctx := context.Background() + b := defaultkeys.New(nil, groupEncoder, true) + + b.Process(ctx, testCounterDesc, labels1, counterAgg(10)) + + processor := b.ReadCheckpoint() + + records1 := testOutput{} + processor.Foreach(records1.addTo) + + require.EqualValues(t, map[string]int64{ + "counter/C=D": 10, // labels1 + }, records1) + + // Test that state was NOT reset + processor = b.ReadCheckpoint() + + records2 := testOutput{} + processor.Foreach(records2.addTo) + + require.EqualValues(t, records1, records2) +} From 9ef0a37e492124ca0441e1a193a419b0120cf9b1 Mon Sep 17 00:00:00 2001 From: jmacd Date: Sat, 9 Nov 2019 15:28:33 -0800 Subject: [PATCH 51/73] Add an ungrouped batcher test --- go.sum | 1 + .../batcher/defaultkeys/defaultkeys_test.go | 101 +++--------------- sdk/metric/batcher/test/test.go | 100 +++++++++++++++++ .../batcher/ungrouped/ungrouped_test.go | 86 +++++++++++++++ 4 files changed, 204 insertions(+), 84 deletions(-) create mode 100644 sdk/metric/batcher/test/test.go create mode 100644 sdk/metric/batcher/ungrouped/ungrouped_test.go diff --git a/go.sum b/go.sum index 28b92022a51..b483f721b6e 100644 --- a/go.sum +++ b/go.sum @@ -280,6 +280,7 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt 
v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opentelemetry.io v0.1.0 h1:EANZoRCOP+A3faIlw/iN6YEWoYb1vleZRKm1EvH8T48= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys_test.go b/sdk/metric/batcher/defaultkeys/defaultkeys_test.go index f633ab64355..3368a0a5f4b 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys_test.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys_test.go @@ -16,98 +16,31 @@ package defaultkeys_test import ( "context" - "fmt" "testing" "github.com/stretchr/testify/require" - "go.opentelemetry.io/otel/api/core" - "go.opentelemetry.io/otel/api/key" + export "go.opentelemetry.io/otel/sdk/export/metric" - sdk "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" - "go.opentelemetry.io/otel/sdk/metric/aggregator/gauge" "go.opentelemetry.io/otel/sdk/metric/batcher/defaultkeys" + "go.opentelemetry.io/otel/sdk/metric/batcher/test" ) -type ( - testEncoder struct{} - - testOutput map[string]int64 -) - -var ( - // Gauge groups by "G" - // Counter groups by "C" - testGaugeDesc = export.NewDescriptor( - "gauge", export.GaugeKind, []core.Key{key.New("G")}, "", "", core.Int64NumberKind, false) - testCounterDesc = export.NewDescriptor( - "counter", export.CounterKind, []core.Key{key.New("C")}, "", "", core.Int64NumberKind, false) - - // The SDK and the batcher use different encoders in these tests. 
- sdkEncoder = &testEncoder{} - groupEncoder = sdk.DefaultLabelEncoder() - - // Gauge groups are (labels1), (labels2+labels3) - // Counter groups are (labels1+labels2), (labels3) - labels1 = makeLabels(sdkEncoder, key.String("G", "H"), key.String("C", "D")) - labels2 = makeLabels(sdkEncoder, key.String("C", "D"), key.String("E", "F")) - labels3 = makeLabels(sdkEncoder) -) - -func makeLabels(encoder export.LabelEncoder, labels ...core.KeyValue) export.Labels { - encoded := encoder.EncodeLabels(labels) - return export.NewLabels(labels, encoded, encoder) -} - -func (testEncoder) EncodeLabels(labels []core.KeyValue) string { - return fmt.Sprint(labels) -} - -func gaugeAgg(v int64) export.Aggregator { - ctx := context.Background() - gagg := gauge.New() - _ = gagg.Update(ctx, core.NewInt64Number(v), testGaugeDesc) - gagg.Checkpoint(ctx, testCounterDesc) - return gagg -} - -func counterAgg(v int64) export.Aggregator { - ctx := context.Background() - cagg := counter.New() - _ = cagg.Update(ctx, core.NewInt64Number(v), testCounterDesc) - cagg.Checkpoint(ctx, testCounterDesc) - return cagg -} - -func (o testOutput) addTo(rec export.Record) { - labels := rec.Labels() - key := fmt.Sprint(rec.Descriptor().Name(), "/", labels.Encoded()) - var value int64 - switch t := rec.Aggregator().(type) { - case *counter.Aggregator: - value = t.Sum().AsInt64() - case *gauge.Aggregator: - value = t.LastValue().AsInt64() - } - o[key] = value -} - func TestGroupingStateless(t *testing.T) { ctx := context.Background() - b := defaultkeys.New(nil, groupEncoder, false) + b := defaultkeys.New(nil, test.GroupEncoder, false) - b.Process(ctx, testGaugeDesc, labels1, gaugeAgg(10)) - b.Process(ctx, testGaugeDesc, labels2, gaugeAgg(20)) - b.Process(ctx, testGaugeDesc, labels3, gaugeAgg(30)) + _ = b.Process(ctx, test.GaugeDesc, test.Labels1, test.GaugeAgg(10)) + _ = b.Process(ctx, test.GaugeDesc, test.Labels2, test.GaugeAgg(20)) + _ = b.Process(ctx, test.GaugeDesc, test.Labels3, test.GaugeAgg(30)) - 
b.Process(ctx, testCounterDesc, labels1, counterAgg(10)) - b.Process(ctx, testCounterDesc, labels2, counterAgg(20)) - b.Process(ctx, testCounterDesc, labels3, counterAgg(40)) + _ = b.Process(ctx, test.CounterDesc, test.Labels1, test.CounterAgg(10)) + _ = b.Process(ctx, test.CounterDesc, test.Labels2, test.CounterAgg(20)) + _ = b.Process(ctx, test.CounterDesc, test.Labels3, test.CounterAgg(40)) processor := b.ReadCheckpoint() - records := testOutput{} - processor.Foreach(records.addTo) + records := test.Output{} + processor.Foreach(records.AddTo) // Output gauge should have only the "G=H" and "G=" keys. // Output counter should have only the "C=D" and "C=" keys. @@ -127,14 +60,14 @@ func TestGroupingStateless(t *testing.T) { func TestGroupingStateful(t *testing.T) { ctx := context.Background() - b := defaultkeys.New(nil, groupEncoder, true) + b := defaultkeys.New(nil, test.GroupEncoder, true) - b.Process(ctx, testCounterDesc, labels1, counterAgg(10)) + _ = b.Process(ctx, test.CounterDesc, test.Labels1, test.CounterAgg(10)) processor := b.ReadCheckpoint() - records1 := testOutput{} - processor.Foreach(records1.addTo) + records1 := test.Output{} + processor.Foreach(records1.AddTo) require.EqualValues(t, map[string]int64{ "counter/C=D": 10, // labels1 @@ -143,8 +76,8 @@ func TestGroupingStateful(t *testing.T) { // Test that state was NOT reset processor = b.ReadCheckpoint() - records2 := testOutput{} - processor.Foreach(records2.addTo) + records2 := test.Output{} + processor.Foreach(records2.AddTo) require.EqualValues(t, records1, records2) } diff --git a/sdk/metric/batcher/test/test.go b/sdk/metric/batcher/test/test.go new file mode 100644 index 00000000000..231b69a5747 --- /dev/null +++ b/sdk/metric/batcher/test/test.go @@ -0,0 +1,100 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +import ( + "context" + "fmt" + "strings" + + "go.opentelemetry.io/otel/api/core" + "go.opentelemetry.io/otel/api/key" + export "go.opentelemetry.io/otel/sdk/export/metric" + sdk "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" + "go.opentelemetry.io/otel/sdk/metric/aggregator/gauge" +) + +type ( + Encoder struct{} + + Output map[string]int64 +) + +var ( + // Gauge groups by "G" + // Counter groups by "C" + GaugeDesc = export.NewDescriptor( + "gauge", export.GaugeKind, []core.Key{key.New("G")}, "", "", core.Int64NumberKind, false) + CounterDesc = export.NewDescriptor( + "counter", export.CounterKind, []core.Key{key.New("C")}, "", "", core.Int64NumberKind, false) + + // The SDK and the batcher use different encoders in these tests. 
+ SdkEncoder = &Encoder{} + GroupEncoder = sdk.DefaultLabelEncoder() + + // Gauge groups are (labels1), (labels2+labels3) + // Counter groups are (labels1+labels2), (labels3) + Labels1 = makeLabels(SdkEncoder, key.String("G", "H"), key.String("C", "D")) + Labels2 = makeLabels(SdkEncoder, key.String("C", "D"), key.String("E", "F")) + Labels3 = makeLabels(SdkEncoder) +) + +func makeLabels(encoder export.LabelEncoder, labels ...core.KeyValue) export.Labels { + encoded := encoder.EncodeLabels(labels) + return export.NewLabels(labels, encoded, encoder) +} + +func (Encoder) EncodeLabels(labels []core.KeyValue) string { + var sb strings.Builder + for i, l := range labels { + if i > 0 { + sb.WriteString("&") + } + sb.WriteString(string(l.Key)) + sb.WriteString("~") + sb.WriteString(l.Value.Emit()) + } + return sb.String() +} + +func GaugeAgg(v int64) export.Aggregator { + ctx := context.Background() + gagg := gauge.New() + _ = gagg.Update(ctx, core.NewInt64Number(v), GaugeDesc) + gagg.Checkpoint(ctx, CounterDesc) + return gagg +} + +func CounterAgg(v int64) export.Aggregator { + ctx := context.Background() + cagg := counter.New() + _ = cagg.Update(ctx, core.NewInt64Number(v), CounterDesc) + cagg.Checkpoint(ctx, CounterDesc) + return cagg +} + +func (o Output) AddTo(rec export.Record) { + labels := rec.Labels() + key := fmt.Sprint(rec.Descriptor().Name(), "/", labels.Encoded()) + var value int64 + switch t := rec.Aggregator().(type) { + case *counter.Aggregator: + value = t.Sum().AsInt64() + case *gauge.Aggregator: + value = t.LastValue().AsInt64() + } + o[key] = value +} diff --git a/sdk/metric/batcher/ungrouped/ungrouped_test.go b/sdk/metric/batcher/ungrouped/ungrouped_test.go new file mode 100644 index 00000000000..d61f1d6c06e --- /dev/null +++ b/sdk/metric/batcher/ungrouped/ungrouped_test.go @@ -0,0 +1,86 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance 
with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ungrouped_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/metric/batcher/test" + "go.opentelemetry.io/otel/sdk/metric/batcher/ungrouped" +) + +// These tests use the original label encoding. + +func TestUngroupedStateless(t *testing.T) { + ctx := context.Background() + b := ungrouped.New(nil, false) + + _ = b.Process(ctx, test.GaugeDesc, test.Labels1, test.GaugeAgg(10)) + _ = b.Process(ctx, test.GaugeDesc, test.Labels2, test.GaugeAgg(20)) + _ = b.Process(ctx, test.GaugeDesc, test.Labels3, test.GaugeAgg(30)) + + _ = b.Process(ctx, test.CounterDesc, test.Labels1, test.CounterAgg(10)) + _ = b.Process(ctx, test.CounterDesc, test.Labels2, test.CounterAgg(20)) + _ = b.Process(ctx, test.CounterDesc, test.Labels3, test.CounterAgg(40)) + + processor := b.ReadCheckpoint() + + records := test.Output{} + processor.Foreach(records.AddTo) + + // Output gauge should have only the "G=H" and "G=" keys. + // Output counter should have only the "C=D" and "C=" keys. 
+ require.EqualValues(t, map[string]int64{ + "counter/G~H&C~D": 10, // labels1 + "counter/C~D&E~F": 20, // labels2 + "counter/": 40, // labels3 + "gauge/G~H&C~D": 10, // labels1 + "gauge/C~D&E~F": 20, // labels2 + "gauge/": 30, // labels3 + }, records) + + // Verify that state was reset + processor = b.ReadCheckpoint() + processor.Foreach(func(rec export.Record) { + t.Fatal("Unexpected call") + }) +} + +func TestUngroupedStateful(t *testing.T) { + ctx := context.Background() + b := ungrouped.New(nil, true) + + _ = b.Process(ctx, test.CounterDesc, test.Labels1, test.CounterAgg(10)) + + processor := b.ReadCheckpoint() + + records1 := test.Output{} + processor.Foreach(records1.AddTo) + + require.EqualValues(t, map[string]int64{ + "counter/G~H&C~D": 10, // labels1 + }, records1) + + // Test that state was NOT reset + processor = b.ReadCheckpoint() + + records2 := test.Output{} + processor.Foreach(records2.AddTo) + + require.EqualValues(t, records1, records2) +} From 557f912219b0e2522e2fcf989158bf81e81019ba Mon Sep 17 00:00:00 2001 From: jmacd Date: Sat, 9 Nov 2019 15:31:53 -0800 Subject: [PATCH 52/73] Lint new tests --- go.sum | 1 - sdk/metric/batcher/test/test.go | 19 +++++++++++++++---- .../batcher/ungrouped/ungrouped_test.go | 1 + 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/go.sum b/go.sum index b483f721b6e..28b92022a51 100644 --- a/go.sum +++ b/go.sum @@ -280,7 +280,6 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opentelemetry.io v0.1.0 h1:EANZoRCOP+A3faIlw/iN6YEWoYb1vleZRKm1EvH8T48= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr 
v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= diff --git a/sdk/metric/batcher/test/test.go b/sdk/metric/batcher/test/test.go index 231b69a5747..0a9549964c6 100644 --- a/sdk/metric/batcher/test/test.go +++ b/sdk/metric/batcher/test/test.go @@ -28,27 +28,34 @@ import ( ) type ( + // Encoder is an alternate label encoder to validate grouping logic. Encoder struct{} + // Output collects distinct metric/label set outputs. Output map[string]int64 ) var ( - // Gauge groups by "G" - // Counter groups by "C" + // GaugeDesc groups by "G" GaugeDesc = export.NewDescriptor( "gauge", export.GaugeKind, []core.Key{key.New("G")}, "", "", core.Int64NumberKind, false) + // CounterDesc groups by "C" CounterDesc = export.NewDescriptor( "counter", export.CounterKind, []core.Key{key.New("C")}, "", "", core.Int64NumberKind, false) - // The SDK and the batcher use different encoders in these tests. - SdkEncoder = &Encoder{} + // SdkEncoder uses a non-standard encoder like K1~V1&K2~V2 + SdkEncoder = &Encoder{} + // GroupEncoder uses the SDK default encoder GroupEncoder = sdk.DefaultLabelEncoder() // Gauge groups are (labels1), (labels2+labels3) // Counter groups are (labels1+labels2), (labels3) + + // Labels1 has G=H and C=D Labels1 = makeLabels(SdkEncoder, key.String("G", "H"), key.String("C", "D")) + // Labels2 has C=D and E=F Labels2 = makeLabels(SdkEncoder, key.String("C", "D"), key.String("E", "F")) + // Labels3 is the empty set Labels3 = makeLabels(SdkEncoder) ) @@ -70,6 +77,7 @@ func (Encoder) EncodeLabels(labels []core.KeyValue) string { return sb.String() } +// GaugeAgg returns a checkpointed gauge aggregator w/ the specified value. func GaugeAgg(v int64) export.Aggregator { ctx := context.Background() gagg := gauge.New() @@ -78,6 +86,7 @@ func GaugeAgg(v int64) export.Aggregator { return gagg } +// CounterAgg returns a checkpointed counter aggregator w/ the specified value. 
func CounterAgg(v int64) export.Aggregator { ctx := context.Background() cagg := counter.New() @@ -86,6 +95,8 @@ func CounterAgg(v int64) export.Aggregator { return cagg } +// AddTo adds a name/label-encoding entry with the gauge or counter +// value to the output map. func (o Output) AddTo(rec export.Record) { labels := rec.Labels() key := fmt.Sprint(rec.Descriptor().Name(), "/", labels.Encoded()) diff --git a/sdk/metric/batcher/ungrouped/ungrouped_test.go b/sdk/metric/batcher/ungrouped/ungrouped_test.go index d61f1d6c06e..73248cc8e77 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped_test.go +++ b/sdk/metric/batcher/ungrouped/ungrouped_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/stretchr/testify/require" + export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/metric/batcher/test" "go.opentelemetry.io/otel/sdk/metric/batcher/ungrouped" From 0133786ac54d7ea5596878a6a61624b469b65413 Mon Sep 17 00:00:00 2001 From: jmacd Date: Sat, 9 Nov 2019 16:05:56 -0800 Subject: [PATCH 53/73] Respond to krnowak's feedback --- exporter/metric/stdout/stdout.go | 2 +- sdk/export/metric/metric.go | 17 +++++++++++------ sdk/metric/aggregator/test/test.go | 15 +++++++++++---- sdk/metric/controller/push/push.go | 12 ++++++++++-- sdk/metric/doc.go | 4 ++-- sdk/metric/labelencoder.go | 8 +++++++- sdk/metric/sdk.go | 2 ++ 7 files changed, 44 insertions(+), 16 deletions(-) diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index d553ce012e5..15c8efa566d 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -41,7 +41,7 @@ type Options struct { PrettyPrint bool // DoNotPrintTime suppresses timestamp printing. This is - // useful to create testable examples or if the are being + // useful to create deterministic test conditions. 
DoNotPrintTime bool // Quantiles are the desired aggregation quantiles for measure diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 17720878359..0b0902d9ac5 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -52,9 +52,14 @@ type Batcher interface { // instrument. // // The result from AggregatorSelector.AggregatorFor should be - // deterministic given a metric instrument and label set, - // since occasionally the SDK will have multiple Aggregators - // for the same metric, due to tolerated race conditions. + // the same type for a given Descriptor or else nil. The same + // type should be returned for a given descriptor, because + // Aggregators only know how to Merge with their own type. If + // the result is nil, the metric instrument will be disabled. + // + // Note that the SDK only calls AggregatorFor when new records + // require an Aggregator. This does not provide a way to + // disable metrics with active records. AggregationSelector // Process is called by the SDK once per internal record, @@ -259,7 +264,7 @@ func (r *Record) Labels() Labels { return r.labels } -// Kind describes the kind of instrument. +// MetricKind describes the kind of instrument. type MetricKind int8 const ( @@ -276,7 +281,7 @@ const ( // Descriptor describes a metric instrument to the exporter. // // Descriptors are created once per instrument and a pointer to the -// descriptor may be used to uniquely identfy the instrument in an +// descriptor may be used to uniquely identify the instrument in an // exporter. type Descriptor struct { name string @@ -292,7 +297,7 @@ type Descriptor struct { // implementations in constructing new metric instruments. // // Descriptors are created once per instrument and a pointer to the -// descriptor may be used to uniquely identfy the instrument in an +// descriptor may be used to uniquely identify the instrument in an // exporter. 
func NewDescriptor( name string, diff --git a/sdk/metric/aggregator/test/test.go b/sdk/metric/aggregator/test/test.go index 914d68db205..1f7e318bb1a 100644 --- a/sdk/metric/aggregator/test/test.go +++ b/sdk/metric/aggregator/test/test.go @@ -127,10 +127,17 @@ func (n *Numbers) Median() core.Number { // Performs the same range test the SDK does on behalf of the aggregator. func CheckedUpdate(t *testing.T, agg export.Aggregator, number core.Number, descriptor *export.Descriptor) { ctx := context.Background() - if err := aggregator.RangeTest(number, descriptor); err == nil { - if err := agg.Update(ctx, number, descriptor); err != nil { - t.Error("Unexpected Update failure", err) - } + + // Note: Aggregator tests are written assuming that the SDK + // has performed the RangeTest. Therefore we skip errors that + // would have been detected by the RangeTest. + err := aggregator.RangeTest(number, descriptor) + if err != nil { + return + } + + if err := agg.Update(ctx, number, descriptor); err != nil { + t.Error("Unexpected Update failure", err) } } diff --git a/sdk/metric/controller/push/push.go b/sdk/metric/controller/push/push.go index be928cacbf6..b864db17898 100644 --- a/sdk/metric/controller/push/push.go +++ b/sdk/metric/controller/push/push.go @@ -43,8 +43,14 @@ type Controller struct { var _ metric.Provider = &Controller{} // New constructs a Controller, an implementation of metric.Provider, -// using the provider batcher, exporter, period. The batcher itself -// is configured with aggregation policy selection. +// using the provided batcher, exporter, and collection period to +// configure an SDK with periodic collection. The batcher itself is +// configured with the aggregation selector policy. +// +// If the Exporter implements the export.LabelEncoder interface, the +// exporter will be used as the label encoder for the SDK itself, +// otherwise the SDK will be configured with the default label +// encoder. 
func New(batcher export.Batcher, exporter export.Exporter, period time.Duration) *Controller { lencoder, _ := exporter.(export.LabelEncoder) @@ -130,6 +136,8 @@ func (c *Controller) run(ch chan struct{}) { } func (c *Controller) tick() { + // TODO: either remove the context argument from Export() or + // configure a timeout here? ctx := context.Background() c.sdk.Collect(ctx) err := c.exporter.Export(ctx, c.batcher.ReadCheckpoint()) diff --git a/sdk/metric/doc.go b/sdk/metric/doc.go index 9394d0fca00..3dd400b47e9 100644 --- a/sdk/metric/doc.go +++ b/sdk/metric/doc.go @@ -25,7 +25,7 @@ instrument (Counter, Gauge, Measure) crossed with two kinds of number The API assists the SDK by consolidating the variety of metric instruments into a narrower interface, allowing the SDK to avoid repetition of -boilerplate. The API and SDK are separated such that an event reacheing +boilerplate. The API and SDK are separated such that an event reaching the SDK has a uniform structure: an instrument, a label set, and a numerical value. @@ -141,7 +141,7 @@ provide the serialization logic for labels. This allows avoiding duplicate serialization of labels, once as a unique key in the SDK (or Batcher) and once in the exporter. -Producer is an interface between the Batcher and the controller. +Producer is an interface between the Batcher and the Exporter. After completing a collection pass, the Batcher.ReadCheckpoint() method returns a Producer, which the Exporter uses to iterate over all the updated metrics. diff --git a/sdk/metric/labelencoder.go b/sdk/metric/labelencoder.go index ca7b4c8798b..c444b4eb07c 100644 --- a/sdk/metric/labelencoder.go +++ b/sdk/metric/labelencoder.go @@ -23,7 +23,13 @@ import ( ) type defaultLabelEncoder struct { - // pool is a pool of labelset builders. + // pool is a pool of labelset builders. The buffers in this + // pool grow to a size that most label encodings will not + // allocate new memory. 
This pool reduces the number of + // allocations per new LabelSet to 3, typically, as seen in + // the benchmarks. (It should be 2--one for the LabelSet + // object and one for the buffer.String() here--see the extra + // allocation in the call to sort.Stable). pool sync.Pool // *bytes.Buffer } diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 188bbc92a06..5a61da044f2 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -257,6 +257,8 @@ func (m *SDK) Labels(kvs ...core.KeyValue) api.LabelSet { // Sort and de-duplicate. sorted := sortedLabels(kvs) + + // TODO: Find a way to avoid the memory allocation here: sort.Stable(&sorted) oi := 1 for i := 1; i < len(sorted); i++ { From e5183985019a1d050a669681be2b301b4d54cfec Mon Sep 17 00:00:00 2001 From: jmacd Date: Tue, 12 Nov 2019 08:33:21 -0800 Subject: [PATCH 54/73] Undo comment --- api/metric/sdkhelpers.go | 1 - 1 file changed, 1 deletion(-) diff --git a/api/metric/sdkhelpers.go b/api/metric/sdkhelpers.go index 37bcff12ff2..dc3c6afcff7 100644 --- a/api/metric/sdkhelpers.go +++ b/api/metric/sdkhelpers.go @@ -28,7 +28,6 @@ type InstrumentImpl interface { AcquireHandle(labels LabelSet) HandleImpl // RecordOne allows the SDK to observe a single metric event. - // The error is returned for the SDK to report to the user. 
RecordOne(ctx context.Context, number core.Number, labels LabelSet) } From 72e3d308601e34c2ce414db9e64a2f21f7c35bb1 Mon Sep 17 00:00:00 2001 From: jmacd Date: Tue, 12 Nov 2019 13:51:19 -0800 Subject: [PATCH 55/73] Use concrete receivers for export records and labels, since the constructors return structs not pointers --- sdk/export/metric/metric.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 0b0902d9ac5..466c61d3a82 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -217,22 +217,22 @@ func NewLabels(ordered []core.KeyValue, encoded string, encoder LabelEncoder) La // Ordered returns the labels in a specified order, according to the // Batcher. -func (l *Labels) Ordered() []core.KeyValue { +func (l Labels) Ordered() []core.KeyValue { return l.ordered } // Encoded is a pre-encoded form of the ordered labels. -func (l *Labels) Encoded() string { +func (l Labels) Encoded() string { return l.encoded } // Encoder is the encoder that computed the Encoded() representation. -func (l *Labels) Encoder() LabelEncoder { +func (l Labels) Encoder() LabelEncoder { return l.encoder } // Len returns the number of labels. -func (l *Labels) Len() int { +func (l Labels) Len() int { return len(l.ordered) } @@ -249,18 +249,18 @@ func NewRecord(descriptor *Descriptor, labels Labels, aggregator Aggregator) Rec // Aggregator returns the checkpointed aggregator. It is safe to // access the checkpointed state without locking. -func (r *Record) Aggregator() Aggregator { +func (r Record) Aggregator() Aggregator { return r.aggregator } // Descriptor describes the metric instrument being exported. -func (r *Record) Descriptor() *Descriptor { +func (r Record) Descriptor() *Descriptor { return r.descriptor } // Labels describes the labels associated with the instrument and the // aggregated data. 
-func (r *Record) Labels() Labels { +func (r Record) Labels() Labels { return r.labels } From 9acdc5a9d51110b952fc94e18d438b01164086da Mon Sep 17 00:00:00 2001 From: jmacd Date: Tue, 12 Nov 2019 14:47:51 -0800 Subject: [PATCH 56/73] Bug fix for stateful batchers; clone an aggregator for long term storage --- sdk/metric/batcher/defaultkeys/defaultkeys.go | 11 +++++++ .../batcher/defaultkeys/defaultkeys_test.go | 33 +++++++++++++++++-- sdk/metric/batcher/test/test.go | 23 +++++++++++++ sdk/metric/batcher/ungrouped/ungrouped.go | 11 +++++++ .../batcher/ungrouped/ungrouped_test.go | 33 +++++++++++++++++-- 5 files changed, 105 insertions(+), 6 deletions(-) diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys.go b/sdk/metric/batcher/defaultkeys/defaultkeys.go index c35c2781346..86aba933c8b 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys.go @@ -102,6 +102,17 @@ func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels exp // Reduce dimensionality. rag, ok := b.agg[encoded] if !ok { + // If this Batcher is stateful, create a copy of the + // Aggregator for long-term storage. Otherwise the + // Meter implementation will checkpoint the aggregator + // again, overwriting the long-lived state. 
+ if b.stateful { + tmp := agg + agg = b.AggregatorFor(desc) + if err := agg.Merge(tmp, desc); err != nil { + return err + } + } b.agg[encoded] = aggEntry{ descriptor: desc, labels: export.NewLabels(canon, encoded, b.lencoder), diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys_test.go b/sdk/metric/batcher/defaultkeys/defaultkeys_test.go index 3368a0a5f4b..0299984f9c1 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys_test.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys_test.go @@ -20,6 +20,7 @@ import ( "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/metric/batcher/defaultkeys" "go.opentelemetry.io/otel/sdk/metric/batcher/test" @@ -27,7 +28,7 @@ import ( func TestGroupingStateless(t *testing.T) { ctx := context.Background() - b := defaultkeys.New(nil, test.GroupEncoder, false) + b := defaultkeys.New(test.NewAggregationSelector(), test.GroupEncoder, false) _ = b.Process(ctx, test.GaugeDesc, test.Labels1, test.GaugeAgg(10)) _ = b.Process(ctx, test.GaugeDesc, test.Labels2, test.GaugeAgg(20)) @@ -60,9 +61,10 @@ func TestGroupingStateless(t *testing.T) { func TestGroupingStateful(t *testing.T) { ctx := context.Background() - b := defaultkeys.New(nil, test.GroupEncoder, true) + b := defaultkeys.New(test.NewAggregationSelector(), test.GroupEncoder, true) - _ = b.Process(ctx, test.CounterDesc, test.Labels1, test.CounterAgg(10)) + cagg := test.CounterAgg(10) + _ = b.Process(ctx, test.CounterDesc, test.Labels1, cagg) processor := b.ReadCheckpoint() @@ -80,4 +82,29 @@ func TestGroupingStateful(t *testing.T) { processor.Foreach(records2.AddTo) require.EqualValues(t, records1, records2) + + // Update and re-checkpoint the original record. + _ = cagg.Update(ctx, core.NewInt64Number(20), test.CounterDesc) + cagg.Checkpoint(ctx, test.CounterDesc) + + // As yet cagg has not been passed to Batcher.Process. Should + // not see an update. 
+ processor = b.ReadCheckpoint() + + records3 := test.Output{} + processor.Foreach(records3.AddTo) + + require.EqualValues(t, records1, records3) + + // Now process the second update + _ = b.Process(ctx, test.CounterDesc, test.Labels1, cagg) + + processor = b.ReadCheckpoint() + + records4 := test.Output{} + processor.Foreach(records4.AddTo) + + require.EqualValues(t, map[string]int64{ + "counter/C=D": 30, + }, records4) } diff --git a/sdk/metric/batcher/test/test.go b/sdk/metric/batcher/test/test.go index 0a9549964c6..71b03c88165 100644 --- a/sdk/metric/batcher/test/test.go +++ b/sdk/metric/batcher/test/test.go @@ -33,6 +33,11 @@ type ( // Output collects distinct metric/label set outputs. Output map[string]int64 + + // testAggregationSelector returns aggregators consistent with + // the test variables below, needed for testing stateful + // batchers, which clone Aggregators using AggregatorFor(desc). + testAggregationSelector struct{} ) var ( @@ -59,6 +64,24 @@ var ( Labels3 = makeLabels(SdkEncoder) ) +// NewAggregationSelector returns a policy that is consistent with the +// test descriptors above. I.e., it returns counter.New() for counter +// instruments and gauge.New for gauge instruments. 
+func NewAggregationSelector() export.AggregationSelector { + return &testAggregationSelector{} +} + +func (*testAggregationSelector) AggregatorFor(desc *export.Descriptor) export.Aggregator { + switch desc.MetricKind() { + case export.CounterKind: + return counter.New() + case export.GaugeKind: + return gauge.New() + default: + panic("Invalid descriptor MetricKind for this test") + } +} + func makeLabels(encoder export.LabelEncoder, labels ...core.KeyValue) export.Labels { encoded := encoder.EncodeLabels(labels) return export.NewLabels(labels, encoded, encoder) diff --git a/sdk/metric/batcher/ungrouped/ungrouped.go b/sdk/metric/batcher/ungrouped/ungrouped.go index 49eb0233a22..8f510f8468d 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped.go +++ b/sdk/metric/batcher/ungrouped/ungrouped.go @@ -62,6 +62,17 @@ func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels exp } value, ok := b.batchMap[key] if !ok { + // If this Batcher is stateful, create a copy of the + // Aggregator for long-term storage. Otherwise the + // Meter implementation will checkpoint the aggregator + // again, overwriting the long-lived state. 
+ if b.stateful { + tmp := agg + agg = b.AggregatorFor(desc) + if err := agg.Merge(tmp, desc); err != nil { + return err + } + } b.batchMap[key] = batchValue{ aggregator: agg, labels: labels, diff --git a/sdk/metric/batcher/ungrouped/ungrouped_test.go b/sdk/metric/batcher/ungrouped/ungrouped_test.go index 73248cc8e77..bacd0a776dc 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped_test.go +++ b/sdk/metric/batcher/ungrouped/ungrouped_test.go @@ -20,6 +20,7 @@ import ( "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/metric/batcher/test" "go.opentelemetry.io/otel/sdk/metric/batcher/ungrouped" @@ -29,7 +30,7 @@ import ( func TestUngroupedStateless(t *testing.T) { ctx := context.Background() - b := ungrouped.New(nil, false) + b := ungrouped.New(test.NewAggregationSelector(), false) _ = b.Process(ctx, test.GaugeDesc, test.Labels1, test.GaugeAgg(10)) _ = b.Process(ctx, test.GaugeDesc, test.Labels2, test.GaugeAgg(20)) @@ -64,9 +65,10 @@ func TestUngroupedStateless(t *testing.T) { func TestUngroupedStateful(t *testing.T) { ctx := context.Background() - b := ungrouped.New(nil, true) + b := ungrouped.New(test.NewAggregationSelector(), true) - _ = b.Process(ctx, test.CounterDesc, test.Labels1, test.CounterAgg(10)) + cagg := test.CounterAgg(10) + _ = b.Process(ctx, test.CounterDesc, test.Labels1, cagg) processor := b.ReadCheckpoint() @@ -84,4 +86,29 @@ func TestUngroupedStateful(t *testing.T) { processor.Foreach(records2.AddTo) require.EqualValues(t, records1, records2) + + // Update and re-checkpoint the original record. + _ = cagg.Update(ctx, core.NewInt64Number(20), test.CounterDesc) + cagg.Checkpoint(ctx, test.CounterDesc) + + // As yet cagg has not been passed to Batcher.Process. Should + // not see an update. 
+ processor = b.ReadCheckpoint() + + records3 := test.Output{} + processor.Foreach(records3.AddTo) + + require.EqualValues(t, records1, records3) + + // Now process the second update + _ = b.Process(ctx, test.CounterDesc, test.Labels1, cagg) + + processor = b.ReadCheckpoint() + + records4 := test.Output{} + processor.Foreach(records4.AddTo) + + require.EqualValues(t, map[string]int64{ + "counter/G~H&C~D": 30, + }, records4) } From 399c34c0714ee18c29dc9fcfd7952af66447a9b4 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 14 Nov 2019 11:07:08 -0800 Subject: [PATCH 57/73] Remove TODO addressed in #318 --- sdk/metric/sdk.go | 1 - 1 file changed, 1 deletion(-) diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 5a61da044f2..33919f149db 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -258,7 +258,6 @@ func (m *SDK) Labels(kvs ...core.KeyValue) api.LabelSet { // Sort and de-duplicate. sorted := sortedLabels(kvs) - // TODO: Find a way to avoid the memory allocation here: sort.Stable(&sorted) oi := 1 for i := 1; i < len(sorted); i++ { From 2a75566d9a640fe3445d60b9d760685a1394faab Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 14 Nov 2019 14:33:34 -0800 Subject: [PATCH 58/73] Add errors to all aggregator interfaces --- exporter/metric/stdout/stdout.go | 34 ++++++++++---- exporter/metric/test/test.go | 2 +- sdk/export/metric/metric.go | 6 +-- sdk/metric/aggregator/api.go | 10 ++-- sdk/metric/aggregator/array/array.go | 8 ++-- sdk/metric/aggregator/array/array_test.go | 25 +++++++--- sdk/metric/aggregator/counter/counter.go | 4 +- sdk/metric/aggregator/counter/counter_test.go | 16 +++++-- sdk/metric/aggregator/ddsketch/ddsketch.go | 8 ++-- .../aggregator/ddsketch/ddsketch_test.go | 18 ++++++-- sdk/metric/aggregator/errors.go | 6 +++ sdk/metric/aggregator/gauge/gauge.go | 13 +++--- sdk/metric/aggregator/gauge/gauge_test.go | 45 ++++++++++++++---- sdk/metric/aggregator/maxsumcount/msc.go | 8 ++-- sdk/metric/aggregator/maxsumcount/msc_test.go | 18 ++++++-- 
sdk/metric/batcher/defaultkeys/defaultkeys.go | 2 +- .../batcher/defaultkeys/defaultkeys_test.go | 12 ++--- sdk/metric/batcher/test/test.go | 6 ++- sdk/metric/batcher/ungrouped/ungrouped.go | 2 +- .../batcher/ungrouped/ungrouped_test.go | 12 ++--- sdk/metric/controller/push/push_test.go | 12 +++-- sdk/metric/correct_test.go | 46 +++++++++++-------- sdk/metric/monotone_test.go | 4 +- sdk/metric/stress_test.go | 16 +++++-- 24 files changed, 221 insertions(+), 112 deletions(-) diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index 15c8efa566d..3e280f7fbaa 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -106,16 +106,30 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) error { ts := time.Now() batch.Timestamp = &ts } - producer.Foreach(func(record export.Record) { + producer.ForEach(func(record export.Record) { desc := record.Descriptor() labels := record.Labels() agg := record.Aggregator() kind := desc.NumberKind() var expose expoLine + + if sum, ok := agg.(aggregator.Sum); ok { + if value, err := sum.Sum(); err != nil { + aggError = err + expose.Sum = "NaN" + } else { + expose.Sum = value.AsInterface(kind) + } + } + if msc, ok := agg.(aggregator.MaxSumCount); ok { - expose.Sum = msc.Sum().AsInterface(kind) - expose.Count = msc.Count() + if count, err := msc.Count(); err != nil { + aggError = err + expose.Count = "NaN" + } else { + expose.Count = count + } if max, err := msc.Max(); err != nil { aggError = err @@ -142,15 +156,17 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) error { } } } - } else if sum, ok := agg.(aggregator.Sum); ok { - expose.Sum = sum.Sum().AsInterface(kind) } else if lv, ok := agg.(aggregator.LastValue); ok { - ts := lv.Timestamp() - expose.LastValue = lv.LastValue().AsInterface(kind) + if value, timestamp, err := lv.LastValue(); err != nil { + aggError = err + expose.LastValue = "NaN" + } else { + expose.LastValue = 
value.AsInterface(kind) - if !e.options.DoNotPrintTime { - expose.Timestamp = &ts + if !e.options.DoNotPrintTime { + expose.Timestamp = ×tamp + } } } diff --git a/exporter/metric/test/test.go b/exporter/metric/test/test.go index 14c63d0c198..49bef7f57ef 100644 --- a/exporter/metric/test/test.go +++ b/exporter/metric/test/test.go @@ -27,7 +27,7 @@ func (p *Producer) Add(desc *export.Descriptor, agg export.Aggregator, labels .. p.updates = append(p.updates, export.NewRecord(desc, elabels, agg)) } -func (p *Producer) Foreach(f func(export.Record)) { +func (p *Producer) ForEach(f func(export.Record)) { for _, r := range p.updates { f(r) } diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 466c61d3a82..8aa7f7ec8d4 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -178,12 +178,12 @@ type LabelEncoder interface { // Producer allows a controller to access a complete checkpoint of // aggregated metrics from the Batcher. This is passed to the -// Exporter which may then use Foreach to iterate over the collection +// Exporter which may then use ForEach to iterate over the collection // of aggregated metrics. type Producer interface { - // Foreach iterates over all metrics that were updated during + // ForEach iterates over all metrics that were updated during // the last collection period. - Foreach(func(Record)) + ForEach(func(Record)) } // Record contains the exported data for a single metric instrument diff --git a/sdk/metric/aggregator/api.go b/sdk/metric/aggregator/api.go index 0f3855637da..8946251cfbe 100644 --- a/sdk/metric/aggregator/api.go +++ b/sdk/metric/aggregator/api.go @@ -20,16 +20,13 @@ import ( "go.opentelemetry.io/otel/api/core" ) -// TODO: Add Min() support to maxsumcount? It's the same as -// Quantile(0) but cheap to compute like Max(). 
- type ( Sum interface { - Sum() core.Number + Sum() (core.Number, error) } Count interface { - Count() int64 + Count() (int64, error) } Max interface { @@ -41,8 +38,7 @@ type ( } LastValue interface { - LastValue() core.Number - Timestamp() time.Time + LastValue() (core.Number, time.Time, error) } MaxSumCount interface { diff --git a/sdk/metric/aggregator/array/array.go b/sdk/metric/aggregator/array/array.go index 0ad4fa29df7..3565f146feb 100644 --- a/sdk/metric/aggregator/array/array.go +++ b/sdk/metric/aggregator/array/array.go @@ -46,13 +46,13 @@ func New() *Aggregator { } // Sum returns the sum of the checkpoint. -func (c *Aggregator) Sum() core.Number { - return c.ckptSum +func (c *Aggregator) Sum() (core.Number, error) { + return c.ckptSum, nil } // Count returns the count of the checkpoint. -func (c *Aggregator) Count() int64 { - return int64(len(c.checkpoint)) +func (c *Aggregator) Count() (int64, error) { + return int64(len(c.checkpoint)), nil } // Max returns the max of the checkpoint. 
diff --git a/sdk/metric/aggregator/array/array_test.go b/sdk/metric/aggregator/array/array_test.go index 99ab8e99a8d..6d94d100ac1 100644 --- a/sdk/metric/aggregator/array/array_test.go +++ b/sdk/metric/aggregator/array/array_test.go @@ -57,12 +57,16 @@ func (ut *updateTest) run(t *testing.T, profile test.Profile) { all.Sort() + sum, err := agg.Sum() require.InEpsilon(t, all.Sum().CoerceToFloat64(profile.NumberKind), - agg.Sum().CoerceToFloat64(profile.NumberKind), + sum.CoerceToFloat64(profile.NumberKind), 0.0000001, "Same sum - absolute") - require.Equal(t, all.Count(), agg.Count(), "Same count - absolute") + require.Nil(t, err) + count, err := agg.Count() + require.Nil(t, err) + require.Equal(t, all.Count(), count, "Same count - absolute") min, err := agg.Min() require.Nil(t, err) @@ -139,12 +143,16 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { all.Sort() + sum, err := agg1.Sum() require.InEpsilon(t, all.Sum().CoerceToFloat64(profile.NumberKind), - agg1.Sum().CoerceToFloat64(profile.NumberKind), + sum.CoerceToFloat64(profile.NumberKind), 0.0000001, "Same sum - absolute") - require.Equal(t, all.Count(), agg1.Count(), "Same count - absolute") + require.Nil(t, err) + count, err := agg1.Count() + require.Nil(t, err) + require.Equal(t, all.Count(), count, "Same count - absolute") min, err := agg1.Min() require.Nil(t, err) @@ -206,7 +214,8 @@ func TestArrayErrors(t *testing.T) { } agg.Checkpoint(ctx, descriptor) - require.Equal(t, int64(1), agg.Count(), "NaN value was not counted") + count, err := agg.Count() + require.Equal(t, int64(1), count, "NaN value was not counted") num, err := agg.Quantile(0) require.Nil(t, err) @@ -276,9 +285,11 @@ func TestArrayFloat64(t *testing.T) { all.Sort() - require.InEpsilon(t, all.Sum().AsFloat64(), agg.Sum().AsFloat64(), 0.0000001, "Same sum") + sum, err := agg.Sum() + require.InEpsilon(t, all.Sum().AsFloat64(), sum.AsFloat64(), 0.0000001, "Same sum") - require.Equal(t, all.Count(), agg.Count(), "Same count") + 
count, err := agg.Count() + require.Equal(t, all.Count(), count, "Same count") min, err := agg.Min() require.Nil(t, err) diff --git a/sdk/metric/aggregator/counter/counter.go b/sdk/metric/aggregator/counter/counter.go index 91c04e20c99..3a3e1f680f8 100644 --- a/sdk/metric/aggregator/counter/counter.go +++ b/sdk/metric/aggregator/counter/counter.go @@ -41,8 +41,8 @@ func New() *Aggregator { } // Sum returns the accumulated count as a Number. -func (c *Aggregator) Sum() core.Number { - return c.checkpoint +func (c *Aggregator) Sum() (core.Number, error) { + return c.checkpoint, nil } // Checkpoint checkpoints the current value (atomically) and exports it. diff --git a/sdk/metric/aggregator/counter/counter_test.go b/sdk/metric/aggregator/counter/counter_test.go index a876bcb879c..7ce5d16cdac 100644 --- a/sdk/metric/aggregator/counter/counter_test.go +++ b/sdk/metric/aggregator/counter/counter_test.go @@ -44,7 +44,9 @@ func TestCounterMonotonic(t *testing.T) { agg.Checkpoint(ctx, descriptor) - require.Equal(t, sum, agg.Sum(), "Same sum - monotonic") + asum, err := agg.Sum() + require.Equal(t, sum, asum, "Same sum - monotonic") + require.Nil(t, err) }) } @@ -64,7 +66,9 @@ func TestCounterMonotonicNegative(t *testing.T) { test.CheckedUpdate(t, agg, sum, descriptor) agg.Checkpoint(ctx, descriptor) - require.Equal(t, sum, agg.Sum(), "Same sum - monotonic") + asum, err := agg.Sum() + require.Equal(t, sum, asum, "Same sum - monotonic") + require.Nil(t, err) }) } @@ -88,7 +92,9 @@ func TestCounterNonMonotonic(t *testing.T) { agg.Checkpoint(ctx, descriptor) - require.Equal(t, sum, agg.Sum(), "Same sum - monotonic") + asum, err := agg.Sum() + require.Equal(t, sum, asum, "Same sum - monotonic") + require.Nil(t, err) }) } @@ -116,6 +122,8 @@ func TestCounterMerge(t *testing.T) { sum.AddNumber(descriptor.NumberKind(), sum) - require.Equal(t, sum, agg1.Sum(), "Same sum - monotonic") + asum, err := agg1.Sum() + require.Equal(t, sum, asum, "Same sum - monotonic") + require.Nil(t, 
err) }) } diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index 5f1d51cc4db..43b16ebe268 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -61,13 +61,13 @@ func NewDefaultConfig() *Config { } // Sum returns the sum of the checkpoint. -func (c *Aggregator) Sum() core.Number { - return c.toNumber(c.checkpoint.Sum()) +func (c *Aggregator) Sum() (core.Number, error) { + return c.toNumber(c.checkpoint.Sum()), nil } // Count returns the count of the checkpoint. -func (c *Aggregator) Count() int64 { - return c.checkpoint.Count() +func (c *Aggregator) Count() (int64, error) { + return c.checkpoint.Count(), nil } // Max returns the max of the checkpoint. diff --git a/sdk/metric/aggregator/ddsketch/ddsketch_test.go b/sdk/metric/aggregator/ddsketch/ddsketch_test.go index 53911aa44c9..08b9825244a 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch_test.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch_test.go @@ -54,12 +54,17 @@ func (ut *updateTest) run(t *testing.T, profile test.Profile) { all.Sort() + sum, err := agg.Sum() require.InDelta(t, all.Sum().CoerceToFloat64(profile.NumberKind), - agg.Sum().CoerceToFloat64(profile.NumberKind), + sum.CoerceToFloat64(profile.NumberKind), 1, "Same sum - absolute") - require.Equal(t, all.Count(), agg.Count(), "Same count - absolute") + require.Nil(t, err) + + count, err := agg.Count() + require.Equal(t, all.Count(), count, "Same count - absolute") + require.Nil(t, err) max, err := agg.Max() require.Nil(t, err) @@ -133,12 +138,17 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) { all.Sort() + asum, err := agg1.Sum() require.InDelta(t, all.Sum().CoerceToFloat64(profile.NumberKind), - agg1.Sum().CoerceToFloat64(profile.NumberKind), + asum.CoerceToFloat64(profile.NumberKind), 1, "Same sum - absolute") - require.Equal(t, all.Count(), agg1.Count(), "Same count - absolute") + require.Nil(t, err) + + count, err := 
agg1.Count() + require.Equal(t, all.Count(), count, "Same count - absolute") + require.Nil(t, err) max, err := agg1.Max() require.Nil(t, err) diff --git a/sdk/metric/aggregator/errors.go b/sdk/metric/aggregator/errors.go index f54fad0d54b..4679b7c01d9 100644 --- a/sdk/metric/aggregator/errors.go +++ b/sdk/metric/aggregator/errors.go @@ -29,6 +29,12 @@ var ( ErrNaNInput = fmt.Errorf("NaN value is an invalid input") ErrNonMonotoneInput = fmt.Errorf("The new value is not monotone") ErrInconsistentType = fmt.Errorf("Cannot merge different aggregator types") + + // ErrNoLastValue is returned by the gauge.Aggregator when + // (due to a race with collection) the Aggregator is + // checkpointed before the first value is set. The aggregator + // should simply be skipped in this case. + ErrNoLastValue = fmt.Errorf("No value has been set") ) func RangeTest(number core.Number, descriptor *export.Descriptor) error { diff --git a/sdk/metric/aggregator/gauge/gauge.go b/sdk/metric/aggregator/gauge/gauge.go index 73ac2b78d2a..2e0485892ad 100644 --- a/sdk/metric/aggregator/gauge/gauge.go +++ b/sdk/metric/aggregator/gauge/gauge.go @@ -71,13 +71,12 @@ func New() *Aggregator { } // LastValue returns the last-recorded gauge value as a Number. -func (g *Aggregator) LastValue() core.Number { - return (*gaugeData)(g.checkpoint).value.AsNumber() -} - -// Timestamp returns the timestamp of the last recorded gauge value. -func (g *Aggregator) Timestamp() time.Time { - return (*gaugeData)(g.checkpoint).timestamp +func (g *Aggregator) LastValue() (core.Number, time.Time, error) { + gd := (*gaugeData)(g.checkpoint) + if gd == unsetGauge { + return core.Number(0), time.Time{}, aggregator.ErrNoLastValue + } + return gd.value.AsNumber(), gd.timestamp, nil } // Checkpoint checkpoints the current value (atomically) and exports it. 
diff --git a/sdk/metric/aggregator/gauge/gauge_test.go b/sdk/metric/aggregator/gauge/gauge_test.go index f4482a67dee..a1c4112daa0 100644 --- a/sdk/metric/aggregator/gauge/gauge_test.go +++ b/sdk/metric/aggregator/gauge/gauge_test.go @@ -48,7 +48,9 @@ func TestGaugeNonMonotonic(t *testing.T) { agg.Checkpoint(ctx, record) - require.Equal(t, last, agg.LastValue(), "Same last value - non-monotonic") + lv, _, err := agg.LastValue() + require.Equal(t, last, lv, "Same last value - non-monotonic") + require.Nil(t, err) }) } @@ -70,7 +72,9 @@ func TestGaugeMonotonic(t *testing.T) { agg.Checkpoint(ctx, record) - require.Equal(t, last, agg.LastValue(), "Same last value - monotonic") + lv, _, err := agg.LastValue() + require.Equal(t, last, lv, "Same last value - monotonic") + require.Nil(t, err) }) } @@ -96,7 +100,9 @@ func TestGaugeMonotonicDescending(t *testing.T) { agg.Checkpoint(ctx, record) - require.Equal(t, first, agg.LastValue(), "Same last value - monotonic") + lv, _, err := agg.LastValue() + require.Equal(t, first, lv, "Same last value - monotonic") + require.Nil(t, err) }) } @@ -119,14 +125,18 @@ func TestGaugeNormalMerge(t *testing.T) { agg1.Checkpoint(ctx, descriptor) agg2.Checkpoint(ctx, descriptor) - t1 := agg1.Timestamp() - t2 := agg2.Timestamp() + _, t1, err := agg1.LastValue() + require.Nil(t, err) + _, t2, err := agg2.LastValue() + require.Nil(t, err) require.True(t, t1.Before(t2)) test.CheckedMerge(t, agg1, agg2, descriptor) - require.Equal(t, t2, agg1.Timestamp(), "Merged timestamp - non-monotonic") - require.Equal(t, first2, agg1.LastValue(), "Merged value - non-monotonic") + lv, ts, err := agg1.LastValue() + require.Nil(t, err) + require.Equal(t, t2, ts, "Merged timestamp - non-monotonic") + require.Equal(t, first2, lv, "Merged value - non-monotonic") }) } @@ -151,7 +161,24 @@ func TestGaugeMonotonicMerge(t *testing.T) { test.CheckedMerge(t, agg1, agg2, descriptor) - require.Equal(t, first2, agg1.LastValue(), "Merged value - monotonic") - 
require.Equal(t, agg2.Timestamp(), agg1.Timestamp(), "Merged timestamp - monotonic") + _, ts2, err := agg1.LastValue() + require.Nil(t, err) + + lv, ts1, err := agg1.LastValue() + require.Nil(t, err) + require.Equal(t, first2, lv, "Merged value - monotonic") + require.Equal(t, ts2, ts1, "Merged timestamp - monotonic") }) } + +func TestGaugeNotSet(t *testing.T) { + descriptor := test.NewAggregatorTest(export.GaugeKind, core.Int64NumberKind, true) + + g := New() + g.Checkpoint(context.Background(), descriptor) + + value, timestamp, err := g.LastValue() + require.Equal(t, aggregator.ErrNoLastValue, err) + require.True(t, timestamp.IsZero()) + require.Equal(t, core.Number(0), value) +} diff --git a/sdk/metric/aggregator/maxsumcount/msc.go b/sdk/metric/aggregator/maxsumcount/msc.go index fee3caf312d..0fb8529032e 100644 --- a/sdk/metric/aggregator/maxsumcount/msc.go +++ b/sdk/metric/aggregator/maxsumcount/msc.go @@ -46,13 +46,13 @@ func New() *Aggregator { } // Sum returns the accumulated sum as a Number. -func (c *Aggregator) Sum() core.Number { - return c.checkpoint.sum +func (c *Aggregator) Sum() (core.Number, error) { + return c.checkpoint.sum, nil } // Count returns the accumulated count. -func (c *Aggregator) Count() int64 { - return int64(c.checkpoint.count.AsUint64()) +func (c *Aggregator) Count() (int64, error) { + return int64(c.checkpoint.count.AsUint64()), nil } // Max returns the accumulated max as a Number. 
diff --git a/sdk/metric/aggregator/maxsumcount/msc_test.go b/sdk/metric/aggregator/maxsumcount/msc_test.go index 47680677faf..461b7f783f2 100644 --- a/sdk/metric/aggregator/maxsumcount/msc_test.go +++ b/sdk/metric/aggregator/maxsumcount/msc_test.go @@ -46,12 +46,17 @@ func TestMaxSumCountAbsolute(t *testing.T) { all.Sort() + asum, err := agg.Sum() require.InEpsilon(t, all.Sum().CoerceToFloat64(profile.NumberKind), - agg.Sum().CoerceToFloat64(profile.NumberKind), + asum.CoerceToFloat64(profile.NumberKind), 0.000000001, "Same sum - absolute") - require.Equal(t, all.Count(), agg.Count(), "Same count - absolute") + require.Nil(t, err) + + count, err := agg.Count() + require.Equal(t, all.Count(), count, "Same count - absolute") + require.Nil(t, err) max, err := agg.Max() require.Nil(t, err) @@ -91,12 +96,17 @@ func TestMaxSumCountMerge(t *testing.T) { all.Sort() + asum, err := agg1.Sum() require.InEpsilon(t, all.Sum().CoerceToFloat64(profile.NumberKind), - agg1.Sum().CoerceToFloat64(profile.NumberKind), + asum.CoerceToFloat64(profile.NumberKind), 0.000000001, "Same sum - absolute") - require.Equal(t, all.Count(), agg1.Count(), "Same count - absolute") + require.Nil(t, err) + + count, err := agg1.Count() + require.Equal(t, all.Count(), count, "Same count - absolute") + require.Nil(t, err) max, err := agg1.Max() require.Nil(t, err) diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys.go b/sdk/metric/batcher/defaultkeys/defaultkeys.go index 86aba933c8b..75306e937ef 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys.go @@ -134,7 +134,7 @@ func (b *Batcher) ReadCheckpoint() export.Producer { } } -func (p *producer) Foreach(f func(export.Record)) { +func (p *producer) ForEach(f func(export.Record)) { for _, entry := range p.aggMap { f(export.NewRecord( entry.descriptor, diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys_test.go b/sdk/metric/batcher/defaultkeys/defaultkeys_test.go index 0299984f9c1..7c021afeff9 
100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys_test.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys_test.go @@ -41,7 +41,7 @@ func TestGroupingStateless(t *testing.T) { processor := b.ReadCheckpoint() records := test.Output{} - processor.Foreach(records.AddTo) + processor.ForEach(records.AddTo) // Output gauge should have only the "G=H" and "G=" keys. // Output counter should have only the "C=D" and "C=" keys. @@ -54,7 +54,7 @@ func TestGroupingStateless(t *testing.T) { // Verify that state was reset processor = b.ReadCheckpoint() - processor.Foreach(func(rec export.Record) { + processor.ForEach(func(rec export.Record) { t.Fatal("Unexpected call") }) } @@ -69,7 +69,7 @@ func TestGroupingStateful(t *testing.T) { processor := b.ReadCheckpoint() records1 := test.Output{} - processor.Foreach(records1.AddTo) + processor.ForEach(records1.AddTo) require.EqualValues(t, map[string]int64{ "counter/C=D": 10, // labels1 @@ -79,7 +79,7 @@ func TestGroupingStateful(t *testing.T) { processor = b.ReadCheckpoint() records2 := test.Output{} - processor.Foreach(records2.AddTo) + processor.ForEach(records2.AddTo) require.EqualValues(t, records1, records2) @@ -92,7 +92,7 @@ func TestGroupingStateful(t *testing.T) { processor = b.ReadCheckpoint() records3 := test.Output{} - processor.Foreach(records3.AddTo) + processor.ForEach(records3.AddTo) require.EqualValues(t, records1, records3) @@ -102,7 +102,7 @@ func TestGroupingStateful(t *testing.T) { processor = b.ReadCheckpoint() records4 := test.Output{} - processor.Foreach(records4.AddTo) + processor.ForEach(records4.AddTo) require.EqualValues(t, map[string]int64{ "counter/C=D": 30, diff --git a/sdk/metric/batcher/test/test.go b/sdk/metric/batcher/test/test.go index 71b03c88165..e9d7aea4389 100644 --- a/sdk/metric/batcher/test/test.go +++ b/sdk/metric/batcher/test/test.go @@ -126,9 +126,11 @@ func (o Output) AddTo(rec export.Record) { var value int64 switch t := rec.Aggregator().(type) { case *counter.Aggregator: - value = 
t.Sum().AsInt64() + sum, _ := t.Sum() + value = sum.AsInt64() case *gauge.Aggregator: - value = t.LastValue().AsInt64() + lv, _, _ := t.LastValue() + value = lv.AsInt64() } o[key] = value } diff --git a/sdk/metric/batcher/ungrouped/ungrouped.go b/sdk/metric/batcher/ungrouped/ungrouped.go index 8f510f8468d..6ceda972be4 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped.go +++ b/sdk/metric/batcher/ungrouped/ungrouped.go @@ -90,7 +90,7 @@ func (b *Batcher) ReadCheckpoint() export.Producer { return checkpoint } -func (c batchMap) Foreach(f func(export.Record)) { +func (c batchMap) ForEach(f func(export.Record)) { for key, value := range c { f(export.NewRecord( key.descriptor, diff --git a/sdk/metric/batcher/ungrouped/ungrouped_test.go b/sdk/metric/batcher/ungrouped/ungrouped_test.go index bacd0a776dc..a57d2c82a2d 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped_test.go +++ b/sdk/metric/batcher/ungrouped/ungrouped_test.go @@ -43,7 +43,7 @@ func TestUngroupedStateless(t *testing.T) { processor := b.ReadCheckpoint() records := test.Output{} - processor.Foreach(records.AddTo) + processor.ForEach(records.AddTo) // Output gauge should have only the "G=H" and "G=" keys. // Output counter should have only the "C=D" and "C=" keys. 
@@ -58,7 +58,7 @@ func TestUngroupedStateless(t *testing.T) { // Verify that state was reset processor = b.ReadCheckpoint() - processor.Foreach(func(rec export.Record) { + processor.ForEach(func(rec export.Record) { t.Fatal("Unexpected call") }) } @@ -73,7 +73,7 @@ func TestUngroupedStateful(t *testing.T) { processor := b.ReadCheckpoint() records1 := test.Output{} - processor.Foreach(records1.AddTo) + processor.ForEach(records1.AddTo) require.EqualValues(t, map[string]int64{ "counter/G~H&C~D": 10, // labels1 @@ -83,7 +83,7 @@ func TestUngroupedStateful(t *testing.T) { processor = b.ReadCheckpoint() records2 := test.Output{} - processor.Foreach(records2.AddTo) + processor.ForEach(records2.AddTo) require.EqualValues(t, records1, records2) @@ -96,7 +96,7 @@ func TestUngroupedStateful(t *testing.T) { processor = b.ReadCheckpoint() records3 := test.Output{} - processor.Foreach(records3.AddTo) + processor.ForEach(records3.AddTo) require.EqualValues(t, records1, records3) @@ -106,7 +106,7 @@ func TestUngroupedStateful(t *testing.T) { processor = b.ReadCheckpoint() records4 := test.Output{} - processor.Foreach(records4.AddTo) + processor.ForEach(records4.AddTo) require.EqualValues(t, map[string]int64{ "counter/G~H&C~D": 30, diff --git a/sdk/metric/controller/push/push_test.go b/sdk/metric/controller/push/push_test.go index 513043b72cd..fdd2f5c5df3 100644 --- a/sdk/metric/controller/push/push_test.go +++ b/sdk/metric/controller/push/push_test.go @@ -84,7 +84,7 @@ func (b *testBatcher) Process(_ context.Context, desc *export.Descriptor, labels func (e *testExporter) Export(_ context.Context, producer export.Producer) error { e.exports++ - producer.Foreach(func(r export.Record) { + producer.ForEach(func(r export.Record) { e.records = append(e.records, r) }) return e.retErr @@ -134,7 +134,10 @@ func TestPushTicker(t *testing.T) { require.Equal(t, 1, fix.exporter.exports) require.Equal(t, 1, len(fix.exporter.records)) require.Equal(t, "counter", 
fix.exporter.records[0].Descriptor().Name()) - require.Equal(t, int64(3), fix.exporter.records[0].Aggregator().(aggregator.Sum).Sum().AsInt64()) + + sum, err := fix.exporter.records[0].Aggregator().(aggregator.Sum).Sum() + require.Equal(t, int64(3), sum.AsInt64()) + require.Nil(t, err) fix.producer.Reset() fix.exporter.records = nil @@ -148,7 +151,10 @@ func TestPushTicker(t *testing.T) { require.Equal(t, 2, fix.exporter.exports) require.Equal(t, 1, len(fix.exporter.records)) require.Equal(t, "counter", fix.exporter.records[0].Descriptor().Name()) - require.Equal(t, int64(7), fix.exporter.records[0].Aggregator().(aggregator.Sum).Sum().AsInt64()) + + sum, err = fix.exporter.records[0].Aggregator().(aggregator.Sum).Sum() + require.Equal(t, int64(7), sum.AsInt64()) + require.Nil(t, err) p.Stop() } diff --git a/sdk/metric/correct_test.go b/sdk/metric/correct_test.go index 3b577228e41..0e92d97ffa6 100644 --- a/sdk/metric/correct_test.go +++ b/sdk/metric/correct_test.go @@ -68,26 +68,30 @@ func TestInputRangeTestCounter(t *testing.T) { } sdk := sdk.New(batcher, sdk.DefaultLabelEncoder()) - var err error - sdk.SetErrorHandler(func(sdkErr error) { - err = sdkErr + var sdkErr error + sdk.SetErrorHandler(func(handleErr error) { + sdkErr = handleErr }) counter := sdk.NewInt64Counter("counter.name", metric.WithMonotonic(true)) counter.Add(ctx, -1, sdk.Labels()) - require.Equal(t, aggregator.ErrNegativeInput, err) - err = nil + require.Equal(t, aggregator.ErrNegativeInput, sdkErr) + sdkErr = nil sdk.Collect(ctx) - require.Equal(t, int64(0), cagg.Sum().AsInt64()) + sum, err := cagg.Sum() + require.Equal(t, int64(0), sum.AsInt64()) + require.Nil(t, err) counter.Add(ctx, 1, sdk.Labels()) checkpointed := sdk.Collect(ctx) - require.Equal(t, int64(1), cagg.Sum().AsInt64()) + sum, err = cagg.Sum() + require.Equal(t, int64(1), sum.AsInt64()) require.Equal(t, 1, checkpointed) require.Nil(t, err) + require.Nil(t, sdkErr) } func TestInputRangeTestMeasure(t *testing.T) { @@ -99,26 +103,30 
@@ func TestInputRangeTestMeasure(t *testing.T) { } sdk := sdk.New(batcher, sdk.DefaultLabelEncoder()) - var err error - sdk.SetErrorHandler(func(sdkErr error) { - err = sdkErr + var sdkErr error + sdk.SetErrorHandler(func(handleErr error) { + sdkErr = handleErr }) measure := sdk.NewFloat64Measure("measure.name", metric.WithAbsolute(true)) measure.Record(ctx, -1, sdk.Labels()) - require.Equal(t, aggregator.ErrNegativeInput, err) - err = nil + require.Equal(t, aggregator.ErrNegativeInput, sdkErr) + sdkErr = nil sdk.Collect(ctx) - require.Equal(t, int64(0), magg.Count()) + count, err := magg.Count() + require.Equal(t, int64(0), count) + require.Nil(t, err) measure.Record(ctx, 1, sdk.Labels()) measure.Record(ctx, 2, sdk.Labels()) checkpointed := sdk.Collect(ctx) - require.Equal(t, int64(2), magg.Count()) + count, err = magg.Count() + require.Equal(t, int64(2), count) require.Equal(t, 1, checkpointed) + require.Nil(t, sdkErr) require.Nil(t, err) } @@ -145,15 +153,15 @@ func TestRecordNaN(t *testing.T) { } sdk := sdk.New(batcher, sdk.DefaultLabelEncoder()) - var err error - sdk.SetErrorHandler(func(sdkErr error) { - err = sdkErr + var sdkErr error + sdk.SetErrorHandler(func(handleErr error) { + sdkErr = handleErr }) g := sdk.NewFloat64Gauge("gauge.name") - require.Nil(t, err) + require.Nil(t, sdkErr) g.Set(ctx, math.NaN(), sdk.Labels()) - require.Error(t, err) + require.Error(t, sdkErr) } func TestSDKLabelEncoder(t *testing.T) { diff --git a/sdk/metric/monotone_test.go b/sdk/metric/monotone_test.go index d6eee3fc42b..72c5a72618d 100644 --- a/sdk/metric/monotone_test.go +++ b/sdk/metric/monotone_test.go @@ -53,8 +53,8 @@ func (m *monotoneBatcher) Process(_ context.Context, desc *export.Descriptor, la require.Equal(m.t, "b", labels.Ordered()[0].Value.Emit()) gauge := agg.(*gauge.Aggregator) - val := gauge.LastValue() - ts := gauge.Timestamp() + val, ts, err := gauge.LastValue() + require.Nil(m.t, err) m.currentValue = &val m.currentTime = &ts diff --git 
a/sdk/metric/stress_test.go b/sdk/metric/stress_test.go index 6590bd42ca7..eb24dfa1989 100644 --- a/sdk/metric/stress_test.go +++ b/sdk/metric/stress_test.go @@ -37,6 +37,7 @@ import ( api "go.opentelemetry.io/otel/api/metric" export "go.opentelemetry.io/otel/sdk/export/metric" sdk "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/aggregator" "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" "go.opentelemetry.io/otel/sdk/metric/aggregator/gauge" ) @@ -257,10 +258,19 @@ func (f *testFixture) Process(ctx context.Context, desc *export.Descriptor, labe switch desc.MetricKind() { case export.CounterKind: - f.impl.storeCollect(actual, agg.(*counter.Aggregator).Sum(), time.Time{}) + counter := agg.(aggregator.Sum) + sum, err := counter.Sum() + if err != nil { + panic("Impossible") + } + f.impl.storeCollect(actual, sum, time.Time{}) case export.GaugeKind: - gauge := agg.(*gauge.Aggregator) - f.impl.storeCollect(actual, gauge.LastValue(), gauge.Timestamp()) + gauge := agg.(aggregator.LastValue) + lv, ts, err := gauge.LastValue() + if err != nil { + panic("Impossible") + } + f.impl.storeCollect(actual, lv, ts) default: panic("Not used in this test") } From efb75edf9493d57897c32f4eb9f6f48a48d0b2ed Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 14 Nov 2019 14:37:32 -0800 Subject: [PATCH 59/73] Handle ErrNoLastValue case in stdout exporter --- exporter/metric/stdout/stdout.go | 8 +++++++- exporter/metric/stdout/stdout_test.go | 16 ++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index 3e280f7fbaa..dddf543b5ee 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -56,7 +56,7 @@ type Options struct { type expoBatch struct { Timestamp *time.Time `json:"time,omitempty"` - Updates []expoLine `json:"updates,omitempty"` + Updates []expoLine `json:"updates"` } type expoLine struct { @@ -159,6 +159,12 @@ func (e *Exporter) 
Export(_ context.Context, producer export.Producer) error { } else if lv, ok := agg.(aggregator.LastValue); ok { if value, timestamp, err := lv.LastValue(); err != nil { + if err == aggregator.ErrNoLastValue { + // This is a special case, indicates an aggregator that + // was checkpointed before its first value was set. + return + } + aggError = err expose.LastValue = "NaN" } else { diff --git a/exporter/metric/stdout/stdout_test.go b/exporter/metric/stdout/stdout_test.go index 42dd0bb26fc..6d9b5dddf0b 100644 --- a/exporter/metric/stdout/stdout_test.go +++ b/exporter/metric/stdout/stdout_test.go @@ -238,3 +238,19 @@ func TestStdoutAggError(t *testing.T) { require.Equal(t, aggregator.ErrEmptyDataSet, err) require.Equal(t, `{"updates":[{"name":"test.name","max":"NaN","sum":0,"count":0,"quantiles":[{"q":0.5,"v":"NaN"},{"q":0.9,"v":"NaN"},{"q":0.99,"v":"NaN"}]}]}`, fix.Output()) } + +func TestStdoutGaugeNotSet(t *testing.T) { + fix := newFixture(t, stdout.Options{}) + + producer := test.NewProducer(sdk.DefaultLabelEncoder()) + + desc := export.NewDescriptor("test.name", export.GaugeKind, nil, "", "", core.Float64NumberKind, false) + gagg := gauge.New() + gagg.Checkpoint(fix.ctx, desc) + + producer.Add(desc, gagg, key.String("A", "B"), key.String("C", "D")) + + fix.Export(producer) + + require.Equal(t, `{"updates":null}`, fix.Output()) +} From 1153503c45cfc9fee06ab1f112834aecaaa0e7cb Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 14 Nov 2019 16:50:33 -0800 Subject: [PATCH 60/73] Move aggregator API into sdk/export/metric/aggregator --- exporter/metric/stdout/stdout.go | 2 +- exporter/metric/stdout/stdout_test.go | 2 +- go.sum | 1 + .../metric/aggregator/api.go} | 36 ++++++++++++- sdk/export/metric/metric.go | 17 +++--- sdk/metric/aggregator/api.go | 54 ------------------- sdk/metric/aggregator/array/array.go | 2 +- sdk/metric/aggregator/array/array_test.go | 2 +- sdk/metric/aggregator/counter/counter.go | 2 +- sdk/metric/aggregator/ddsketch/ddsketch.go | 2 +- 
sdk/metric/aggregator/gauge/gauge.go | 2 +- sdk/metric/aggregator/gauge/gauge_test.go | 2 +- sdk/metric/aggregator/maxsumcount/msc.go | 2 +- sdk/metric/aggregator/test/test.go | 2 +- sdk/metric/controller/push/push_test.go | 2 +- sdk/metric/correct_test.go | 2 +- sdk/metric/monotone_test.go | 2 +- sdk/metric/sdk.go | 2 +- sdk/metric/stress_test.go | 2 +- 19 files changed, 62 insertions(+), 76 deletions(-) rename sdk/{metric/aggregator/errors.go => export/metric/aggregator/api.go} (79%) delete mode 100644 sdk/metric/aggregator/api.go diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index dddf543b5ee..2f0239eff17 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -24,7 +24,7 @@ import ( "time" export "go.opentelemetry.io/otel/sdk/export/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" + "go.opentelemetry.io/otel/sdk/export/metric/aggregator" ) type Exporter struct { diff --git a/exporter/metric/stdout/stdout_test.go b/exporter/metric/stdout/stdout_test.go index 6d9b5dddf0b..009d1cfbda2 100644 --- a/exporter/metric/stdout/stdout_test.go +++ b/exporter/metric/stdout/stdout_test.go @@ -15,8 +15,8 @@ import ( "go.opentelemetry.io/otel/exporter/metric/stdout" "go.opentelemetry.io/otel/exporter/metric/test" export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/export/metric/aggregator" sdk "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" "go.opentelemetry.io/otel/sdk/metric/aggregator/array" "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" "go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch" diff --git a/go.sum b/go.sum index 28b92022a51..b483f721b6e 100644 --- a/go.sum +++ b/go.sum @@ -280,6 +280,7 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= 
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opentelemetry.io v0.1.0 h1:EANZoRCOP+A3faIlw/iN6YEWoYb1vleZRKm1EvH8T48= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= diff --git a/sdk/metric/aggregator/errors.go b/sdk/export/metric/aggregator/api.go similarity index 79% rename from sdk/metric/aggregator/errors.go rename to sdk/export/metric/aggregator/api.go index 4679b7c01d9..c92edd75c03 100644 --- a/sdk/metric/aggregator/errors.go +++ b/sdk/export/metric/aggregator/api.go @@ -12,16 +12,50 @@ // See the License for the specific language governing permissions and // limitations under the License. -package aggregator +package aggregator // import "go.opentelemetry.io/otel/sdk/metric/aggregator" import ( "fmt" "math" + "time" "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" ) +type ( + Sum interface { + Sum() (core.Number, error) + } + + Count interface { + Count() (int64, error) + } + + Max interface { + Max() (core.Number, error) + } + + Quantile interface { + Quantile(float64) (core.Number, error) + } + + LastValue interface { + LastValue() (core.Number, time.Time, error) + } + + MaxSumCount interface { + Sum + Count + Max + } + + Distribution interface { + MaxSumCount + Quantile + } +) + var ( ErrEmptyDataSet = fmt.Errorf("The result is not defined on an empty data set") ErrInvalidQuantile = fmt.Errorf("The requested quantile is out of range") diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 8aa7f7ec8d4..0166899c9a1 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -124,17 +124,22 @@ type Aggregator interface { // 
inspected for distributed or span context. Update(context.Context, core.Number, *Descriptor) error - // Checkpoint is called in collection context to finish one - // period of aggregation. Checkpoint() is called in a - // single-threaded context, no locking is required. + // Checkpoint is called during collection to finish one period + // of aggregation by atomically saving the current value. + // Checkpoint() is called concurrently with Update(). + // + // After the checkpoint is taken, the current value may be + // accessed using by converting to one a suitable interface + // types in the `aggregator` sub-package. // // The Context argument originates from the controller that // orchestrates collection. Checkpoint(context.Context, *Descriptor) - // Merge combines state from the argument aggregator into this - // one. Merge() is called in a single-threaded context, no - // locking is required. + // Merge combines the checkpointed state from the argument + // aggregator into this aggregator's checkpointed state. + // Merge() is called in a single-threaded context, no locking + // is required. Merge(Aggregator, *Descriptor) error } diff --git a/sdk/metric/aggregator/api.go b/sdk/metric/aggregator/api.go deleted file mode 100644 index 8946251cfbe..00000000000 --- a/sdk/metric/aggregator/api.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package aggregator // import "go.opentelemetry.io/otel/sdk/metric/aggregator" - -import ( - "time" - - "go.opentelemetry.io/otel/api/core" -) - -type ( - Sum interface { - Sum() (core.Number, error) - } - - Count interface { - Count() (int64, error) - } - - Max interface { - Max() (core.Number, error) - } - - Quantile interface { - Quantile(float64) (core.Number, error) - } - - LastValue interface { - LastValue() (core.Number, time.Time, error) - } - - MaxSumCount interface { - Sum - Count - Max - } - - Distribution interface { - MaxSumCount - Quantile - } -) diff --git a/sdk/metric/aggregator/array/array.go b/sdk/metric/aggregator/array/array.go index 3565f146feb..9d7fa5c7066 100644 --- a/sdk/metric/aggregator/array/array.go +++ b/sdk/metric/aggregator/array/array.go @@ -23,7 +23,7 @@ import ( "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" + "go.opentelemetry.io/otel/sdk/export/metric/aggregator" ) type ( diff --git a/sdk/metric/aggregator/array/array_test.go b/sdk/metric/aggregator/array/array_test.go index 6d94d100ac1..279554deb3a 100644 --- a/sdk/metric/aggregator/array/array_test.go +++ b/sdk/metric/aggregator/array/array_test.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" + "go.opentelemetry.io/otel/sdk/export/metric/aggregator" "go.opentelemetry.io/otel/sdk/metric/aggregator/test" ) diff --git a/sdk/metric/aggregator/counter/counter.go b/sdk/metric/aggregator/counter/counter.go index 3a3e1f680f8..f16346fd1ea 100644 --- a/sdk/metric/aggregator/counter/counter.go +++ b/sdk/metric/aggregator/counter/counter.go @@ -19,7 +19,7 @@ import ( "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" + "go.opentelemetry.io/otel/sdk/export/metric/aggregator" ) // Aggregator 
aggregates counter events. diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index 43b16ebe268..ddf35400e11 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -23,7 +23,7 @@ import ( "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" + "go.opentelemetry.io/otel/sdk/export/metric/aggregator" ) // Config is an alias for the underlying DDSketch config object. diff --git a/sdk/metric/aggregator/gauge/gauge.go b/sdk/metric/aggregator/gauge/gauge.go index 2e0485892ad..536e752bf6b 100644 --- a/sdk/metric/aggregator/gauge/gauge.go +++ b/sdk/metric/aggregator/gauge/gauge.go @@ -22,7 +22,7 @@ import ( "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" + "go.opentelemetry.io/otel/sdk/export/metric/aggregator" ) // Note: This aggregator enforces the behavior of monotonic gauges to diff --git a/sdk/metric/aggregator/gauge/gauge_test.go b/sdk/metric/aggregator/gauge/gauge_test.go index a1c4112daa0..aeff19643f1 100644 --- a/sdk/metric/aggregator/gauge/gauge_test.go +++ b/sdk/metric/aggregator/gauge/gauge_test.go @@ -23,7 +23,7 @@ import ( "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" + "go.opentelemetry.io/otel/sdk/export/metric/aggregator" "go.opentelemetry.io/otel/sdk/metric/aggregator/test" ) diff --git a/sdk/metric/aggregator/maxsumcount/msc.go b/sdk/metric/aggregator/maxsumcount/msc.go index 0fb8529032e..8629387e1cd 100644 --- a/sdk/metric/aggregator/maxsumcount/msc.go +++ b/sdk/metric/aggregator/maxsumcount/msc.go @@ -19,7 +19,7 @@ import ( "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" + 
"go.opentelemetry.io/otel/sdk/export/metric/aggregator" ) type ( diff --git a/sdk/metric/aggregator/test/test.go b/sdk/metric/aggregator/test/test.go index 1f7e318bb1a..b1cb2025ad7 100644 --- a/sdk/metric/aggregator/test/test.go +++ b/sdk/metric/aggregator/test/test.go @@ -22,7 +22,7 @@ import ( "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" + "go.opentelemetry.io/otel/sdk/export/metric/aggregator" ) const Magnitude = 1000 diff --git a/sdk/metric/controller/push/push_test.go b/sdk/metric/controller/push/push_test.go index fdd2f5c5df3..5f59b3d5063 100644 --- a/sdk/metric/controller/push/push_test.go +++ b/sdk/metric/controller/push/push_test.go @@ -26,8 +26,8 @@ import ( "go.opentelemetry.io/otel/exporter/metric/test" export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/export/metric/aggregator" sdk "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" "go.opentelemetry.io/otel/sdk/metric/controller/push" ) diff --git a/sdk/metric/correct_test.go b/sdk/metric/correct_test.go index 0e92d97ffa6..087f0fff175 100644 --- a/sdk/metric/correct_test.go +++ b/sdk/metric/correct_test.go @@ -26,8 +26,8 @@ import ( "go.opentelemetry.io/otel/api/key" "go.opentelemetry.io/otel/api/metric" export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/export/metric/aggregator" sdk "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" "go.opentelemetry.io/otel/sdk/metric/aggregator/array" "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" "go.opentelemetry.io/otel/sdk/metric/aggregator/gauge" diff --git a/sdk/metric/monotone_test.go b/sdk/metric/monotone_test.go index 72c5a72618d..7af1ef91e02 100644 --- a/sdk/metric/monotone_test.go +++ b/sdk/metric/monotone_test.go @@ -25,8 +25,8 @@ import ( 
"go.opentelemetry.io/otel/api/key" "go.opentelemetry.io/otel/api/metric" export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/export/metric/aggregator" sdk "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" "go.opentelemetry.io/otel/sdk/metric/aggregator/gauge" ) diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 33919f149db..9cf157f5f9c 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -27,7 +27,7 @@ import ( "go.opentelemetry.io/otel/api/metric" api "go.opentelemetry.io/otel/api/metric" export "go.opentelemetry.io/otel/sdk/export/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" + "go.opentelemetry.io/otel/sdk/export/metric/aggregator" ) type ( diff --git a/sdk/metric/stress_test.go b/sdk/metric/stress_test.go index eb24dfa1989..6779c4e14d1 100644 --- a/sdk/metric/stress_test.go +++ b/sdk/metric/stress_test.go @@ -36,8 +36,8 @@ import ( "go.opentelemetry.io/otel/api/metric" api "go.opentelemetry.io/otel/api/metric" export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/export/metric/aggregator" sdk "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" "go.opentelemetry.io/otel/sdk/metric/aggregator/gauge" ) From 623a63a5ab67173d5218df3f0d2774c0c64c0ee8 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 14 Nov 2019 17:11:39 -0800 Subject: [PATCH 61/73] Update all aggregator exported-method comments --- exporter/metric/stdout/stdout.go | 6 +++++ sdk/metric/aggregator/array/array.go | 30 +++++++++++++++------- sdk/metric/aggregator/counter/counter.go | 14 ++++++---- sdk/metric/aggregator/ddsketch/ddsketch.go | 25 ++++++++++-------- sdk/metric/aggregator/gauge/gauge.go | 13 +++++++--- sdk/metric/aggregator/maxsumcount/msc.go | 24 ++++++++++++----- 6 files changed, 79 insertions(+), 33 deletions(-) diff --git a/exporter/metric/stdout/stdout.go 
b/exporter/metric/stdout/stdout.go index 2f0239eff17..ce28315fcff 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -131,6 +131,12 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) error { expose.Count = count } + // TODO: Should tolerate ErrEmptyDataSet here, + // just like ErrNoLastValue below, since + // there's a race condition between creating + // the Aggregator and updating the first + // value. + if max, err := msc.Max(); err != nil { aggError = err expose.Max = "NaN" diff --git a/sdk/metric/aggregator/array/array.go b/sdk/metric/aggregator/array/array.go index 9d7fa5c7066..00064512b1f 100644 --- a/sdk/metric/aggregator/array/array.go +++ b/sdk/metric/aggregator/array/array.go @@ -41,35 +41,41 @@ var _ export.Aggregator = &Aggregator{} var _ aggregator.MaxSumCount = &Aggregator{} var _ aggregator.Distribution = &Aggregator{} +// New returns a new array aggregator, which aggregates recorded +// measurements by storing them in an array. This type uses a mutex +// for Update() and Checkpoint() concurrency. func New() *Aggregator { return &Aggregator{} } -// Sum returns the sum of the checkpoint. +// Sum returns the sum of values in the checkpoint. func (c *Aggregator) Sum() (core.Number, error) { return c.ckptSum, nil } -// Count returns the count of the checkpoint. +// Count returns the number of values in the checkpoint. func (c *Aggregator) Count() (int64, error) { return int64(len(c.checkpoint)), nil } -// Max returns the max of the checkpoint. +// Max returns the maximum value in the checkpoint. func (c *Aggregator) Max() (core.Number, error) { return c.checkpoint.Quantile(1) } -// Min returns the min of the checkpoint. +// Min returns the mininum value in the checkpoint. func (c *Aggregator) Min() (core.Number, error) { return c.checkpoint.Quantile(0) } -// Quantile returns the estimated quantile of the checkpoint. +// Quantile returns the estimated quantile of data in the checkpoint. 
+// It is an error if `q` is less than 0 or greated than 1. func (c *Aggregator) Quantile(q float64) (core.Number, error) { return c.checkpoint.Quantile(q) } +// Checkpoint saves the current state, taking a lock to prevent +// concurrent Update() calls. func (c *Aggregator) Checkpoint(ctx context.Context, desc *export.Descriptor) { c.lock.Lock() c.checkpoint, c.current = c.current, nil @@ -77,6 +83,10 @@ func (c *Aggregator) Checkpoint(ctx context.Context, desc *export.Descriptor) { kind := desc.NumberKind() + // TODO: This sort should be done lazily, only when quantiles + // are requested. The SDK specification says you can use this + // aggregator to simply list values in the order they were + // received as an alternative to requesting quantile information. c.sort(kind) c.ckptSum = core.Number(0) @@ -86,6 +96,9 @@ func (c *Aggregator) Checkpoint(ctx context.Context, desc *export.Descriptor) { } } +// Update adds the recorded measurement to the current data set. +// Update takes a lock to prevent concurrent Update() and Checkpoint() +// calls. func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) error { c.lock.Lock() c.current = append(c.current, number) @@ -93,6 +106,7 @@ func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export. return nil } +// Merge combines two data sets into one. func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { @@ -152,7 +166,8 @@ func (p *Points) Swap(i, j int) { } // Quantile returns the least X such that Pr(x=q, where X is an -// element of the data set. +// element of the data set. This uses the "Nearest-Rank" definition +// of a quantile. 
func (p *Points) Quantile(q float64) (core.Number, error) { if len(*p) == 0 { return core.Number(0), aggregator.ErrEmptyDataSet @@ -168,9 +183,6 @@ func (p *Points) Quantile(q float64) (core.Number, error) { return (*p)[len(*p)-1], nil } - // Note: There's no interpolation being done here. There are - // many definitions for "quantile", some interpolate, some do - // not. What is expected? position := float64(len(*p)-1) * q ceil := int(math.Ceil(position)) return (*p)[ceil], nil diff --git a/sdk/metric/aggregator/counter/counter.go b/sdk/metric/aggregator/counter/counter.go index f16346fd1ea..e79cac00f15 100644 --- a/sdk/metric/aggregator/counter/counter.go +++ b/sdk/metric/aggregator/counter/counter.go @@ -34,28 +34,32 @@ type Aggregator struct { var _ export.Aggregator = &Aggregator{} var _ aggregator.Sum = &Aggregator{} -// New returns a new counter aggregator. This aggregator computes an -// atomic sum. +// New returns a new counter aggregator implemented by atomic +// operations. This aggregator implements the aggregator.Sum +// export interface. func New() *Aggregator { return &Aggregator{} } -// Sum returns the accumulated count as a Number. +// Sum returns the last-checkpointed sum. This will never return an +// error. func (c *Aggregator) Sum() (core.Number, error) { return c.checkpoint, nil } -// Checkpoint checkpoints the current value (atomically) and exports it. +// Checkpoint atomically saves the current value and resets the +// current sum to zero. func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) { c.checkpoint = c.current.SwapNumberAtomic(core.Number(0)) } -// Update modifies the current value (atomically) for later export. +// Update atomically adds to the current value. func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) error { c.current.AddNumberAtomic(desc.NumberKind(), number) return nil } +// Merge combines two counters by adding their sums. 
func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index ddf35400e11..92d1e228238 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -53,34 +53,35 @@ func New(cfg *Config, desc *export.Descriptor) *Aggregator { // NewDefaultConfig returns a new, default DDSketch config. // -// TODO: The Config constructor should probably set minValue to -Inf -// to aggregate metrics with absolute=false. This requires providing values -// for alpha and maxNumBins +// TODO: Should the Config constructor set minValue to -Inf to +// when the descriptor has absolute=false? This requires providing +// values for alpha and maxNumBins, apparently. func NewDefaultConfig() *Config { return sdk.NewDefaultConfig() } -// Sum returns the sum of the checkpoint. +// Sum returns the sum of values in the checkpoint. func (c *Aggregator) Sum() (core.Number, error) { return c.toNumber(c.checkpoint.Sum()), nil } -// Count returns the count of the checkpoint. +// Count returns the number of values in the checkpoint. func (c *Aggregator) Count() (int64, error) { return c.checkpoint.Count(), nil } -// Max returns the max of the checkpoint. +// Max returns the maximum value in the checkpoint. func (c *Aggregator) Max() (core.Number, error) { return c.Quantile(1) } -// Min returns the min of the checkpoint. +// Min returns the mininum value in the checkpoint. func (c *Aggregator) Min() (core.Number, error) { return c.Quantile(0) } -// Quantile returns the estimated quantile of the checkpoint. +// Quantile returns the estimated quantile of data in the checkpoint. +// It is an error if `q` is less than 0 or greated than 1. 
func (c *Aggregator) Quantile(q float64) (core.Number, error) { if c.checkpoint.Count() == 0 { return core.Number(0), aggregator.ErrEmptyDataSet @@ -99,7 +100,8 @@ func (c *Aggregator) toNumber(f float64) core.Number { return core.NewInt64Number(int64(f)) } -// Checkpoint checkpoints the current value (atomically) and exports it. +// Checkpoint saves the current state, taking a lock to prevent +// concurrent Update() calls. func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) { replace := sdk.NewDDSketch(c.cfg) @@ -109,7 +111,9 @@ func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) { c.lock.Unlock() } -// Update modifies the current value (atomically) for later export. +// Update adds the recorded measurement to the current data set. +// Update takes a lock to prevent concurrent Update() and Checkpoint() +// calls. func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) error { c.lock.Lock() defer c.lock.Unlock() @@ -117,6 +121,7 @@ func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export. return nil } +// Merge combines two sketches into one. func (c *Aggregator) Merge(oa export.Aggregator, d *export.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { diff --git a/sdk/metric/aggregator/gauge/gauge.go b/sdk/metric/aggregator/gauge/gauge.go index 536e752bf6b..ce61a788892 100644 --- a/sdk/metric/aggregator/gauge/gauge.go +++ b/sdk/metric/aggregator/gauge/gauge.go @@ -70,7 +70,10 @@ func New() *Aggregator { } } -// LastValue returns the last-recorded gauge value as a Number. +// LastValue returns the last-recorded gauge value and the +// corresponding timestamp. The error value aggregator.ErrNoLastValue +// will be returned if (due to a race condition) the checkpoint was +// computed before the first value was set. 
func (g *Aggregator) LastValue() (core.Number, time.Time, error) { gd := (*gaugeData)(g.checkpoint) if gd == unsetGauge { @@ -79,12 +82,12 @@ func (g *Aggregator) LastValue() (core.Number, time.Time, error) { return gd.value.AsNumber(), gd.timestamp, nil } -// Checkpoint checkpoints the current value (atomically) and exports it. +// Checkpoint atomically saves the current value. func (g *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) { g.checkpoint = atomic.LoadPointer(&g.current) } -// Update modifies the current value (atomically) for later export. +// Update atomically sets the current "last" value. func (g *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) error { if !desc.Alternate() { g.updateNonMonotonic(number) @@ -121,6 +124,10 @@ func (g *Aggregator) updateMonotonic(number core.Number, desc *export.Descriptor } } +// Merge combines state from two aggregators. If the gauge is +// declared as monotonic, the greater value is chosen. If the gauge +// is declared as non-monotonic, the most-recently set value is +// chosen. func (g *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { diff --git a/sdk/metric/aggregator/maxsumcount/msc.go b/sdk/metric/aggregator/maxsumcount/msc.go index 8629387e1cd..12b60da4515 100644 --- a/sdk/metric/aggregator/maxsumcount/msc.go +++ b/sdk/metric/aggregator/maxsumcount/msc.go @@ -37,30 +37,41 @@ type ( } ) +// TODO: The SDK specification says this type should support Min +// values, see #319. + var _ export.Aggregator = &Aggregator{} var _ aggregator.MaxSumCount = &Aggregator{} -// New returns a new measure aggregator for computing max, sum, and count. +// New returns a new measure aggregator for computing max, sum, and +// count. It does not compute quantile information other than Max. 
+// +// Note that this aggregator maintains each value using independent +// atomic operations, which introduces the possibility that +// checkpoints are inconsistent. For greater consistency and lower +// performance, consider using Array or DDSketch aggregators. func New() *Aggregator { return &Aggregator{} } -// Sum returns the accumulated sum as a Number. +// Sum returns the sum of values in the checkpoint. func (c *Aggregator) Sum() (core.Number, error) { return c.checkpoint.sum, nil } -// Count returns the accumulated count. +// Count returns the number of values in the checkpoint. func (c *Aggregator) Count() (int64, error) { return int64(c.checkpoint.count.AsUint64()), nil } -// Max returns the accumulated max as a Number. +// Max returns the maximum value in the checkpoint. func (c *Aggregator) Max() (core.Number, error) { return c.checkpoint.max, nil } -// Checkpoint checkpoints the current value (atomically) and exports it. +// Checkpoint saves the current state. Since no locks are taken, +// there is a chance that the independent Max, Sum, and Count are not +// consistent with each other. func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) { // N.B. There is no atomic operation that can update all three // values at once without a memory allocation. @@ -77,7 +88,7 @@ func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) { c.checkpoint.max = c.current.max.SwapNumberAtomic(core.Number(0)) } -// Update modifies the current value (atomically) for later export. +// Update adds the recorded measurement to the current data set. func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) error { kind := desc.NumberKind() @@ -97,6 +108,7 @@ func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export. return nil } +// Merge combines two data sets into one. 
func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { From e298c94e91fba08d11adbb9727e28311fd8ea311 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 14 Nov 2019 17:23:16 -0800 Subject: [PATCH 62/73] Document the aggregator APIs --- sdk/export/metric/aggregator/api.go | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/sdk/export/metric/aggregator/api.go b/sdk/export/metric/aggregator/api.go index c92edd75c03..1530c48dcc7 100644 --- a/sdk/export/metric/aggregator/api.go +++ b/sdk/export/metric/aggregator/api.go @@ -23,33 +23,45 @@ import ( export "go.opentelemetry.io/otel/sdk/export/metric" ) +// These interfaces describe the various ways to access state from an +// Aggregator. + type ( + // Sum returns an aggregated sum. Sum interface { Sum() (core.Number, error) } + // Sum returns the number of values that were aggregated. Count interface { Count() (int64, error) } + // Max returns the maximum value over the set of values that were aggregated. Max interface { Max() (core.Number, error) } + // Quantile returns an exact or estimated quantile over the + // set of values that were aggregated. Quantile interface { Quantile(float64) (core.Number, error) } + // LastValue returns the latest value that was aggregated. LastValue interface { LastValue() (core.Number, time.Time, error) } + // MaxSumCount supports the Max, Sum, and Count interfaces. MaxSumCount interface { Sum Count Max } + // MaxSumCount supports the Max, Sum, Count, and Quantile + // interfaces. 
Distribution interface { MaxSumCount Quantile @@ -57,20 +69,29 @@ type ( ) var ( - ErrEmptyDataSet = fmt.Errorf("The result is not defined on an empty data set") ErrInvalidQuantile = fmt.Errorf("The requested quantile is out of range") ErrNegativeInput = fmt.Errorf("Negative value is out of range for this instrument") ErrNaNInput = fmt.Errorf("NaN value is an invalid input") ErrNonMonotoneInput = fmt.Errorf("The new value is not monotone") ErrInconsistentType = fmt.Errorf("Cannot merge different aggregator types") - // ErrNoLastValue is returned by the gauge.Aggregator when + // ErrNoLastValue is returned by the LastValue interface when // (due to a race with collection) the Aggregator is // checkpointed before the first value is set. The aggregator // should simply be skipped in this case. ErrNoLastValue = fmt.Errorf("No value has been set") + + // ErrEmptyDataSet is returned by Max and Quantile interfaces + // when (due to a race with collection) the Aggregator is + // checkpointed before the first value is set. The aggregator + // should simply be skipped in this case. + ErrEmptyDataSet = fmt.Errorf("The result is not defined on an empty data set") ) +// RangeTest is a commmon routine for testing for valid input values. +// This rejects NaN values. This rejects negative values when the +// metric instrument does not support negative values, including +// monotonic counter metrics and absolute measure metrics). 
func RangeTest(number core.Number, descriptor *export.Descriptor) error { numberKind := descriptor.NumberKind() From d75bc6eec555c168acc5aef29f66a3401b18232a Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 14 Nov 2019 17:33:37 -0800 Subject: [PATCH 63/73] More aggregator comments --- sdk/export/metric/metric.go | 8 ++++++++ sdk/metric/aggregator/array/array.go | 4 ++-- sdk/metric/aggregator/ddsketch/ddsketch.go | 4 ++-- sdk/metric/aggregator/maxsumcount/msc.go | 7 ++++--- 4 files changed, 16 insertions(+), 7 deletions(-) diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 0166899c9a1..d2611b9cec6 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -107,6 +107,11 @@ type AggregationSelector interface { // metrics offer a wide range of potential tradeoffs and several // implementations are provided. // +// Aggregators are meant to compute the change (i.e., delta) in state +// from one checkpoint to the next, with the exception of gauge +// aggregators. Gauge aggregators are required to maintain the last +// value across checkpoints to implement montonic gauge support. +// // Note that any Aggregator may be attached to any instrument--this is // the result of the OpenTelemetry API/SDK separation. It is possible // to attach a counter aggregator to a measure instrument (to compute @@ -127,6 +132,9 @@ type Aggregator interface { // Checkpoint is called during collection to finish one period // of aggregation by atomically saving the current value. // Checkpoint() is called concurrently with Update(). + // Checkpoint should reset the current state to the empty + // state, in order to begin computing a new delta for the next + // collection period. 
// // After the checkpoint is taken, the current value may be // accessed using by converting to one a suitable interface diff --git a/sdk/metric/aggregator/array/array.go b/sdk/metric/aggregator/array/array.go index 00064512b1f..34e17100b0f 100644 --- a/sdk/metric/aggregator/array/array.go +++ b/sdk/metric/aggregator/array/array.go @@ -74,8 +74,8 @@ func (c *Aggregator) Quantile(q float64) (core.Number, error) { return c.checkpoint.Quantile(q) } -// Checkpoint saves the current state, taking a lock to prevent -// concurrent Update() calls. +// Checkpoint saves the current state and resets the current state to +// the empty set, taking a lock to prevent concurrent Update() calls. func (c *Aggregator) Checkpoint(ctx context.Context, desc *export.Descriptor) { c.lock.Lock() c.checkpoint, c.current = c.current, nil diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index 92d1e228238..5160afeda89 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -100,8 +100,8 @@ func (c *Aggregator) toNumber(f float64) core.Number { return core.NewInt64Number(int64(f)) } -// Checkpoint saves the current state, taking a lock to prevent -// concurrent Update() calls. +// Checkpoint saves the current state and resets the current state to +// the empty set, taking a lock to prevent concurrent Update() calls. func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) { replace := sdk.NewDDSketch(c.cfg) diff --git a/sdk/metric/aggregator/maxsumcount/msc.go b/sdk/metric/aggregator/maxsumcount/msc.go index 12b60da4515..4f936f297a3 100644 --- a/sdk/metric/aggregator/maxsumcount/msc.go +++ b/sdk/metric/aggregator/maxsumcount/msc.go @@ -69,9 +69,10 @@ func (c *Aggregator) Max() (core.Number, error) { return c.checkpoint.max, nil } -// Checkpoint saves the current state. 
Since no locks are taken, -// there is a chance that the independent Max, Sum, and Count are not -// consistent with each other. +// Checkpoint saves the current state and resets the current state to +// the empty set. Since no locks are taken, there is a chance that +// the independent Max, Sum, and Count are not consistent with each +// other. func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) { // N.B. There is no atomic operation that can update all three // values at once without a memory allocation. From 723d0845d6bece5e5957c9847120ed34d2ac2a0f Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 14 Nov 2019 20:11:48 -0800 Subject: [PATCH 64/73] Add multiple updates to the ungrouped test --- sdk/metric/batcher/ungrouped/ungrouped_test.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/sdk/metric/batcher/ungrouped/ungrouped_test.go b/sdk/metric/batcher/ungrouped/ungrouped_test.go index a57d2c82a2d..40da3269079 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped_test.go +++ b/sdk/metric/batcher/ungrouped/ungrouped_test.go @@ -26,20 +26,28 @@ import ( "go.opentelemetry.io/otel/sdk/metric/batcher/ungrouped" ) -// These tests use the original label encoding. +// These tests use the ../test label encoding. 
func TestUngroupedStateless(t *testing.T) { ctx := context.Background() b := ungrouped.New(test.NewAggregationSelector(), false) + // Set initial gauge values _ = b.Process(ctx, test.GaugeDesc, test.Labels1, test.GaugeAgg(10)) _ = b.Process(ctx, test.GaugeDesc, test.Labels2, test.GaugeAgg(20)) _ = b.Process(ctx, test.GaugeDesc, test.Labels3, test.GaugeAgg(30)) + // Another gauge Set for Labels1 + _ = b.Process(ctx, test.GaugeDesc, test.Labels1, test.GaugeAgg(50)) + + // Set initial counter values _ = b.Process(ctx, test.CounterDesc, test.Labels1, test.CounterAgg(10)) _ = b.Process(ctx, test.CounterDesc, test.Labels2, test.CounterAgg(20)) _ = b.Process(ctx, test.CounterDesc, test.Labels3, test.CounterAgg(40)) + // Another counter Add for Labels1 + _ = b.Process(ctx, test.CounterDesc, test.Labels1, test.CounterAgg(50)) + processor := b.ReadCheckpoint() records := test.Output{} @@ -48,10 +56,10 @@ func TestUngroupedStateless(t *testing.T) { // Output gauge should have only the "G=H" and "G=" keys. // Output counter should have only the "C=D" and "C=" keys. 
require.EqualValues(t, map[string]int64{ - "counter/G~H&C~D": 10, // labels1 + "counter/G~H&C~D": 60, // labels1 "counter/C~D&E~F": 20, // labels2 "counter/": 40, // labels3 - "gauge/G~H&C~D": 10, // labels1 + "gauge/G~H&C~D": 50, // labels1 "gauge/C~D&E~F": 20, // labels2 "gauge/": 30, // labels3 }, records) From 8b5a4d6564e78aae65fe87c55e619b01417aeb1e Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 14 Nov 2019 20:38:40 -0800 Subject: [PATCH 65/73] Fixes for feedback from Gustavo and Liz --- sdk/export/metric/metric.go | 3 + sdk/metric/batcher/defaultkeys/defaultkeys.go | 87 +++++++++---------- 2 files changed, 45 insertions(+), 45 deletions(-) diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index d2611b9cec6..0944916e57f 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -374,6 +374,9 @@ func (d *Descriptor) NumberKind() core.NumberKind { // - A counter instrument is non-monotonic // - A gauge instrument is monotonic // - A measure instrument is non-absolute +// +// TODO: Consider renaming this method, or expanding to provide +// kind-specific tests (e.g., Monotonic(), Absolute()). 
func (d *Descriptor) Alternate() bool { return d.alternate } diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys.go b/sdk/metric/batcher/defaultkeys/defaultkeys.go index 75306e937ef..24b1ffda9fe 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys.go @@ -23,38 +23,38 @@ import ( type ( Batcher struct { - selector export.AggregationSelector - lencoder export.LabelEncoder - stateful bool - dki dkiMap - agg aggMap + selector export.AggregationSelector + labelEncoder export.LabelEncoder + stateful bool + descKeyIndex descKeyIndexMap + aggCheckpoint aggCheckpointMap } - aggEntry struct { - descriptor *export.Descriptor - labels export.Labels - aggregator export.Aggregator - } + // descKeyIndexMap is a mapping, for each Descriptor, from the + // Key to the position in the descriptor's recommended keys. + descKeyIndexMap map[*export.Descriptor]map[core.Key]int - dkiMap map[*export.Descriptor]map[core.Key]int - aggMap map[string]aggEntry + // aggCheckpointMap is a mapping from encoded label set to current + // export record. If the batcher is stateful, this map is + // never cleared. 
+ aggCheckpointMap map[string]export.Record producer struct { - aggMap aggMap - lencoder export.LabelEncoder + aggCheckpointMap aggCheckpointMap + labelEncoder export.LabelEncoder } ) var _ export.Batcher = &Batcher{} var _ export.Producer = &producer{} -func New(selector export.AggregationSelector, lencoder export.LabelEncoder, stateful bool) *Batcher { +func New(selector export.AggregationSelector, labelEncoder export.LabelEncoder, stateful bool) *Batcher { return &Batcher{ - selector: selector, - lencoder: lencoder, - dki: dkiMap{}, - agg: aggMap{}, - stateful: stateful, + selector: selector, + labelEncoder: labelEncoder, + descKeyIndex: descKeyIndexMap{}, + aggCheckpoint: aggCheckpointMap{}, + stateful: stateful, } } @@ -66,10 +66,10 @@ func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels exp keys := desc.Keys() // Cache the mapping from Descriptor->Key->Index - ki, ok := b.dki[desc] + ki, ok := b.descKeyIndex[desc] if !ok { ki = map[core.Key]int{} - b.dki[desc] = ki + b.descKeyIndex[desc] = ki for i, k := range keys { ki[k] = i @@ -79,28 +79,29 @@ func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels exp // Compute the value list. Note: Unspecified values become // empty strings. TODO: pin this down, we have no appropriate // Value constructor. - canon := make([]core.KeyValue, len(keys)) + outputLabels := make([]core.KeyValue, len(keys)) for i, key := range keys { - canon[i] = key.String("") + outputLabels[i] = key.String("") } // Note also the possibility to speed this computation of - // "encoded" via "canon" in the form of a (Descriptor, + // "encoded" via "outputLabels" in the form of a (Descriptor, // LabelSet)->(Labels, Encoded) cache. for _, kv := range labels.Ordered() { pos, ok := ki[kv.Key] if !ok { continue } - canon[pos].Value = kv.Value + outputLabels[pos].Value = kv.Value } // Compute an encoded lookup key. 
- encoded := b.lencoder.EncodeLabels(canon) + encoded := b.labelEncoder.EncodeLabels(outputLabels) - // Reduce dimensionality. - rag, ok := b.agg[encoded] + // Merge this aggregator with all preceding aggregators that + // map to the same set of `outputLabels` labels. + rag, ok := b.aggCheckpoint[encoded] if !ok { // If this Batcher is stateful, create a copy of the // Aggregator for long-term storage. Otherwise the @@ -113,33 +114,29 @@ func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels exp return err } } - b.agg[encoded] = aggEntry{ - descriptor: desc, - labels: export.NewLabels(canon, encoded, b.lencoder), - aggregator: agg, - } + b.aggCheckpoint[encoded] = export.NewRecord( + desc, + export.NewLabels(outputLabels, encoded, b.labelEncoder), + agg, + ) return nil } - return rag.aggregator.Merge(agg, desc) + return rag.Aggregator().Merge(agg, desc) } func (b *Batcher) ReadCheckpoint() export.Producer { - checkpoint := b.agg + checkpoint := b.aggCheckpoint if !b.stateful { - b.agg = aggMap{} + b.aggCheckpoint = aggCheckpointMap{} } return &producer{ - aggMap: checkpoint, - lencoder: b.lencoder, + aggCheckpointMap: checkpoint, + labelEncoder: b.labelEncoder, } } func (p *producer) ForEach(f func(export.Record)) { - for _, entry := range p.aggMap { - f(export.NewRecord( - entry.descriptor, - entry.labels, - entry.aggregator, - )) + for _, entry := range p.aggCheckpointMap { + f(entry) } } From 13e0580be18ec768d9e48eb06f85de42d50381b1 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 14 Nov 2019 20:58:49 -0800 Subject: [PATCH 66/73] Producer->CheckpointSet; add FinishedCollection --- exporter/metric/stdout/stdout.go | 4 +- exporter/metric/stdout/stdout_test.go | 46 +++++++-------- exporter/metric/test/test.go | 12 ++-- sdk/export/metric/metric.go | 22 +++++--- sdk/metric/batcher/defaultkeys/defaultkeys.go | 56 ++++++++++--------- .../batcher/defaultkeys/defaultkeys_test.go | 32 ++++++----- sdk/metric/batcher/ungrouped/ungrouped.go | 44 
++++++++------- .../batcher/ungrouped/ungrouped_test.go | 30 ++++++---- sdk/metric/benchmark_test.go | 9 ++- sdk/metric/controller/push/push.go | 1 + sdk/metric/controller/push/push_test.go | 42 ++++++++------ sdk/metric/correct_test.go | 5 +- sdk/metric/doc.go | 6 +- sdk/metric/monotone_test.go | 7 ++- sdk/metric/stress_test.go | 7 ++- 15 files changed, 182 insertions(+), 141 deletions(-) diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index ce28315fcff..8fde799c7e7 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -97,7 +97,7 @@ func New(options Options) (*Exporter, error) { }, nil } -func (e *Exporter) Export(_ context.Context, producer export.Producer) error { +func (e *Exporter) Export(_ context.Context, checkpointSet export.CheckpointSet) error { // N.B. Only return one aggError, if any occur. They're likely // to be duplicates of the same error. var aggError error @@ -106,7 +106,7 @@ func (e *Exporter) Export(_ context.Context, producer export.Producer) error { ts := time.Now() batch.Timestamp = &ts } - producer.ForEach(func(record export.Record) { + checkpointSet.ForEach(func(record export.Record) { desc := record.Descriptor() labels := record.Labels() agg := record.Aggregator() diff --git a/exporter/metric/stdout/stdout_test.go b/exporter/metric/stdout/stdout_test.go index 009d1cfbda2..aa80ed22036 100644 --- a/exporter/metric/stdout/stdout_test.go +++ b/exporter/metric/stdout/stdout_test.go @@ -52,8 +52,8 @@ func (fix testFixture) Output() string { return strings.TrimSpace(fix.output.String()) } -func (fix testFixture) Export(producer export.Producer) { - err := fix.exporter.Export(fix.ctx, producer) +func (fix testFixture) Export(checkpointSet export.CheckpointSet) { + err := fix.exporter.Export(fix.ctx, checkpointSet) if err != nil { fix.t.Error("export failed: ", err) } @@ -79,7 +79,7 @@ func TestStdoutTimestamp(t *testing.T) { before := time.Now() - producer := 
test.NewProducer(sdk.DefaultLabelEncoder()) + checkpointSet := test.NewCheckpointSet(sdk.DefaultLabelEncoder()) ctx := context.Background() desc := export.NewDescriptor("test.name", export.GaugeKind, nil, "", "", core.Int64NumberKind, false) @@ -87,9 +87,9 @@ func TestStdoutTimestamp(t *testing.T) { aggtest.CheckedUpdate(t, gagg, core.NewInt64Number(321), desc) gagg.Checkpoint(ctx, desc) - producer.Add(desc, gagg) + checkpointSet.Add(desc, gagg) - if err := exporter.Export(ctx, producer); err != nil { + if err := exporter.Export(ctx, checkpointSet); err != nil { t.Fatal("Unexpected export error: ", err) } @@ -125,16 +125,16 @@ func TestStdoutTimestamp(t *testing.T) { func TestStdoutCounterFormat(t *testing.T) { fix := newFixture(t, stdout.Options{}) - producer := test.NewProducer(sdk.DefaultLabelEncoder()) + checkpointSet := test.NewCheckpointSet(sdk.DefaultLabelEncoder()) desc := export.NewDescriptor("test.name", export.CounterKind, nil, "", "", core.Int64NumberKind, false) cagg := counter.New() aggtest.CheckedUpdate(fix.t, cagg, core.NewInt64Number(123), desc) cagg.Checkpoint(fix.ctx, desc) - producer.Add(desc, cagg, key.String("A", "B"), key.String("C", "D")) + checkpointSet.Add(desc, cagg, key.String("A", "B"), key.String("C", "D")) - fix.Export(producer) + fix.Export(checkpointSet) require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","sum":123}]}`, fix.Output()) } @@ -142,16 +142,16 @@ func TestStdoutCounterFormat(t *testing.T) { func TestStdoutGaugeFormat(t *testing.T) { fix := newFixture(t, stdout.Options{}) - producer := test.NewProducer(sdk.DefaultLabelEncoder()) + checkpointSet := test.NewCheckpointSet(sdk.DefaultLabelEncoder()) desc := export.NewDescriptor("test.name", export.GaugeKind, nil, "", "", core.Float64NumberKind, false) gagg := gauge.New() aggtest.CheckedUpdate(fix.t, gagg, core.NewFloat64Number(123.456), desc) gagg.Checkpoint(fix.ctx, desc) - producer.Add(desc, gagg, key.String("A", "B"), key.String("C", "D")) + checkpointSet.Add(desc, 
gagg, key.String("A", "B"), key.String("C", "D")) - fix.Export(producer) + fix.Export(checkpointSet) require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","last":123.456}]}`, fix.Output()) } @@ -159,7 +159,7 @@ func TestStdoutGaugeFormat(t *testing.T) { func TestStdoutMaxSumCount(t *testing.T) { fix := newFixture(t, stdout.Options{}) - producer := test.NewProducer(sdk.DefaultLabelEncoder()) + checkpointSet := test.NewCheckpointSet(sdk.DefaultLabelEncoder()) desc := export.NewDescriptor("test.name", export.MeasureKind, nil, "", "", core.Float64NumberKind, false) magg := maxsumcount.New() @@ -167,9 +167,9 @@ func TestStdoutMaxSumCount(t *testing.T) { aggtest.CheckedUpdate(fix.t, magg, core.NewFloat64Number(876.543), desc) magg.Checkpoint(fix.ctx, desc) - producer.Add(desc, magg, key.String("A", "B"), key.String("C", "D")) + checkpointSet.Add(desc, magg, key.String("A", "B"), key.String("C", "D")) - fix.Export(producer) + fix.Export(checkpointSet) require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","max":876.543,"sum":999.999,"count":2}]}`, fix.Output()) } @@ -179,7 +179,7 @@ func TestStdoutMeasureFormat(t *testing.T) { PrettyPrint: true, }) - producer := test.NewProducer(sdk.DefaultLabelEncoder()) + checkpointSet := test.NewCheckpointSet(sdk.DefaultLabelEncoder()) desc := export.NewDescriptor("test.name", export.MeasureKind, nil, "", "", core.Float64NumberKind, false) magg := array.New() @@ -190,9 +190,9 @@ func TestStdoutMeasureFormat(t *testing.T) { magg.Checkpoint(fix.ctx, desc) - producer.Add(desc, magg, key.String("A", "B"), key.String("C", "D")) + checkpointSet.Add(desc, magg, key.String("A", "B"), key.String("C", "D")) - fix.Export(producer) + fix.Export(checkpointSet) require.Equal(t, `{ "updates": [ @@ -223,15 +223,15 @@ func TestStdoutMeasureFormat(t *testing.T) { func TestStdoutAggError(t *testing.T) { fix := newFixture(t, stdout.Options{}) - producer := test.NewProducer(sdk.DefaultLabelEncoder()) + checkpointSet := 
test.NewCheckpointSet(sdk.DefaultLabelEncoder()) desc := export.NewDescriptor("test.name", export.MeasureKind, nil, "", "", core.Float64NumberKind, false) magg := ddsketch.New(ddsketch.NewDefaultConfig(), desc) magg.Checkpoint(fix.ctx, desc) - producer.Add(desc, magg) + checkpointSet.Add(desc, magg) - err := fix.exporter.Export(fix.ctx, producer) + err := fix.exporter.Export(fix.ctx, checkpointSet) // An error is returned and NaN values are printed. require.Error(t, err) @@ -242,15 +242,15 @@ func TestStdoutAggError(t *testing.T) { func TestStdoutGaugeNotSet(t *testing.T) { fix := newFixture(t, stdout.Options{}) - producer := test.NewProducer(sdk.DefaultLabelEncoder()) + checkpointSet := test.NewCheckpointSet(sdk.DefaultLabelEncoder()) desc := export.NewDescriptor("test.name", export.GaugeKind, nil, "", "", core.Float64NumberKind, false) gagg := gauge.New() gagg.Checkpoint(fix.ctx, desc) - producer.Add(desc, gagg, key.String("A", "B"), key.String("C", "D")) + checkpointSet.Add(desc, gagg, key.String("A", "B"), key.String("C", "D")) - fix.Export(producer) + fix.Export(checkpointSet) require.Equal(t, `{"updates":null}`, fix.Output()) } diff --git a/exporter/metric/test/test.go b/exporter/metric/test/test.go index 49bef7f57ef..019a3d527ab 100644 --- a/exporter/metric/test/test.go +++ b/exporter/metric/test/test.go @@ -5,29 +5,29 @@ import ( export "go.opentelemetry.io/otel/sdk/export/metric" ) -type Producer struct { +type CheckpointSet struct { encoder export.LabelEncoder updates []export.Record } -func NewProducer(encoder export.LabelEncoder) *Producer { - return &Producer{ +func NewCheckpointSet(encoder export.LabelEncoder) *CheckpointSet { + return &CheckpointSet{ encoder: encoder, } } -func (p *Producer) Reset() { +func (p *CheckpointSet) Reset() { p.updates = nil } -func (p *Producer) Add(desc *export.Descriptor, agg export.Aggregator, labels ...core.KeyValue) { +func (p *CheckpointSet) Add(desc *export.Descriptor, agg export.Aggregator, labels ...core.KeyValue) 
{ encoded := p.encoder.EncodeLabels(labels) elabels := export.NewLabels(labels, encoded, p.encoder) p.updates = append(p.updates, export.NewRecord(desc, elabels, agg)) } -func (p *Producer) ForEach(f func(export.Record)) { +func (p *CheckpointSet) ForEach(f func(export.Record)) { for _, r := range p.updates { f(r) } diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 0944916e57f..09d360a2096 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -80,8 +80,13 @@ type Batcher interface { // ReadCheckpoint is the interface used by the controller to // access the fully aggregated checkpoint after collection. // - // The returned Producer is passed to the Exporter. - ReadCheckpoint() Producer + // The returned CheckpointSet is passed to the Exporter. + ReadCheckpoint() CheckpointSet + + // FinishedCollection informs the Batcher that a complete + // collection round was completed. Stateless batchers might + // reset state in this method, for example. + FinishedCollection() } // AggregationSelector supports selecting the kind of Aggregator to @@ -161,9 +166,9 @@ type Exporter interface { // The Context comes from the controller that initiated // collection. // - // The Producer interface refers to the Batcher that just + // The CheckpointSet interface refers to the Batcher that just // completed collection. - Export(context.Context, Producer) error + Export(context.Context, CheckpointSet) error } // LabelEncoder enables an optimization for export pipelines that use @@ -189,13 +194,14 @@ type LabelEncoder interface { EncodeLabels([]core.KeyValue) string } -// Producer allows a controller to access a complete checkpoint of +// CheckpointSet allows a controller to access a complete checkpoint of // aggregated metrics from the Batcher. This is passed to the // Exporter which may then use ForEach to iterate over the collection // of aggregated metrics. 
-type Producer interface { - // ForEach iterates over all metrics that were updated during - // the last collection period. +type CheckpointSet interface { + // ForEach iterates over aggregated checkpoints for all + // metrics that were updated during the last collection + // period. ForEach(func(Record)) } diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys.go b/sdk/metric/batcher/defaultkeys/defaultkeys.go index 24b1ffda9fe..cbe16e00b31 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys.go @@ -39,14 +39,14 @@ type ( // never cleared. aggCheckpointMap map[string]export.Record - producer struct { + checkpointSet struct { aggCheckpointMap aggCheckpointMap labelEncoder export.LabelEncoder } ) var _ export.Batcher = &Batcher{} -var _ export.Producer = &producer{} +var _ export.CheckpointSet = &checkpointSet{} func New(selector export.AggregationSelector, labelEncoder export.LabelEncoder, stateful bool) *Batcher { return &Batcher{ @@ -102,40 +102,42 @@ func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels exp // Merge this aggregator with all preceding aggregators that // map to the same set of `outputLabels` labels. rag, ok := b.aggCheckpoint[encoded] - if !ok { - // If this Batcher is stateful, create a copy of the - // Aggregator for long-term storage. Otherwise the - // Meter implementation will checkpoint the aggregator - // again, overwriting the long-lived state. - if b.stateful { - tmp := agg - agg = b.AggregatorFor(desc) - if err := agg.Merge(tmp, desc); err != nil { - return err - } + if ok { + return rag.Aggregator().Merge(agg, desc) + } + // If this Batcher is stateful, create a copy of the + // Aggregator for long-term storage. Otherwise the + // Meter implementation will checkpoint the aggregator + // again, overwriting the long-lived state. 
+ if b.stateful { + tmp := agg + agg = b.AggregatorFor(desc) + if err := agg.Merge(tmp, desc); err != nil { + return err } - b.aggCheckpoint[encoded] = export.NewRecord( - desc, - export.NewLabels(outputLabels, encoded, b.labelEncoder), - agg, - ) - return nil } - return rag.Aggregator().Merge(agg, desc) + b.aggCheckpoint[encoded] = export.NewRecord( + desc, + export.NewLabels(outputLabels, encoded, b.labelEncoder), + agg, + ) + return nil +} + +func (b *Batcher) ReadCheckpoint() export.CheckpointSet { + return &checkpointSet{ + aggCheckpointMap: b.aggCheckpoint, + labelEncoder: b.labelEncoder, + } } -func (b *Batcher) ReadCheckpoint() export.Producer { - checkpoint := b.aggCheckpoint +func (b *Batcher) FinishedCollection() { if !b.stateful { b.aggCheckpoint = aggCheckpointMap{} } - return &producer{ - aggCheckpointMap: checkpoint, - labelEncoder: b.labelEncoder, - } } -func (p *producer) ForEach(f func(export.Record)) { +func (p *checkpointSet) ForEach(f func(export.Record)) { for _, entry := range p.aggCheckpointMap { f(entry) } diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys_test.go b/sdk/metric/batcher/defaultkeys/defaultkeys_test.go index 7c021afeff9..816351ab0ff 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys_test.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys_test.go @@ -38,10 +38,11 @@ func TestGroupingStateless(t *testing.T) { _ = b.Process(ctx, test.CounterDesc, test.Labels2, test.CounterAgg(20)) _ = b.Process(ctx, test.CounterDesc, test.Labels3, test.CounterAgg(40)) - processor := b.ReadCheckpoint() + checkpointSet := b.ReadCheckpoint() + b.FinishedCollection() records := test.Output{} - processor.ForEach(records.AddTo) + checkpointSet.ForEach(records.AddTo) // Output gauge should have only the "G=H" and "G=" keys. // Output counter should have only the "C=D" and "C=" keys. 
@@ -52,9 +53,10 @@ func TestGroupingStateless(t *testing.T) { "gauge/G=": 30, // labels3 = last value }, records) - // Verify that state was reset - processor = b.ReadCheckpoint() - processor.ForEach(func(rec export.Record) { + // Verify that state is reset by FinishedCollection() + checkpointSet = b.ReadCheckpoint() + b.FinishedCollection() + checkpointSet.ForEach(func(rec export.Record) { t.Fatal("Unexpected call") }) } @@ -66,20 +68,22 @@ func TestGroupingStateful(t *testing.T) { cagg := test.CounterAgg(10) _ = b.Process(ctx, test.CounterDesc, test.Labels1, cagg) - processor := b.ReadCheckpoint() + checkpointSet := b.ReadCheckpoint() + b.FinishedCollection() records1 := test.Output{} - processor.ForEach(records1.AddTo) + checkpointSet.ForEach(records1.AddTo) require.EqualValues(t, map[string]int64{ "counter/C=D": 10, // labels1 }, records1) // Test that state was NOT reset - processor = b.ReadCheckpoint() + checkpointSet = b.ReadCheckpoint() + b.FinishedCollection() records2 := test.Output{} - processor.ForEach(records2.AddTo) + checkpointSet.ForEach(records2.AddTo) require.EqualValues(t, records1, records2) @@ -89,20 +93,22 @@ func TestGroupingStateful(t *testing.T) { // As yet cagg has not been passed to Batcher.Process. Should // not see an update. 
- processor = b.ReadCheckpoint() + checkpointSet = b.ReadCheckpoint() + b.FinishedCollection() records3 := test.Output{} - processor.ForEach(records3.AddTo) + checkpointSet.ForEach(records3.AddTo) require.EqualValues(t, records1, records3) // Now process the second update _ = b.Process(ctx, test.CounterDesc, test.Labels1, cagg) - processor = b.ReadCheckpoint() + checkpointSet = b.ReadCheckpoint() + b.FinishedCollection() records4 := test.Output{} - processor.ForEach(records4.AddTo) + checkpointSet.ForEach(records4.AddTo) require.EqualValues(t, map[string]int64{ "counter/C=D": 30, diff --git a/sdk/metric/batcher/ungrouped/ungrouped.go b/sdk/metric/batcher/ungrouped/ungrouped.go index 6ceda972be4..6b4133b4131 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped.go +++ b/sdk/metric/batcher/ungrouped/ungrouped.go @@ -41,7 +41,7 @@ type ( ) var _ export.Batcher = &Batcher{} -var _ export.Producer = batchMap{} +var _ export.CheckpointSet = batchMap{} func New(selector export.AggregationSelector, stateful bool) *Batcher { return &Batcher{ @@ -61,33 +61,35 @@ func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels exp encoded: labels.Encoded(), } value, ok := b.batchMap[key] - if !ok { - // If this Batcher is stateful, create a copy of the - // Aggregator for long-term storage. Otherwise the - // Meter implementation will checkpoint the aggregator - // again, overwriting the long-lived state. - if b.stateful { - tmp := agg - agg = b.AggregatorFor(desc) - if err := agg.Merge(tmp, desc); err != nil { - return err - } - } - b.batchMap[key] = batchValue{ - aggregator: agg, - labels: labels, + if ok { + return value.aggregator.Merge(agg, desc) + } + // If this Batcher is stateful, create a copy of the + // Aggregator for long-term storage. Otherwise the + // Meter implementation will checkpoint the aggregator + // again, overwriting the long-lived state. 
+ if b.stateful { + tmp := agg + agg = b.AggregatorFor(desc) + if err := agg.Merge(tmp, desc); err != nil { + return err } - return nil } - return value.aggregator.Merge(agg, desc) + b.batchMap[key] = batchValue{ + aggregator: agg, + labels: labels, + } + return nil +} + +func (b *Batcher) ReadCheckpoint() export.CheckpointSet { + return b.batchMap } -func (b *Batcher) ReadCheckpoint() export.Producer { - checkpoint := b.batchMap +func (b *Batcher) FinishedCollection() { if !b.stateful { b.batchMap = batchMap{} } - return checkpoint } func (c batchMap) ForEach(f func(export.Record)) { diff --git a/sdk/metric/batcher/ungrouped/ungrouped_test.go b/sdk/metric/batcher/ungrouped/ungrouped_test.go index 40da3269079..737ceacb22f 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped_test.go +++ b/sdk/metric/batcher/ungrouped/ungrouped_test.go @@ -48,10 +48,11 @@ func TestUngroupedStateless(t *testing.T) { // Another counter Add for Labels1 _ = b.Process(ctx, test.CounterDesc, test.Labels1, test.CounterAgg(50)) - processor := b.ReadCheckpoint() + checkpointSet := b.ReadCheckpoint() + b.FinishedCollection() records := test.Output{} - processor.ForEach(records.AddTo) + checkpointSet.ForEach(records.AddTo) // Output gauge should have only the "G=H" and "G=" keys. // Output counter should have only the "C=D" and "C=" keys. 
@@ -65,8 +66,9 @@ func TestUngroupedStateless(t *testing.T) { }, records) // Verify that state was reset - processor = b.ReadCheckpoint() - processor.ForEach(func(rec export.Record) { + checkpointSet = b.ReadCheckpoint() + b.FinishedCollection() + checkpointSet.ForEach(func(rec export.Record) { t.Fatal("Unexpected call") }) } @@ -78,20 +80,22 @@ func TestUngroupedStateful(t *testing.T) { cagg := test.CounterAgg(10) _ = b.Process(ctx, test.CounterDesc, test.Labels1, cagg) - processor := b.ReadCheckpoint() + checkpointSet := b.ReadCheckpoint() + b.FinishedCollection() records1 := test.Output{} - processor.ForEach(records1.AddTo) + checkpointSet.ForEach(records1.AddTo) require.EqualValues(t, map[string]int64{ "counter/G~H&C~D": 10, // labels1 }, records1) // Test that state was NOT reset - processor = b.ReadCheckpoint() + checkpointSet = b.ReadCheckpoint() + b.FinishedCollection() records2 := test.Output{} - processor.ForEach(records2.AddTo) + checkpointSet.ForEach(records2.AddTo) require.EqualValues(t, records1, records2) @@ -101,20 +105,22 @@ func TestUngroupedStateful(t *testing.T) { // As yet cagg has not been passed to Batcher.Process. Should // not see an update. 
- processor = b.ReadCheckpoint() + checkpointSet = b.ReadCheckpoint() + b.FinishedCollection() records3 := test.Output{} - processor.ForEach(records3.AddTo) + checkpointSet.ForEach(records3.AddTo) require.EqualValues(t, records1, records3) // Now process the second update _ = b.Process(ctx, test.CounterDesc, test.Labels1, cagg) - processor = b.ReadCheckpoint() + checkpointSet = b.ReadCheckpoint() + b.FinishedCollection() records4 := test.Output{} - processor.ForEach(records4.AddTo) + checkpointSet.ForEach(records4.AddTo) require.EqualValues(t, map[string]int64{ "counter/G~H&C~D": 30, diff --git a/sdk/metric/benchmark_test.go b/sdk/metric/benchmark_test.go index a63ab87ff24..c490108c1f3 100644 --- a/sdk/metric/benchmark_test.go +++ b/sdk/metric/benchmark_test.go @@ -46,7 +46,7 @@ func newFixture(b *testing.B) *benchFixture { return bf } -func (bf *benchFixture) AggregatorFor(descriptor *export.Descriptor) export.Aggregator { +func (*benchFixture) AggregatorFor(descriptor *export.Descriptor) export.Aggregator { switch descriptor.MetricKind() { case export.CounterKind: return counter.New() @@ -64,14 +64,17 @@ func (bf *benchFixture) AggregatorFor(descriptor *export.Descriptor) export.Aggr return nil } -func (bf *benchFixture) Process(context.Context, *export.Descriptor, export.Labels, export.Aggregator) error { +func (*benchFixture) Process(context.Context, *export.Descriptor, export.Labels, export.Aggregator) error { return nil } -func (bf *benchFixture) ReadCheckpoint() export.Producer { +func (*benchFixture) ReadCheckpoint() export.CheckpointSet { return nil } +func (*benchFixture) FinishedCollection() { +} + func makeLabelSets(n int) [][]core.KeyValue { r := make([][]core.KeyValue, n) diff --git a/sdk/metric/controller/push/push.go b/sdk/metric/controller/push/push.go index b864db17898..08613a23511 100644 --- a/sdk/metric/controller/push/push.go +++ b/sdk/metric/controller/push/push.go @@ -141,6 +141,7 @@ func (c *Controller) tick() { ctx := context.Background() 
c.sdk.Collect(ctx) err := c.exporter.Export(ctx, c.batcher.ReadCheckpoint()) + c.batcher.FinishedCollection() if err != nil { c.errorHandler(err) diff --git a/sdk/metric/controller/push/push_test.go b/sdk/metric/controller/push/push_test.go index 5f59b3d5063..6ac14a2cc66 100644 --- a/sdk/metric/controller/push/push_test.go +++ b/sdk/metric/controller/push/push_test.go @@ -33,9 +33,9 @@ import ( ) type testBatcher struct { - t *testing.T - producer *test.Producer - checkpoints int + t *testing.T + checkpointSet *test.CheckpointSet + checkpoints int } type testExporter struct { @@ -46,25 +46,25 @@ type testExporter struct { } type testFixture struct { - producer *test.Producer - batcher *testBatcher - exporter *testExporter + checkpointSet *test.CheckpointSet + batcher *testBatcher + exporter *testExporter } func newFixture(t *testing.T) testFixture { - producer := test.NewProducer(sdk.DefaultLabelEncoder()) + checkpointSet := test.NewCheckpointSet(sdk.DefaultLabelEncoder()) batcher := &testBatcher{ - t: t, - producer: producer, + t: t, + checkpointSet: checkpointSet, } exporter := &testExporter{ t: t, } return testFixture{ - producer: producer, - batcher: batcher, - exporter: exporter, + checkpointSet: checkpointSet, + batcher: batcher, + exporter: exporter, } } @@ -72,19 +72,22 @@ func (b *testBatcher) AggregatorFor(*export.Descriptor) export.Aggregator { return counter.New() } -func (b *testBatcher) ReadCheckpoint() export.Producer { +func (b *testBatcher) ReadCheckpoint() export.CheckpointSet { b.checkpoints++ - return b.producer + return b.checkpointSet +} + +func (*testBatcher) FinishedCollection() { } func (b *testBatcher) Process(_ context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { - b.producer.Add(desc, agg, labels.Ordered()...) + b.checkpointSet.Add(desc, agg, labels.Ordered()...) 
return nil } -func (e *testExporter) Export(_ context.Context, producer export.Producer) error { +func (e *testExporter) Export(_ context.Context, checkpointSet export.CheckpointSet) error { e.exports++ - producer.ForEach(func(r export.Record) { + checkpointSet.ForEach(func(r export.Record) { e.records = append(e.records, r) }) return e.retErr @@ -139,7 +142,7 @@ func TestPushTicker(t *testing.T) { require.Equal(t, int64(3), sum.AsInt64()) require.Nil(t, err) - fix.producer.Reset() + fix.checkpointSet.Reset() fix.exporter.records = nil counter.Add(ctx, 7, meter.Labels()) @@ -188,3 +191,6 @@ func TestPushExportError(t *testing.T) { p.Stop() } + +// TODO add a test that FinishedCollection() is callled +// TODO remove the clock import from push.go diff --git a/sdk/metric/correct_test.go b/sdk/metric/correct_test.go index 087f0fff175..f989fedfed7 100644 --- a/sdk/metric/correct_test.go +++ b/sdk/metric/correct_test.go @@ -45,11 +45,14 @@ func (cb *correctnessBatcher) AggregatorFor(*export.Descriptor) export.Aggregato return cb.agg } -func (cb *correctnessBatcher) ReadCheckpoint() export.Producer { +func (cb *correctnessBatcher) ReadCheckpoint() export.CheckpointSet { cb.t.Fatal("Should not be called") return nil } +func (*correctnessBatcher) FinishedCollection() { +} + func (cb *correctnessBatcher) Process(_ context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { cb.records = append(cb.records, export.NewRecord(desc, labels, agg)) return nil diff --git a/sdk/metric/doc.go b/sdk/metric/doc.go index 3dd400b47e9..94eeb253ac0 100644 --- a/sdk/metric/doc.go +++ b/sdk/metric/doc.go @@ -141,9 +141,9 @@ provide the serialization logic for labels. This allows avoiding duplicate serialization of labels, once as a unique key in the SDK (or Batcher) and once in the exporter. -Producer is an interface between the Batcher and the Exporter. +CheckpointSet is an interface between the Batcher and the Exporter. 
After completing a collection pass, the Batcher.ReadCheckpoint() -method returns a Producer, which the Exporter uses to iterate over all +method returns a CheckpointSet, which the Exporter uses to iterate over all the updated metrics. Record is a struct containing the state of an individual exported @@ -154,7 +154,7 @@ Labels is a struct containing an ordered set of labels, the corresponding unique encoding, and the encoder that produced it. Exporter is the final stage of an export pipeline. It is called with -a Producer capable of enumerating all the updated metrics. +a CheckpointSet capable of enumerating all the updated metrics. Controller is not an export interface per se, but it orchestrates the export pipeline. For example, a "push" controller will establish a diff --git a/sdk/metric/monotone_test.go b/sdk/metric/monotone_test.go index 7af1ef91e02..969bd5db9a9 100644 --- a/sdk/metric/monotone_test.go +++ b/sdk/metric/monotone_test.go @@ -38,14 +38,17 @@ type monotoneBatcher struct { currentTime *time.Time } -func (m *monotoneBatcher) AggregatorFor(*export.Descriptor) export.Aggregator { +func (*monotoneBatcher) AggregatorFor(*export.Descriptor) export.Aggregator { return gauge.New() } -func (m *monotoneBatcher) ReadCheckpoint() export.Producer { +func (*monotoneBatcher) ReadCheckpoint() export.CheckpointSet { return nil } +func (*monotoneBatcher) FinishedCollection() { +} + func (m *monotoneBatcher) Process(_ context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { require.Equal(m.t, "my.gauge.name", desc.Name()) require.Equal(m.t, 1, labels.Len()) diff --git a/sdk/metric/stress_test.go b/sdk/metric/stress_test.go index 6779c4e14d1..feaa16c3dec 100644 --- a/sdk/metric/stress_test.go +++ b/sdk/metric/stress_test.go @@ -228,7 +228,7 @@ func (f *testFixture) preCollect() { f.dupCheck = map[testKey]int{} } -func (f *testFixture) AggregatorFor(descriptor *export.Descriptor) export.Aggregator { +func (*testFixture) 
AggregatorFor(descriptor *export.Descriptor) export.Aggregator { switch descriptor.MetricKind() { case export.CounterKind: return counter.New() @@ -239,10 +239,13 @@ func (f *testFixture) AggregatorFor(descriptor *export.Descriptor) export.Aggreg } } -func (f *testFixture) ReadCheckpoint() export.Producer { +func (*testFixture) ReadCheckpoint() export.CheckpointSet { return nil } +func (*testFixture) FinishedCollection() { +} + func (f *testFixture) Process(ctx context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { key := testKey{ labels: canonicalizeLabels(labels.Ordered()), From b75059e17e8db4f44154c8f5e06d39d6496d138f Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 14 Nov 2019 22:17:29 -0800 Subject: [PATCH 67/73] Process takes an export.Record --- exporter/metric/stdout/stdout.go | 7 +++---- sdk/export/metric/metric.go | 18 ++++++++--------- sdk/metric/batcher/defaultkeys/defaultkeys.go | 6 ++++-- .../batcher/defaultkeys/defaultkeys_test.go | 16 +++++++-------- sdk/metric/batcher/ungrouped/ungrouped.go | 8 +++++--- .../batcher/ungrouped/ungrouped_test.go | 20 +++++++++---------- sdk/metric/benchmark_test.go | 2 +- sdk/metric/controller/push/push_test.go | 4 ++-- sdk/metric/correct_test.go | 4 ++-- sdk/metric/monotone_test.go | 12 +++++------ sdk/metric/sdk.go | 2 +- sdk/metric/stress_test.go | 9 +++++---- 12 files changed, 55 insertions(+), 53 deletions(-) diff --git a/exporter/metric/stdout/stdout.go b/exporter/metric/stdout/stdout.go index 8fde799c7e7..7a91959e270 100644 --- a/exporter/metric/stdout/stdout.go +++ b/exporter/metric/stdout/stdout.go @@ -31,6 +31,8 @@ type Exporter struct { options Options } +var _ export.Exporter = &Exporter{} + // Options are the options to be used when initializing a stdout export. type Options struct { // File is the destination. If not set, os.Stdout is used. 
@@ -77,8 +79,6 @@ type expoQuantile struct { V interface{} `json:"v"` } -var _ export.Exporter = &Exporter{} - func New(options Options) (*Exporter, error) { if options.File == nil { options.File = os.Stdout @@ -108,7 +108,6 @@ func (e *Exporter) Export(_ context.Context, checkpointSet export.CheckpointSet) } checkpointSet.ForEach(func(record export.Record) { desc := record.Descriptor() - labels := record.Labels() agg := record.Aggregator() kind := desc.NumberKind() @@ -186,7 +185,7 @@ func (e *Exporter) Export(_ context.Context, checkpointSet export.CheckpointSet) sb.WriteString(desc.Name()) - if labels.Len() > 0 { + if labels := record.Labels(); labels.Len() > 0 { sb.WriteRune('{') sb.WriteString(labels.Encoded()) sb.WriteRune('}') diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 09d360a2096..ca2def088aa 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -63,19 +63,17 @@ type Batcher interface { AggregationSelector // Process is called by the SDK once per internal record, - // passing the descriptor, the corresponding labels, and the - // checkpointed aggregator. The Batcher should be prepared to - // process duplicate (Descriptor, Labels) pairs during this - // pass due to race conditions, but this will usually be the - // ordinary course of events, as Aggregators are merged to - // reduce their dimensionality (i.e., group-by). + // passing the export Record (a Descriptor, the corresponding + // Labels, and the checkpointed Aggregator). The Batcher + // should be prepared to process duplicate (Descriptor, + // Labels) pairs during this pass due to race conditions, but + // this will usually be the ordinary course of events, as + // Aggregators are typically merged according the output set + // of labels. // // The Context argument originates from the controller that // orchestrates collection. 
- Process(ctx context.Context, - descriptor *Descriptor, - labels Labels, - aggregator Aggregator) error + Process(ctx context.Context, record Record) error // ReadCheckpoint is the interface used by the controller to // access the fully aggregated checkpoint after collection. diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys.go b/sdk/metric/batcher/defaultkeys/defaultkeys.go index cbe16e00b31..e4282c7ab45 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys.go @@ -62,7 +62,8 @@ func (b *Batcher) AggregatorFor(descriptor *export.Descriptor) export.Aggregator return b.selector.AggregatorFor(descriptor) } -func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { +func (b *Batcher) Process(_ context.Context, record export.Record) error { + desc := record.Descriptor() keys := desc.Keys() // Cache the mapping from Descriptor->Key->Index @@ -88,7 +89,7 @@ func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels exp // Note also the possibility to speed this computation of // "encoded" via "outputLabels" in the form of a (Descriptor, // LabelSet)->(Labels, Encoded) cache. - for _, kv := range labels.Ordered() { + for _, kv := range record.Labels().Ordered() { pos, ok := ki[kv.Key] if !ok { continue @@ -101,6 +102,7 @@ func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels exp // Merge this aggregator with all preceding aggregators that // map to the same set of `outputLabels` labels. 
+ agg := record.Aggregator() rag, ok := b.aggCheckpoint[encoded] if ok { return rag.Aggregator().Merge(agg, desc) diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys_test.go b/sdk/metric/batcher/defaultkeys/defaultkeys_test.go index 816351ab0ff..36aea7cc1b5 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys_test.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys_test.go @@ -30,13 +30,13 @@ func TestGroupingStateless(t *testing.T) { ctx := context.Background() b := defaultkeys.New(test.NewAggregationSelector(), test.GroupEncoder, false) - _ = b.Process(ctx, test.GaugeDesc, test.Labels1, test.GaugeAgg(10)) - _ = b.Process(ctx, test.GaugeDesc, test.Labels2, test.GaugeAgg(20)) - _ = b.Process(ctx, test.GaugeDesc, test.Labels3, test.GaugeAgg(30)) + _ = b.Process(ctx, export.NewRecord(test.GaugeDesc, test.Labels1, test.GaugeAgg(10))) + _ = b.Process(ctx, export.NewRecord(test.GaugeDesc, test.Labels2, test.GaugeAgg(20))) + _ = b.Process(ctx, export.NewRecord(test.GaugeDesc, test.Labels3, test.GaugeAgg(30))) - _ = b.Process(ctx, test.CounterDesc, test.Labels1, test.CounterAgg(10)) - _ = b.Process(ctx, test.CounterDesc, test.Labels2, test.CounterAgg(20)) - _ = b.Process(ctx, test.CounterDesc, test.Labels3, test.CounterAgg(40)) + _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, test.CounterAgg(10))) + _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels2, test.CounterAgg(20))) + _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels3, test.CounterAgg(40))) checkpointSet := b.ReadCheckpoint() b.FinishedCollection() @@ -66,7 +66,7 @@ func TestGroupingStateful(t *testing.T) { b := defaultkeys.New(test.NewAggregationSelector(), test.GroupEncoder, true) cagg := test.CounterAgg(10) - _ = b.Process(ctx, test.CounterDesc, test.Labels1, cagg) + _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, cagg)) checkpointSet := b.ReadCheckpoint() b.FinishedCollection() @@ -102,7 +102,7 @@ func TestGroupingStateful(t 
*testing.T) { require.EqualValues(t, records1, records3) // Now process the second update - _ = b.Process(ctx, test.CounterDesc, test.Labels1, cagg) + _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, cagg)) checkpointSet = b.ReadCheckpoint() b.FinishedCollection() diff --git a/sdk/metric/batcher/ungrouped/ungrouped.go b/sdk/metric/batcher/ungrouped/ungrouped.go index 6b4133b4131..f7a392f5ea0 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped.go +++ b/sdk/metric/batcher/ungrouped/ungrouped.go @@ -55,11 +55,13 @@ func (b *Batcher) AggregatorFor(descriptor *export.Descriptor) export.Aggregator return b.selector.AggregatorFor(descriptor) } -func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { +func (b *Batcher) Process(_ context.Context, record export.Record) error { + desc := record.Descriptor() key := batchKey{ descriptor: desc, - encoded: labels.Encoded(), + encoded: record.Labels().Encoded(), } + agg := record.Aggregator() value, ok := b.batchMap[key] if ok { return value.aggregator.Merge(agg, desc) @@ -77,7 +79,7 @@ func (b *Batcher) Process(_ context.Context, desc *export.Descriptor, labels exp } b.batchMap[key] = batchValue{ aggregator: agg, - labels: labels, + labels: record.Labels(), } return nil } diff --git a/sdk/metric/batcher/ungrouped/ungrouped_test.go b/sdk/metric/batcher/ungrouped/ungrouped_test.go index 737ceacb22f..ddfedf6ff4c 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped_test.go +++ b/sdk/metric/batcher/ungrouped/ungrouped_test.go @@ -33,20 +33,20 @@ func TestUngroupedStateless(t *testing.T) { b := ungrouped.New(test.NewAggregationSelector(), false) // Set initial gauge values - _ = b.Process(ctx, test.GaugeDesc, test.Labels1, test.GaugeAgg(10)) - _ = b.Process(ctx, test.GaugeDesc, test.Labels2, test.GaugeAgg(20)) - _ = b.Process(ctx, test.GaugeDesc, test.Labels3, test.GaugeAgg(30)) + _ = b.Process(ctx, export.NewRecord(test.GaugeDesc, test.Labels1, 
test.GaugeAgg(10))) + _ = b.Process(ctx, export.NewRecord(test.GaugeDesc, test.Labels2, test.GaugeAgg(20))) + _ = b.Process(ctx, export.NewRecord(test.GaugeDesc, test.Labels3, test.GaugeAgg(30))) // Another gauge Set for Labels1 - _ = b.Process(ctx, test.GaugeDesc, test.Labels1, test.GaugeAgg(50)) + _ = b.Process(ctx, export.NewRecord(test.GaugeDesc, test.Labels1, test.GaugeAgg(50))) // Set initial counter values - _ = b.Process(ctx, test.CounterDesc, test.Labels1, test.CounterAgg(10)) - _ = b.Process(ctx, test.CounterDesc, test.Labels2, test.CounterAgg(20)) - _ = b.Process(ctx, test.CounterDesc, test.Labels3, test.CounterAgg(40)) + _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, test.CounterAgg(10))) + _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels2, test.CounterAgg(20))) + _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels3, test.CounterAgg(40))) // Another counter Add for Labels1 - _ = b.Process(ctx, test.CounterDesc, test.Labels1, test.CounterAgg(50)) + _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, test.CounterAgg(50))) checkpointSet := b.ReadCheckpoint() b.FinishedCollection() @@ -78,7 +78,7 @@ func TestUngroupedStateful(t *testing.T) { b := ungrouped.New(test.NewAggregationSelector(), true) cagg := test.CounterAgg(10) - _ = b.Process(ctx, test.CounterDesc, test.Labels1, cagg) + _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, cagg)) checkpointSet := b.ReadCheckpoint() b.FinishedCollection() @@ -114,7 +114,7 @@ func TestUngroupedStateful(t *testing.T) { require.EqualValues(t, records1, records3) // Now process the second update - _ = b.Process(ctx, test.CounterDesc, test.Labels1, cagg) + _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, cagg)) checkpointSet = b.ReadCheckpoint() b.FinishedCollection() diff --git a/sdk/metric/benchmark_test.go b/sdk/metric/benchmark_test.go index c490108c1f3..99afebd75ee 100644 --- a/sdk/metric/benchmark_test.go +++ 
b/sdk/metric/benchmark_test.go @@ -64,7 +64,7 @@ func (*benchFixture) AggregatorFor(descriptor *export.Descriptor) export.Aggrega return nil } -func (*benchFixture) Process(context.Context, *export.Descriptor, export.Labels, export.Aggregator) error { +func (*benchFixture) Process(context.Context, export.Record) error { return nil } diff --git a/sdk/metric/controller/push/push_test.go b/sdk/metric/controller/push/push_test.go index 6ac14a2cc66..cc416d55ba2 100644 --- a/sdk/metric/controller/push/push_test.go +++ b/sdk/metric/controller/push/push_test.go @@ -80,8 +80,8 @@ func (b *testBatcher) ReadCheckpoint() export.CheckpointSet { func (*testBatcher) FinishedCollection() { } -func (b *testBatcher) Process(_ context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { - b.checkpointSet.Add(desc, agg, labels.Ordered()...) +func (b *testBatcher) Process(_ context.Context, record export.Record) error { + b.checkpointSet.Add(record.Descriptor(), record.Aggregator(), record.Labels().Ordered()...) 
return nil } diff --git a/sdk/metric/correct_test.go b/sdk/metric/correct_test.go index f989fedfed7..6c91ebfb762 100644 --- a/sdk/metric/correct_test.go +++ b/sdk/metric/correct_test.go @@ -53,8 +53,8 @@ func (cb *correctnessBatcher) ReadCheckpoint() export.CheckpointSet { func (*correctnessBatcher) FinishedCollection() { } -func (cb *correctnessBatcher) Process(_ context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { - cb.records = append(cb.records, export.NewRecord(desc, labels, agg)) +func (cb *correctnessBatcher) Process(_ context.Context, record export.Record) error { + cb.records = append(cb.records, record) return nil } diff --git a/sdk/metric/monotone_test.go b/sdk/metric/monotone_test.go index 969bd5db9a9..5fc4674d13b 100644 --- a/sdk/metric/monotone_test.go +++ b/sdk/metric/monotone_test.go @@ -49,13 +49,13 @@ func (*monotoneBatcher) ReadCheckpoint() export.CheckpointSet { func (*monotoneBatcher) FinishedCollection() { } -func (m *monotoneBatcher) Process(_ context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { - require.Equal(m.t, "my.gauge.name", desc.Name()) - require.Equal(m.t, 1, labels.Len()) - require.Equal(m.t, "a", string(labels.Ordered()[0].Key)) - require.Equal(m.t, "b", labels.Ordered()[0].Value.Emit()) +func (m *monotoneBatcher) Process(_ context.Context, record export.Record) error { + require.Equal(m.t, "my.gauge.name", record.Descriptor().Name()) + require.Equal(m.t, 1, record.Labels().Len()) + require.Equal(m.t, "a", string(record.Labels().Ordered()[0].Key)) + require.Equal(m.t, "b", record.Labels().Ordered()[0].Value.Emit()) - gauge := agg.(*gauge.Aggregator) + gauge := record.Aggregator().(*gauge.Aggregator) val, ts, err := gauge.LastValue() require.Nil(m.t, err) diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go index 9cf157f5f9c..b0c07e957da 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -425,7 +425,7 @@ func (m *SDK) checkpoint(ctx 
context.Context, r *record) int { } r.recorder.Checkpoint(ctx, r.descriptor) labels := export.NewLabels(r.labels.sorted, r.labels.encoded, m.lencoder) - err := m.batcher.Process(ctx, r.descriptor, labels, r.recorder) + err := m.batcher.Process(ctx, export.NewRecord(r.descriptor, labels, r.recorder)) if err != nil { m.errorHandler(err) diff --git a/sdk/metric/stress_test.go b/sdk/metric/stress_test.go index feaa16c3dec..2f8b8e175e4 100644 --- a/sdk/metric/stress_test.go +++ b/sdk/metric/stress_test.go @@ -246,10 +246,10 @@ func (*testFixture) ReadCheckpoint() export.CheckpointSet { func (*testFixture) FinishedCollection() { } -func (f *testFixture) Process(ctx context.Context, desc *export.Descriptor, labels export.Labels, agg export.Aggregator) error { +func (f *testFixture) Process(_ context.Context, record export.Record) error { key := testKey{ - labels: canonicalizeLabels(labels.Ordered()), - descriptor: desc, + labels: canonicalizeLabels(record.Labels().Ordered()), + descriptor: record.Descriptor(), } if f.dupCheck[key] == 0 { f.dupCheck[key]++ @@ -259,7 +259,8 @@ func (f *testFixture) Process(ctx context.Context, desc *export.Descriptor, labe actual, _ := f.received.LoadOrStore(key, f.impl.newStore()) - switch desc.MetricKind() { + agg := record.Aggregator() + switch record.Descriptor().MetricKind() { case export.CounterKind: counter := agg.(aggregator.Sum) sum, err := counter.Sum() From d03709a2483059601489892cb3c4f73c19193865 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 14 Nov 2019 22:22:03 -0800 Subject: [PATCH 68/73] ReadCheckpoint->CheckpointSet --- sdk/export/metric/metric.go | 6 +++--- sdk/metric/batcher/defaultkeys/defaultkeys.go | 2 +- sdk/metric/batcher/defaultkeys/defaultkeys_test.go | 12 ++++++------ sdk/metric/batcher/ungrouped/ungrouped.go | 2 +- sdk/metric/batcher/ungrouped/ungrouped_test.go | 12 ++++++------ sdk/metric/benchmark_test.go | 2 +- sdk/metric/controller/push/push.go | 2 +- sdk/metric/controller/push/push_test.go | 2 +- 
sdk/metric/correct_test.go | 2 +- sdk/metric/doc.go | 6 +++--- sdk/metric/monotone_test.go | 2 +- sdk/metric/stress_test.go | 2 +- 12 files changed, 26 insertions(+), 26 deletions(-) diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index ca2def088aa..f45c93dd13e 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -39,7 +39,7 @@ import ( // checkpointed, allowing the batcher to build the set of metrics // currently being exported. // -// The `ReadCheckpoint` method is called during collection in a +// The `CheckpointSet` method is called during collection in a // single-threaded context from the Exporter, giving the exporter // access to a producer for iterating over the complete checkpoint. type Batcher interface { @@ -75,11 +75,11 @@ type Batcher interface { // orchestrates collection. Process(ctx context.Context, record Record) error - // ReadCheckpoint is the interface used by the controller to + // CheckpointSet is the interface used by the controller to // access the fully aggregated checkpoint after collection. // // The returned CheckpointSet is passed to the Exporter. - ReadCheckpoint() CheckpointSet + CheckpointSet() CheckpointSet // FinishedCollection informs the Batcher that a complete // collection round was completed. 
Stateless batchers might diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys.go b/sdk/metric/batcher/defaultkeys/defaultkeys.go index e4282c7ab45..f0b25bd64e7 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys.go @@ -126,7 +126,7 @@ func (b *Batcher) Process(_ context.Context, record export.Record) error { return nil } -func (b *Batcher) ReadCheckpoint() export.CheckpointSet { +func (b *Batcher) CheckpointSet() export.CheckpointSet { return &checkpointSet{ aggCheckpointMap: b.aggCheckpoint, labelEncoder: b.labelEncoder, diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys_test.go b/sdk/metric/batcher/defaultkeys/defaultkeys_test.go index 36aea7cc1b5..67e03d80746 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys_test.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys_test.go @@ -38,7 +38,7 @@ func TestGroupingStateless(t *testing.T) { _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels2, test.CounterAgg(20))) _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels3, test.CounterAgg(40))) - checkpointSet := b.ReadCheckpoint() + checkpointSet := b.CheckpointSet() b.FinishedCollection() records := test.Output{} @@ -54,7 +54,7 @@ func TestGroupingStateless(t *testing.T) { }, records) // Verify that state is reset by FinishedCollection() - checkpointSet = b.ReadCheckpoint() + checkpointSet = b.CheckpointSet() b.FinishedCollection() checkpointSet.ForEach(func(rec export.Record) { t.Fatal("Unexpected call") @@ -68,7 +68,7 @@ func TestGroupingStateful(t *testing.T) { cagg := test.CounterAgg(10) _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, cagg)) - checkpointSet := b.ReadCheckpoint() + checkpointSet := b.CheckpointSet() b.FinishedCollection() records1 := test.Output{} @@ -79,7 +79,7 @@ func TestGroupingStateful(t *testing.T) { }, records1) // Test that state was NOT reset - checkpointSet = b.ReadCheckpoint() + checkpointSet = b.CheckpointSet() 
b.FinishedCollection() records2 := test.Output{} @@ -93,7 +93,7 @@ func TestGroupingStateful(t *testing.T) { // As yet cagg has not been passed to Batcher.Process. Should // not see an update. - checkpointSet = b.ReadCheckpoint() + checkpointSet = b.CheckpointSet() b.FinishedCollection() records3 := test.Output{} @@ -104,7 +104,7 @@ func TestGroupingStateful(t *testing.T) { // Now process the second update _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, cagg)) - checkpointSet = b.ReadCheckpoint() + checkpointSet = b.CheckpointSet() b.FinishedCollection() records4 := test.Output{} diff --git a/sdk/metric/batcher/ungrouped/ungrouped.go b/sdk/metric/batcher/ungrouped/ungrouped.go index f7a392f5ea0..2bda79ddb05 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped.go +++ b/sdk/metric/batcher/ungrouped/ungrouped.go @@ -84,7 +84,7 @@ func (b *Batcher) Process(_ context.Context, record export.Record) error { return nil } -func (b *Batcher) ReadCheckpoint() export.CheckpointSet { +func (b *Batcher) CheckpointSet() export.CheckpointSet { return b.batchMap } diff --git a/sdk/metric/batcher/ungrouped/ungrouped_test.go b/sdk/metric/batcher/ungrouped/ungrouped_test.go index ddfedf6ff4c..b1c556943c0 100644 --- a/sdk/metric/batcher/ungrouped/ungrouped_test.go +++ b/sdk/metric/batcher/ungrouped/ungrouped_test.go @@ -48,7 +48,7 @@ func TestUngroupedStateless(t *testing.T) { // Another counter Add for Labels1 _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, test.CounterAgg(50))) - checkpointSet := b.ReadCheckpoint() + checkpointSet := b.CheckpointSet() b.FinishedCollection() records := test.Output{} @@ -66,7 +66,7 @@ func TestUngroupedStateless(t *testing.T) { }, records) // Verify that state was reset - checkpointSet = b.ReadCheckpoint() + checkpointSet = b.CheckpointSet() b.FinishedCollection() checkpointSet.ForEach(func(rec export.Record) { t.Fatal("Unexpected call") @@ -80,7 +80,7 @@ func TestUngroupedStateful(t *testing.T) { cagg := 
test.CounterAgg(10) _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, cagg)) - checkpointSet := b.ReadCheckpoint() + checkpointSet := b.CheckpointSet() b.FinishedCollection() records1 := test.Output{} @@ -91,7 +91,7 @@ func TestUngroupedStateful(t *testing.T) { }, records1) // Test that state was NOT reset - checkpointSet = b.ReadCheckpoint() + checkpointSet = b.CheckpointSet() b.FinishedCollection() records2 := test.Output{} @@ -105,7 +105,7 @@ func TestUngroupedStateful(t *testing.T) { // As yet cagg has not been passed to Batcher.Process. Should // not see an update. - checkpointSet = b.ReadCheckpoint() + checkpointSet = b.CheckpointSet() b.FinishedCollection() records3 := test.Output{} @@ -116,7 +116,7 @@ func TestUngroupedStateful(t *testing.T) { // Now process the second update _ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, cagg)) - checkpointSet = b.ReadCheckpoint() + checkpointSet = b.CheckpointSet() b.FinishedCollection() records4 := test.Output{} diff --git a/sdk/metric/benchmark_test.go b/sdk/metric/benchmark_test.go index 99afebd75ee..89867f17446 100644 --- a/sdk/metric/benchmark_test.go +++ b/sdk/metric/benchmark_test.go @@ -68,7 +68,7 @@ func (*benchFixture) Process(context.Context, export.Record) error { return nil } -func (*benchFixture) ReadCheckpoint() export.CheckpointSet { +func (*benchFixture) CheckpointSet() export.CheckpointSet { return nil } diff --git a/sdk/metric/controller/push/push.go b/sdk/metric/controller/push/push.go index 08613a23511..5e098cab1eb 100644 --- a/sdk/metric/controller/push/push.go +++ b/sdk/metric/controller/push/push.go @@ -140,7 +140,7 @@ func (c *Controller) tick() { // configure a timeout here? 
ctx := context.Background() c.sdk.Collect(ctx) - err := c.exporter.Export(ctx, c.batcher.ReadCheckpoint()) + err := c.exporter.Export(ctx, c.batcher.CheckpointSet()) c.batcher.FinishedCollection() if err != nil { diff --git a/sdk/metric/controller/push/push_test.go b/sdk/metric/controller/push/push_test.go index cc416d55ba2..f822d04a46e 100644 --- a/sdk/metric/controller/push/push_test.go +++ b/sdk/metric/controller/push/push_test.go @@ -72,7 +72,7 @@ func (b *testBatcher) AggregatorFor(*export.Descriptor) export.Aggregator { return counter.New() } -func (b *testBatcher) ReadCheckpoint() export.CheckpointSet { +func (b *testBatcher) CheckpointSet() export.CheckpointSet { b.checkpoints++ return b.checkpointSet } diff --git a/sdk/metric/correct_test.go b/sdk/metric/correct_test.go index 6c91ebfb762..b4edc3ef972 100644 --- a/sdk/metric/correct_test.go +++ b/sdk/metric/correct_test.go @@ -45,7 +45,7 @@ func (cb *correctnessBatcher) AggregatorFor(*export.Descriptor) export.Aggregato return cb.agg } -func (cb *correctnessBatcher) ReadCheckpoint() export.CheckpointSet { +func (cb *correctnessBatcher) CheckpointSet() export.CheckpointSet { cb.t.Fatal("Should not be called") return nil } diff --git a/sdk/metric/doc.go b/sdk/metric/doc.go index 94eeb253ac0..00071a3233e 100644 --- a/sdk/metric/doc.go +++ b/sdk/metric/doc.go @@ -130,7 +130,7 @@ record has an associated aggregator. Batcher is an interface which sits between the SDK and an exporter. The Batcher embeds an AggregationSelector, used by the SDK to assign new Aggregators. The Batcher supports a Process() API for submitting -checkpointed aggregators to the batcher, and a ReadCheckpoint() API +checkpointed aggregators to the batcher, and a CheckpointSet() API for producing a complete checkpoint for the exporter. 
Two default Batcher implementations are provided, the "defaultkeys" Batcher groups aggregate metrics by their recommended Descriptor.Keys(), the @@ -142,8 +142,8 @@ duplicate serialization of labels, once as a unique key in the SDK (or Batcher) and once in the exporter. CheckpointSet is an interface between the Batcher and the Exporter. -After completing a collection pass, the Batcher.ReadCheckpoint() -method returns a CheckpointSet, which the Exporter uses to iterate over all +After completing a collection pass, the Batcher.CheckpointSet() method +returns a CheckpointSet, which the Exporter uses to iterate over all the updated metrics. Record is a struct containing the state of an individual exported diff --git a/sdk/metric/monotone_test.go b/sdk/metric/monotone_test.go index 5fc4674d13b..fbb4ab93dda 100644 --- a/sdk/metric/monotone_test.go +++ b/sdk/metric/monotone_test.go @@ -42,7 +42,7 @@ func (*monotoneBatcher) AggregatorFor(*export.Descriptor) export.Aggregator { return gauge.New() } -func (*monotoneBatcher) ReadCheckpoint() export.CheckpointSet { +func (*monotoneBatcher) CheckpointSet() export.CheckpointSet { return nil } diff --git a/sdk/metric/stress_test.go b/sdk/metric/stress_test.go index 2f8b8e175e4..ac55a5a375d 100644 --- a/sdk/metric/stress_test.go +++ b/sdk/metric/stress_test.go @@ -239,7 +239,7 @@ func (*testFixture) AggregatorFor(descriptor *export.Descriptor) export.Aggregat } } -func (*testFixture) ReadCheckpoint() export.CheckpointSet { +func (*testFixture) CheckpointSet() export.CheckpointSet { return nil } From 93fe58fa51a7646e90f8a38e490f9a14d56715fd Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 14 Nov 2019 22:25:09 -0800 Subject: [PATCH 69/73] EncodeLabels->Encode --- exporter/metric/test/test.go | 2 +- go.sum | 1 - sdk/export/metric/metric.go | 8 ++++---- sdk/metric/aggregator/array/array_test.go | 3 +++ sdk/metric/batcher/defaultkeys/defaultkeys.go | 2 +- sdk/metric/batcher/test/test.go | 4 ++-- sdk/metric/correct_test.go | 4 ++-- 
sdk/metric/labelencoder.go | 2 +- sdk/metric/sdk.go | 2 +- 9 files changed, 15 insertions(+), 13 deletions(-) diff --git a/exporter/metric/test/test.go b/exporter/metric/test/test.go index 019a3d527ab..387be08540e 100644 --- a/exporter/metric/test/test.go +++ b/exporter/metric/test/test.go @@ -21,7 +21,7 @@ func (p *CheckpointSet) Reset() { } func (p *CheckpointSet) Add(desc *export.Descriptor, agg export.Aggregator, labels ...core.KeyValue) { - encoded := p.encoder.EncodeLabels(labels) + encoded := p.encoder.Encode(labels) elabels := export.NewLabels(labels, encoded, p.encoder) p.updates = append(p.updates, export.NewRecord(desc, elabels, agg)) diff --git a/go.sum b/go.sum index b483f721b6e..28b92022a51 100644 --- a/go.sum +++ b/go.sum @@ -280,7 +280,6 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opentelemetry.io v0.1.0 h1:EANZoRCOP+A3faIlw/iN6YEWoYb1vleZRKm1EvH8T48= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index f45c93dd13e..0c391ea40ea 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -178,9 +178,9 @@ type Exporter interface { // // If none is provided, a default will be used. type LabelEncoder interface { - // EncodeLabels is called (concurrently) in instrumentation - // context. It should return a unique representation of the - // labels suitable for the SDK to use as a map key. 
+ // Encode is called (concurrently) in instrumentation context. + // It should return a unique representation of the labels + // suitable for the SDK to use as a map key. // // The exported Labels object retains a reference to its // LabelEncoder to determine which encoding was used. @@ -189,7 +189,7 @@ type LabelEncoder interface { // syntax for serialized label sets should implement // LabelEncoder, thus avoiding duplicate computation in the // export path. - EncodeLabels([]core.KeyValue) string + Encode([]core.KeyValue) string } // CheckpointSet allows a controller to access a complete checkpoint of diff --git a/sdk/metric/aggregator/array/array_test.go b/sdk/metric/aggregator/array/array_test.go index 279554deb3a..bbb56d9850f 100644 --- a/sdk/metric/aggregator/array/array_test.go +++ b/sdk/metric/aggregator/array/array_test.go @@ -216,6 +216,7 @@ func TestArrayErrors(t *testing.T) { count, err := agg.Count() require.Equal(t, int64(1), count, "NaN value was not counted") + require.Nil(t, err) num, err := agg.Quantile(0) require.Nil(t, err) @@ -287,9 +288,11 @@ func TestArrayFloat64(t *testing.T) { sum, err := agg.Sum() require.InEpsilon(t, all.Sum().AsFloat64(), sum.AsFloat64(), 0.0000001, "Same sum") + require.Nil(t, err) count, err := agg.Count() require.Equal(t, all.Count(), count, "Same count") + require.Nil(t, err) min, err := agg.Min() require.Nil(t, err) diff --git a/sdk/metric/batcher/defaultkeys/defaultkeys.go b/sdk/metric/batcher/defaultkeys/defaultkeys.go index f0b25bd64e7..a07c616bfc4 100644 --- a/sdk/metric/batcher/defaultkeys/defaultkeys.go +++ b/sdk/metric/batcher/defaultkeys/defaultkeys.go @@ -98,7 +98,7 @@ func (b *Batcher) Process(_ context.Context, record export.Record) error { } // Compute an encoded lookup key. - encoded := b.labelEncoder.EncodeLabels(outputLabels) + encoded := b.labelEncoder.Encode(outputLabels) // Merge this aggregator with all preceding aggregators that // map to the same set of `outputLabels` labels. 
diff --git a/sdk/metric/batcher/test/test.go b/sdk/metric/batcher/test/test.go index e9d7aea4389..6f56949c81f 100644 --- a/sdk/metric/batcher/test/test.go +++ b/sdk/metric/batcher/test/test.go @@ -83,11 +83,11 @@ func (*testAggregationSelector) AggregatorFor(desc *export.Descriptor) export.Ag } func makeLabels(encoder export.LabelEncoder, labels ...core.KeyValue) export.Labels { - encoded := encoder.EncodeLabels(labels) + encoded := encoder.Encode(labels) return export.NewLabels(labels, encoded, encoder) } -func (Encoder) EncodeLabels(labels []core.KeyValue) string { +func (Encoder) Encode(labels []core.KeyValue) string { var sb strings.Builder for i, l := range labels { if i > 0 { diff --git a/sdk/metric/correct_test.go b/sdk/metric/correct_test.go index b4edc3ef972..3b11aeaa8c4 100644 --- a/sdk/metric/correct_test.go +++ b/sdk/metric/correct_test.go @@ -58,7 +58,7 @@ func (cb *correctnessBatcher) Process(_ context.Context, record export.Record) e return nil } -func (testLabelEncoder) EncodeLabels(labels []core.KeyValue) string { +func (testLabelEncoder) Encode(labels []core.KeyValue) string { return fmt.Sprint(labels) } @@ -189,6 +189,6 @@ func TestSDKLabelEncoder(t *testing.T) { func TestDefaultLabelEncoder(t *testing.T) { encoder := sdk.DefaultLabelEncoder() - encoded := encoder.EncodeLabels([]core.KeyValue{key.String("A", "B"), key.String("C", "D")}) + encoded := encoder.Encode([]core.KeyValue{key.String("A", "B"), key.String("C", "D")}) require.Equal(t, `A=B,C=D`, encoded) } diff --git a/sdk/metric/labelencoder.go b/sdk/metric/labelencoder.go index c444b4eb07c..1be80d9d592 100644 --- a/sdk/metric/labelencoder.go +++ b/sdk/metric/labelencoder.go @@ -45,7 +45,7 @@ func DefaultLabelEncoder() export.LabelEncoder { } } -func (d *defaultLabelEncoder) EncodeLabels(labels []core.KeyValue) string { +func (d *defaultLabelEncoder) Encode(labels []core.KeyValue) string { buf := d.pool.Get().(*bytes.Buffer) defer d.pool.Put(buf) buf.Reset() diff --git a/sdk/metric/sdk.go 
b/sdk/metric/sdk.go index b0c07e957da..ba4153ce4e6 100644 --- a/sdk/metric/sdk.go +++ b/sdk/metric/sdk.go @@ -270,7 +270,7 @@ func (m *SDK) Labels(kvs ...core.KeyValue) api.LabelSet { } sorted = sorted[0:oi] - encoded := m.lencoder.EncodeLabels(sorted) + encoded := m.lencoder.Encode(sorted) return &labels{ meter: m, From 782cabf9e7b579ba1446be34bcf27e46ad38e844 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 14 Nov 2019 22:54:57 -0800 Subject: [PATCH 70/73] Format a better inconsistent type error; add more aggregator API tests --- .../aggregator/{api.go => aggregator.go} | 12 ++- .../metric/aggregator/aggregator_test.go | 95 +++++++++++++++++++ sdk/metric/aggregator/array/array.go | 2 +- sdk/metric/aggregator/counter/counter.go | 2 +- sdk/metric/aggregator/ddsketch/ddsketch.go | 2 +- sdk/metric/aggregator/gauge/gauge.go | 2 +- sdk/metric/aggregator/maxsumcount/msc.go | 2 +- 7 files changed, 109 insertions(+), 8 deletions(-) rename sdk/export/metric/aggregator/{api.go => aggregator.go} (88%) create mode 100644 sdk/export/metric/aggregator/aggregator_test.go diff --git a/sdk/export/metric/aggregator/api.go b/sdk/export/metric/aggregator/aggregator.go similarity index 88% rename from sdk/export/metric/aggregator/api.go rename to sdk/export/metric/aggregator/aggregator.go index 1530c48dcc7..8e968d5ef72 100644 --- a/sdk/export/metric/aggregator/api.go +++ b/sdk/export/metric/aggregator/aggregator.go @@ -73,7 +73,7 @@ var ( ErrNegativeInput = fmt.Errorf("Negative value is out of range for this instrument") ErrNaNInput = fmt.Errorf("NaN value is an invalid input") ErrNonMonotoneInput = fmt.Errorf("The new value is not monotone") - ErrInconsistentType = fmt.Errorf("Cannot merge different aggregator types") + ErrInconsistentType = fmt.Errorf("Inconsistent aggregator types") // ErrNoLastValue is returned by the LastValue interface when // (due to a race with collection) the Aggregator is @@ -88,15 +88,21 @@ var ( ErrEmptyDataSet = fmt.Errorf("The result is not defined on an 
empty data set") ) +// NewInconsistentMergeError formats an error describing an attempt to +// merge different-type aggregators. The result can be unwrapped as +// an ErrInconsistentType. +func NewInconsistentMergeError(a1, a2 export.Aggregator) error { + return fmt.Errorf("Cannot merge %T with %T: %w", a1, a2, ErrInconsistentType) +} + // RangeTest is a commmon routine for testing for valid input values. // This rejects NaN values. This rejects negative values when the // metric instrument does not support negative values, including -// monotonic counter metrics and absolute measure metrics). +// monotonic counter metrics and absolute measure metrics. func RangeTest(number core.Number, descriptor *export.Descriptor) error { numberKind := descriptor.NumberKind() if numberKind == core.Float64NumberKind && math.IsNaN(number.AsFloat64()) { - // NOTE: add this to the specification. return ErrNaNInput } diff --git a/sdk/export/metric/aggregator/aggregator_test.go b/sdk/export/metric/aggregator/aggregator_test.go new file mode 100644 index 00000000000..53b468f53ff --- /dev/null +++ b/sdk/export/metric/aggregator/aggregator_test.go @@ -0,0 +1,95 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package aggregator_test // import "go.opentelemetry.io/otel/sdk/metric/aggregator" + +import ( + "errors" + "fmt" + "math" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/api/core" + export "go.opentelemetry.io/otel/sdk/export/metric" + "go.opentelemetry.io/otel/sdk/export/metric/aggregator" + "go.opentelemetry.io/otel/sdk/metric/aggregator/counter" + "go.opentelemetry.io/otel/sdk/metric/aggregator/gauge" +) + +func TestInconsistentMergeErr(t *testing.T) { + err := aggregator.NewInconsistentMergeError(counter.New(), gauge.New()) + require.Equal( + t, + "Cannot merge *counter.Aggregator with *gauge.Aggregator: Inconsistent aggregator types", + err.Error(), + ) + require.True(t, errors.Is(err, aggregator.ErrInconsistentType)) +} + +func testRangeNaN(t *testing.T, desc *export.Descriptor) { + // If the descriptor uses int64 numbers, this won't register as NaN + nan := core.NewFloat64Number(math.NaN()) + err := aggregator.RangeTest(nan, desc) + + if desc.NumberKind() == core.Float64NumberKind { + require.Equal(t, aggregator.ErrNaNInput, err) + } else { + require.Nil(t, err) + } +} + +func testRangeNegative(t *testing.T, alt bool, desc *export.Descriptor) { + var neg, pos core.Number + + if desc.NumberKind() == core.Float64NumberKind { + pos = core.NewFloat64Number(+1) + neg = core.NewFloat64Number(-1) + } else { + pos = core.NewInt64Number(+1) + neg = core.NewInt64Number(-1) + } + + posErr := aggregator.RangeTest(pos, desc) + negErr := aggregator.RangeTest(neg, desc) + + require.Nil(t, posErr) + require.Equal(t, negErr == nil, alt) +} + +func TestRangeTest(t *testing.T) { + for _, nkind := range []core.NumberKind{core.Float64NumberKind, core.Int64NumberKind} { + t.Run(nkind.String(), func(t *testing.T) { + for _, mkind := range []export.MetricKind{export.CounterKind, export.MeasureKind} { + t.Run(nkind.String(), func(t *testing.T) { + for _, alt := range []bool{true, false} { + t.Run(fmt.Sprint(alt), func(t *testing.T) { + desc := 
export.NewDescriptor( + "name", + mkind, + nil, + "", + "", + nkind, + alt, + ) + testRangeNaN(t, desc) + testRangeNegative(t, alt, desc) + }) + } + }) + } + }) + } +} diff --git a/sdk/metric/aggregator/array/array.go b/sdk/metric/aggregator/array/array.go index 34e17100b0f..e3d7572cb9e 100644 --- a/sdk/metric/aggregator/array/array.go +++ b/sdk/metric/aggregator/array/array.go @@ -110,7 +110,7 @@ func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export. func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { - return aggregator.ErrInconsistentType + return aggregator.NewInconsistentMergeError(c, oa) } c.ckptSum.AddNumber(desc.NumberKind(), o.ckptSum) diff --git a/sdk/metric/aggregator/counter/counter.go b/sdk/metric/aggregator/counter/counter.go index e79cac00f15..93660e0d2d9 100644 --- a/sdk/metric/aggregator/counter/counter.go +++ b/sdk/metric/aggregator/counter/counter.go @@ -63,7 +63,7 @@ func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export. func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { - return aggregator.ErrInconsistentType + return aggregator.NewInconsistentMergeError(c, oa) } c.checkpoint.AddNumber(desc.NumberKind(), o.checkpoint) return nil diff --git a/sdk/metric/aggregator/ddsketch/ddsketch.go b/sdk/metric/aggregator/ddsketch/ddsketch.go index 5160afeda89..b07c0fd1358 100644 --- a/sdk/metric/aggregator/ddsketch/ddsketch.go +++ b/sdk/metric/aggregator/ddsketch/ddsketch.go @@ -125,7 +125,7 @@ func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export. 
func (c *Aggregator) Merge(oa export.Aggregator, d *export.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { - return aggregator.ErrInconsistentType + return aggregator.NewInconsistentMergeError(c, oa) } c.checkpoint.Merge(o.checkpoint) diff --git a/sdk/metric/aggregator/gauge/gauge.go b/sdk/metric/aggregator/gauge/gauge.go index ce61a788892..5b86d7585bf 100644 --- a/sdk/metric/aggregator/gauge/gauge.go +++ b/sdk/metric/aggregator/gauge/gauge.go @@ -131,7 +131,7 @@ func (g *Aggregator) updateMonotonic(number core.Number, desc *export.Descriptor func (g *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { - return aggregator.ErrInconsistentType + return aggregator.NewInconsistentMergeError(g, oa) } ggd := (*gaugeData)(atomic.LoadPointer(&g.checkpoint)) diff --git a/sdk/metric/aggregator/maxsumcount/msc.go b/sdk/metric/aggregator/maxsumcount/msc.go index 4f936f297a3..464feb0fb5a 100644 --- a/sdk/metric/aggregator/maxsumcount/msc.go +++ b/sdk/metric/aggregator/maxsumcount/msc.go @@ -113,7 +113,7 @@ func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export. 
func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error { o, _ := oa.(*Aggregator) if o == nil { - return aggregator.ErrInconsistentType + return aggregator.NewInconsistentMergeError(c, oa) } c.checkpoint.sum.AddNumber(desc.NumberKind(), o.checkpoint.sum) From eedaaab20ab70eec359085daf74a4da3f0f40bd8 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 14 Nov 2019 23:21:42 -0800 Subject: [PATCH 71/73] More RangeTest test coverage --- sdk/export/metric/aggregator/aggregator_test.go | 16 +++++++++++++--- sdk/export/metric/metric.go | 2 ++ sdk/metric/controller/push/push_test.go | 8 ++++++-- 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/sdk/export/metric/aggregator/aggregator_test.go b/sdk/export/metric/aggregator/aggregator_test.go index 53b468f53ff..35b0bc012d6 100644 --- a/sdk/export/metric/aggregator/aggregator_test.go +++ b/sdk/export/metric/aggregator/aggregator_test.go @@ -21,6 +21,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/api/core" export "go.opentelemetry.io/otel/sdk/export/metric" "go.opentelemetry.io/otel/sdk/export/metric/aggregator" @@ -65,14 +66,23 @@ func testRangeNegative(t *testing.T, alt bool, desc *export.Descriptor) { negErr := aggregator.RangeTest(neg, desc) require.Nil(t, posErr) - require.Equal(t, negErr == nil, alt) + + if desc.MetricKind() == export.GaugeKind { + require.Nil(t, negErr) + } else { + require.Equal(t, negErr == nil, alt) + } } func TestRangeTest(t *testing.T) { for _, nkind := range []core.NumberKind{core.Float64NumberKind, core.Int64NumberKind} { t.Run(nkind.String(), func(t *testing.T) { - for _, mkind := range []export.MetricKind{export.CounterKind, export.MeasureKind} { - t.Run(nkind.String(), func(t *testing.T) { + for _, mkind := range []export.MetricKind{ + export.CounterKind, + export.GaugeKind, + export.MeasureKind, + } { + t.Run(mkind.String(), func(t *testing.T) { for _, alt := range []bool{true, false} { t.Run(fmt.Sprint(alt), func(t 
*testing.T) { desc := export.NewDescriptor( diff --git a/sdk/export/metric/metric.go b/sdk/export/metric/metric.go index 0c391ea40ea..8a1731fc075 100644 --- a/sdk/export/metric/metric.go +++ b/sdk/export/metric/metric.go @@ -14,6 +14,8 @@ package export +//go:generate stringer -type=MetricKind + import ( "context" diff --git a/sdk/metric/controller/push/push_test.go b/sdk/metric/controller/push/push_test.go index f822d04a46e..47095f4fb65 100644 --- a/sdk/metric/controller/push/push_test.go +++ b/sdk/metric/controller/push/push_test.go @@ -36,6 +36,7 @@ type testBatcher struct { t *testing.T checkpointSet *test.CheckpointSet checkpoints int + finishes int } type testExporter struct { @@ -77,7 +78,8 @@ func (b *testBatcher) CheckpointSet() export.CheckpointSet { return b.checkpointSet } -func (*testBatcher) FinishedCollection() { +func (b *testBatcher) FinishedCollection() { + b.finishes++ } func (b *testBatcher) Process(_ context.Context, record export.Record) error { @@ -127,6 +129,7 @@ func TestPushTicker(t *testing.T) { counter.Add(ctx, 3, meter.Labels()) require.Equal(t, 0, fix.batcher.checkpoints) + require.Equal(t, 0, fix.batcher.finishes) require.Equal(t, 0, fix.exporter.exports) require.Equal(t, 0, len(fix.exporter.records)) @@ -135,6 +138,7 @@ func TestPushTicker(t *testing.T) { require.Equal(t, 1, fix.batcher.checkpoints) require.Equal(t, 1, fix.exporter.exports) + require.Equal(t, 1, fix.batcher.finishes) require.Equal(t, 1, len(fix.exporter.records)) require.Equal(t, "counter", fix.exporter.records[0].Descriptor().Name()) @@ -151,6 +155,7 @@ func TestPushTicker(t *testing.T) { runtime.Gosched() require.Equal(t, 2, fix.batcher.checkpoints) + require.Equal(t, 2, fix.batcher.finishes) require.Equal(t, 2, fix.exporter.exports) require.Equal(t, 1, len(fix.exporter.records)) require.Equal(t, "counter", fix.exporter.records[0].Descriptor().Name()) @@ -192,5 +197,4 @@ func TestPushExportError(t *testing.T) { p.Stop() } -// TODO add a test that 
FinishedCollection() is callled // TODO remove the clock import from push.go From f67b47c495cd50f3df2ffbb7540a91b2c3fba107 Mon Sep 17 00:00:00 2001 From: jmacd Date: Thu, 14 Nov 2019 23:37:47 -0800 Subject: [PATCH 72/73] Make benbjohnson/clock a test-only dependency --- sdk/export/metric/metrickind_string.go | 25 ++++++++++++ sdk/metric/controller/push/push.go | 51 +++++++++++++++++++++---- sdk/metric/controller/push/push_test.go | 37 ++++++++++++++++-- 3 files changed, 102 insertions(+), 11 deletions(-) create mode 100644 sdk/export/metric/metrickind_string.go diff --git a/sdk/export/metric/metrickind_string.go b/sdk/export/metric/metrickind_string.go new file mode 100644 index 00000000000..94ccadc5928 --- /dev/null +++ b/sdk/export/metric/metrickind_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=MetricKind"; DO NOT EDIT. + +package export + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[CounterKind-0] + _ = x[GaugeKind-1] + _ = x[MeasureKind-2] +} + +const _MetricKind_name = "CounterKindGaugeKindMeasureKind" + +var _MetricKind_index = [...]uint8{0, 11, 20, 31} + +func (i MetricKind) String() string { + if i < 0 || i >= MetricKind(len(_MetricKind_index)-1) { + return "MetricKind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _MetricKind_name[_MetricKind_index[i]:_MetricKind_index[i+1]] +} diff --git a/sdk/metric/controller/push/push.go b/sdk/metric/controller/push/push.go index 5e098cab1eb..fcb586875ad 100644 --- a/sdk/metric/controller/push/push.go +++ b/sdk/metric/controller/push/push.go @@ -19,8 +19,6 @@ import ( "sync" "time" - "github.com/benbjohnson/clock" - "go.opentelemetry.io/otel/api/metric" export "go.opentelemetry.io/otel/sdk/export/metric" sdk "go.opentelemetry.io/otel/sdk/metric" @@ -36,12 +34,35 @@ type Controller struct { wg sync.WaitGroup ch chan struct{} period time.Duration - ticker *clock.Ticker - clock clock.Clock + ticker Ticker + clock Clock } var _ metric.Provider = &Controller{} +// Several types below are created to match "github.com/benbjohnson/clock" +// so that it remains a test-only dependency. + +type Clock interface { + Now() time.Time + Ticker(time.Duration) Ticker +} + +type Ticker interface { + Stop() + C() <-chan time.Time +} + +type realClock struct { +} + +type realTicker struct { + ticker *time.Ticker +} + +var _ Clock = realClock{} +var _ Ticker = realTicker{} + // New constructs a Controller, an implementation of metric.Provider, // using the provided batcher, exporter, and collection period to // configure an SDK with periodic collection. The batcher itself is @@ -65,13 +86,13 @@ func New(batcher export.Batcher, exporter export.Exporter, period time.Duration) exporter: exporter, ch: make(chan struct{}), period: period, - clock: clock.New(), + clock: realClock{}, } } // SetClock supports setting a mock clock for testing. This must be // called before Start(). 
-func (c *Controller) SetClock(clock clock.Clock) { +func (c *Controller) SetClock(clock Clock) { c.lock.Lock() defer c.lock.Unlock() c.clock = clock @@ -129,7 +150,7 @@ func (c *Controller) run(ch chan struct{}) { case <-ch: c.wg.Done() return - case <-c.ticker.C: + case <-c.ticker.C(): c.tick() } } @@ -147,3 +168,19 @@ func (c *Controller) tick() { c.errorHandler(err) } } + +func (realClock) Now() time.Time { + return time.Now() +} + +func (realClock) Ticker(period time.Duration) Ticker { + return realTicker{time.NewTicker(period)} +} + +func (t realTicker) Stop() { + t.ticker.Stop() +} + +func (t realTicker) C() <-chan time.Time { + return t.ticker.C +} diff --git a/sdk/metric/controller/push/push_test.go b/sdk/metric/controller/push/push_test.go index 47095f4fb65..0894ad616e3 100644 --- a/sdk/metric/controller/push/push_test.go +++ b/sdk/metric/controller/push/push_test.go @@ -52,6 +52,17 @@ type testFixture struct { exporter *testExporter } +type mockClock struct { + mock *clock.Mock +} + +type mockTicker struct { + ticker *clock.Ticker +} + +var _ push.Clock = mockClock{} +var _ push.Ticker = mockTicker{} + func newFixture(t *testing.T) testFixture { checkpointSet := test.NewCheckpointSet(sdk.DefaultLabelEncoder()) @@ -95,6 +106,26 @@ func (e *testExporter) Export(_ context.Context, checkpointSet export.Checkpoint return e.retErr } +func (c mockClock) Now() time.Time { + return c.mock.Now() +} + +func (c mockClock) Ticker(period time.Duration) push.Ticker { + return mockTicker{c.mock.Ticker(period)} +} + +func (c mockClock) Add(d time.Duration) { + c.mock.Add(d) +} + +func (t mockTicker) Stop() { + t.ticker.Stop() +} + +func (t mockTicker) C() <-chan time.Time { + return t.ticker.C +} + func TestPushDoubleStop(t *testing.T) { fix := newFixture(t) p := push.New(fix.batcher, fix.exporter, time.Second) @@ -117,7 +148,7 @@ func TestPushTicker(t *testing.T) { p := push.New(fix.batcher, fix.exporter, time.Second) meter := p.GetMeter("name") - mock := 
clock.NewMock() + mock := mockClock{clock.NewMock()} p.SetClock(mock) ctx := context.Background() @@ -178,7 +209,7 @@ func TestPushExportError(t *testing.T) { err = sdkErr }) - mock := clock.NewMock() + mock := mockClock{clock.NewMock()} p.SetClock(mock) p.Start() @@ -196,5 +227,3 @@ func TestPushExportError(t *testing.T) { p.Stop() } - -// TODO remove the clock import from push.go From 046db4a7c62587c3cbd6cb0dc595924aaf01e3ad Mon Sep 17 00:00:00 2001 From: jmacd Date: Fri, 15 Nov 2019 00:03:17 -0800 Subject: [PATCH 73/73] Handle ErrNoLastValue in stress_test --- sdk/metric/stress_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/metric/stress_test.go b/sdk/metric/stress_test.go index ac55a5a375d..5fb62b33e4c 100644 --- a/sdk/metric/stress_test.go +++ b/sdk/metric/stress_test.go @@ -265,14 +265,14 @@ func (f *testFixture) Process(_ context.Context, record export.Record) error { counter := agg.(aggregator.Sum) sum, err := counter.Sum() if err != nil { - panic("Impossible") + f.T.Fatal("Sum error: ", err) } f.impl.storeCollect(actual, sum, time.Time{}) case export.GaugeKind: gauge := agg.(aggregator.LastValue) lv, ts, err := gauge.LastValue() - if err != nil { - panic("Impossible") + if err != nil && err != aggregator.ErrNoLastValue { + f.T.Fatal("Last value error: ", err) } f.impl.storeCollect(actual, lv, ts) default: