diff --git a/exporter/prometheusremotewriteexporter/exporter.go b/exporter/prometheusremotewriteexporter/exporter.go index c9b57eb3bb2..c74f63dcc80 100644 --- a/exporter/prometheusremotewriteexporter/exporter.go +++ b/exporter/prometheusremotewriteexporter/exporter.go @@ -20,16 +20,18 @@ import ( "bytes" "context" "errors" + "fmt" "io" "net/http" "net/url" - "strings" + "strconv" "sync" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/prometheus/prompb" + "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/consumer/pdatautil" otlp "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1old" @@ -87,7 +89,7 @@ func (prwe *prwExporter) pushMetrics(ctx context.Context, md pdata.Metrics) (int default: tsMap := map[string]*prompb.TimeSeries{} dropped := 0 - errs := []string{} + errs := []error{} resourceMetrics := dataold.MetricDataToOtlp(pdatautil.MetricsToOldInternalMetrics(md)) for _, resourceMetric := range resourceMetrics { @@ -107,7 +109,7 @@ func (prwe *prwExporter) pushMetrics(ctx context.Context, md pdata.Metrics) (int // check for valid type and temporality combination if ok := validateMetrics(metric.MetricDescriptor); !ok { dropped++ - errs = append(errs, "invalid temporality and type combination") + errs = append(errs, errors.New("invalid temporality and type combination")) continue } // handle individual metric based on type @@ -116,19 +118,28 @@ func (prwe *prwExporter) pushMetrics(ctx context.Context, md pdata.Metrics) (int otlp.MetricDescriptor_MONOTONIC_DOUBLE, otlp.MetricDescriptor_DOUBLE: if err := prwe.handleScalarMetric(tsMap, metric); err != nil { dropped++ - errs = append(errs, err.Error()) + errs = append(errs, err) } + case otlp.MetricDescriptor_HISTOGRAM: + if err := prwe.handleHistogramMetric(tsMap, metric); err != nil { + dropped++ + errs = append(errs, err) + } + default: + dropped++ + errs = append(errs, 
errors.New("unsupported metric type")) } } } } if err := prwe.export(ctx, tsMap); err != nil { - return pdatautil.MetricCount(md), err + dropped = pdatautil.MetricCount(md) + errs = append(errs, err) } if dropped != 0 { - return dropped, errors.New(strings.Join(errs, "\n")) + return dropped, componenterror.CombineErrors(errs) } return 0, nil @@ -146,11 +157,10 @@ func (prwe *prwExporter) handleScalarMetric(tsMap map[string]*prompb.TimeSeries, // int points case otlp.MetricDescriptor_MONOTONIC_INT64, otlp.MetricDescriptor_INT64: if metric.Int64DataPoints == nil { - return errors.New("nil data point field in metric" + metric.GetMetricDescriptor().Name) + return fmt.Errorf("nil data point field in metric %s", metric.GetMetricDescriptor().Name) } for _, pt := range metric.Int64DataPoints { - // create parameters for addSample name := getPromMetricName(metric.GetMetricDescriptor(), prwe.namespace) labels := createLabelSet(pt.GetLabels(), nameStr, name) @@ -167,7 +177,7 @@ func (prwe *prwExporter) handleScalarMetric(tsMap map[string]*prompb.TimeSeries, // double points case otlp.MetricDescriptor_MONOTONIC_DOUBLE, otlp.MetricDescriptor_DOUBLE: if metric.DoubleDataPoints == nil { - return errors.New("nil data point field in metric" + metric.GetMetricDescriptor().Name) + return fmt.Errorf("nil data point field in metric %s", metric.GetMetricDescriptor().Name) } for _, pt := range metric.DoubleDataPoints { @@ -183,10 +193,70 @@ func (prwe *prwExporter) handleScalarMetric(tsMap map[string]*prompb.TimeSeries, } return nil } - return errors.New("invalid metric type: wants int or double data points") } +// handleHistogramMetric processes data points in a single OTLP histogram metric by mapping the sum, count and each +// bucket of every data point as a Sample, and adding each Sample to its corresponding TimeSeries. +// tsMap and metric cannot be nil. 
+func (prwe *prwExporter) handleHistogramMetric(tsMap map[string]*prompb.TimeSeries, metric *otlp.Metric) error { + + if metric.HistogramDataPoints == nil { + return fmt.Errorf("nil data point field in metric %s", metric.GetMetricDescriptor().Name) + } + + for _, pt := range metric.HistogramDataPoints { + if pt == nil { + continue + } + time := convertTimeStamp(pt.TimeUnixNano) + mType := metric.GetMetricDescriptor().GetType() + + // sum, count, and buckets of the histogram should append suffix to baseName + baseName := getPromMetricName(metric.GetMetricDescriptor(), prwe.namespace) + + // treat sum as a sample in an individual TimeSeries + sum := &prompb.Sample{ + Value: pt.GetSum(), + Timestamp: time, + } + sumlabels := createLabelSet(pt.GetLabels(), nameStr, baseName+sumStr) + addSample(tsMap, sum, sumlabels, mType) + + // treat count as a sample in an individual TimeSeries + count := &prompb.Sample{ + Value: float64(pt.GetCount()), + Timestamp: time, + } + countlabels := createLabelSet(pt.GetLabels(), nameStr, baseName+countStr) + addSample(tsMap, count, countlabels, mType) + + // count for +Inf bound + var totalCount uint64 + + // process each bucket + for le, bk := range pt.GetBuckets() { + bucket := &prompb.Sample{ + Value: float64(bk.Count), + Timestamp: time, + } + boundStr := strconv.FormatFloat(pt.GetExplicitBounds()[le], 'f', -1, 64) + labels := createLabelSet(pt.GetLabels(), nameStr, baseName+bucketStr, leStr, boundStr) + addSample(tsMap, bucket, labels, mType) + + totalCount += bk.GetCount() + } + // add le=+Inf bucket + infBucket := &prompb.Sample{ + Value: float64(totalCount), + Timestamp: time, + } + infLabels := createLabelSet(pt.GetLabels(), nameStr, baseName+bucketStr, leStr, pInfStr) + addSample(tsMap, infBucket, infLabels, mType) + } + return nil +} + // export sends a Snappy-compressed WriteRequest containing TimeSeries to a remote write endpoint in order func (prwe *prwExporter) export(ctx context.Context, tsMap 
map[string]*prompb.TimeSeries) error { //Calls the helper function to convert the TsMap to the desired format diff --git a/exporter/prometheusremotewriteexporter/exporter_test.go b/exporter/prometheusremotewriteexporter/exporter_test.go index 6a15745dd89..c5533cfd7c9 100644 --- a/exporter/prometheusremotewriteexporter/exporter_test.go +++ b/exporter/prometheusremotewriteexporter/exporter_test.go @@ -16,11 +16,11 @@ package prometheusremotewriteexporter import ( "context" - "fmt" "io/ioutil" "net/http" "net/http/httptest" "net/url" + "strconv" "sync" "testing" @@ -142,7 +142,114 @@ func Test_handleScalarMetric(t *testing.T) { } } -// Test_newPrwExporter checks that a new exporter instance with non-nil fields is initialized +// Test_handleHistogramMetric checks whether data points(sum, count, buckets) within a single Histogram metric can be +// added to a map of TimeSeries correctly. +// Test cases are a histogram data point with two buckets and nil data points case. +func Test_handleHistogramMetric(t *testing.T) { + sum := "sum" + count := "count" + bucket1 := "bucket1" + bucket2 := "bucket2" + bucketInf := "bucketInf" + histPoint := getHistogramDataPoint( + lbs1, + time1, + floatVal2, + uint64(intVal2), []float64{floatVal1, floatVal2}, + []uint64{uint64(intVal1), uint64(intVal1)}) + + // string signature of the data point is the key of the map + sigs := map[string]string{ + sum: typeHistogram + "-" + nameStr + "-" + name1 + "_sum" + lb1Sig, + count: typeHistogram + "-" + nameStr + "-" + name1 + "_count" + lb1Sig, + bucket1: typeHistogram + "-" + nameStr + "-" + name1 + "_bucket" + "-" + "le-" + + strconv.FormatFloat(floatVal1, 'f', -1, 64) + lb1Sig, + bucket2: typeHistogram + "-" + nameStr + "-" + name1 + "_bucket" + "-" + "le-" + + strconv.FormatFloat(floatVal2, 'f', -1, 64) + lb1Sig, + bucketInf: typeHistogram + "-" + nameStr + "-" + name1 + "_bucket" + "-" + "le-" + + "+Inf" + lb1Sig, + } + labels := map[string][]prompb.Label{ + sum: append(promLbs1, 
getPromLabels(nameStr, name1+"_sum")...), + count: append(promLbs1, getPromLabels(nameStr, name1+"_count")...), + bucket1: append(promLbs1, getPromLabels(nameStr, name1+"_bucket", "le", + strconv.FormatFloat(floatVal1, 'f', -1, 64))...), + bucket2: append(promLbs1, getPromLabels(nameStr, name1+"_bucket", "le", + strconv.FormatFloat(floatVal2, 'f', -1, 64))...), + bucketInf: append(promLbs1, getPromLabels(nameStr, name1+"_bucket", "le", + "+Inf")...), + } + tests := []struct { + name string + m otlp.Metric + returnError bool + want map[string]*prompb.TimeSeries + }{ + { + "invalid_type_array", + otlp.Metric{ + MetricDescriptor: getDescriptor("invalid_type_array", histogramComb, validCombinations), + Int64DataPoints: nil, + DoubleDataPoints: nil, + HistogramDataPoints: nil, + SummaryDataPoints: nil, + }, + true, + map[string]*prompb.TimeSeries{}, + }, + { + "hist_nil_pt", + otlp.Metric{ + MetricDescriptor: getDescriptor("hist_nil_pt", histogramComb, validCombinations), + Int64DataPoints: nil, + DoubleDataPoints: nil, + HistogramDataPoints: []*otlp.HistogramDataPoint{nil}, + SummaryDataPoints: nil, + }, + false, + map[string]*prompb.TimeSeries{}, + }, + { + "single_histogram_point", + otlp.Metric{ + MetricDescriptor: getDescriptor(name1+"", histogramComb, validCombinations), + Int64DataPoints: nil, + DoubleDataPoints: nil, + HistogramDataPoints: []*otlp.HistogramDataPoint{histPoint}, + SummaryDataPoints: nil, + }, + false, + map[string]*prompb.TimeSeries{ + sigs[sum]: getTimeSeries(labels[sum], getSample(floatVal2, msTime1)), + sigs[count]: getTimeSeries(labels[count], getSample(float64(intVal2), msTime1)), + sigs[bucket1]: getTimeSeries(labels[bucket1], getSample(float64(intVal1), msTime1)), + sigs[bucket2]: getTimeSeries(labels[bucket2], getSample(float64(intVal1), msTime1)), + sigs[bucketInf]: getTimeSeries(labels[bucketInf], getSample(float64(intVal2), msTime1)), + }, + }, + } + + // run tests + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + 
tsMap := map[string]*prompb.TimeSeries{} + prw := &prwExporter{} + ok := prw.handleHistogramMetric(tsMap, &tt.m) + if tt.returnError { + assert.Error(t, ok) + return + } + assert.Exactly(t, len(tt.want), len(tsMap)) + for k, v := range tsMap { + require.NotNil(t, tt.want[k], k) + assert.ElementsMatch(t, tt.want[k].Labels, v.Labels) + assert.ElementsMatch(t, tt.want[k].Samples, v.Samples) + } + }) + } +} + +// Test_newPrwExporter checks that a new exporter instance with non-nil fields is initialized func Test_newPrwExporter(t *testing.T) { config := &Config{ ExporterSettings: configmodels.ExporterSettings{}, @@ -263,7 +370,6 @@ func Test_export(t *testing.T) { require.NotNil(t, writeReq.GetTimeseries()) assert.Equal(t, *ts1, writeReq.GetTimeseries()[0]) w.WriteHeader(code) - fmt.Fprintf(w, "error message") } // Create in test table format to check if different HTTP response codes or server errors @@ -299,7 +405,9 @@ func Test_export(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - handleFunc(w, r, tt.httpResponseCode) + if handleFunc != nil { + handleFunc(w, r, tt.httpResponseCode) + } })) defer server.Close() serverURL, uErr := url.Parse(server.URL) @@ -335,38 +443,65 @@ func runExportPipeline(t *testing.T, ts *prompb.TimeSeries, endpoint *url.URL) e // Test_pushMetrics checks the number of TimeSeries received by server and the number of metrics dropped is the same as // expected func Test_pushMetrics(t *testing.T) { - // fail cases - noTempBatch := pdatautil.MetricsFromOldInternalMetrics(testdataold.GenerateMetricDataManyMetricsSameResource(10)) - invalidTypeBatch := pdatautil.MetricsFromOldInternalMetrics(testdataold.GenerateMetricDataMetricTypeInvalid()) - invalidTemp := testdataold.GenerateMetricDataManyMetricsSameResource(10) - setTemporality(&invalidTemp, otlp.MetricDescriptor_INVALID_TEMPORALITY) - invalidTempBatch := 
pdatautil.MetricsFromOldInternalMetrics(invalidTemp) + noTempBatch := pdatautil.MetricsFromOldInternalMetrics((testdataold.GenerateMetricDataManyMetricsSameResource(10))) + invalidTypeBatch := pdatautil.MetricsFromOldInternalMetrics((testdataold.GenerateMetricDataMetricTypeInvalid())) + nilDescBatch := pdatautil.MetricsFromOldInternalMetrics((testdataold.GenerateMetricDataNilMetricDescriptor())) + + // 10 counter metrics, 2 points in each. Two TimeSeries in total + batch := testdataold.GenerateMetricDataManyMetricsSameResource(10) + setCumulative(&batch) + scalarBatch := pdatautil.MetricsFromOldInternalMetrics((batch)) - nilDescBatch := pdatautil.MetricsFromOldInternalMetrics(testdataold.GenerateMetricDataNilMetricDescriptor()) nilBatch1 := testdataold.GenerateMetricDataManyMetricsSameResource(10) nilBatch2 := testdataold.GenerateMetricDataManyMetricsSameResource(10) + nilBatch3 := testdataold.GenerateMetricDataManyMetricsSameResource(10) + nilBatch4 := testdataold.GenerateMetricDataManyMetricsSameResource(10) + nilBatch5 := testdataold.GenerateMetricDataOneEmptyResourceMetrics() + nilBatch6 := testdataold.GenerateMetricDataOneEmptyInstrumentationLibrary() + nilBatch7 := testdataold.GenerateMetricDataOneMetric() + + nilResource := dataold.MetricDataToOtlp(nilBatch5) + nilResource[0] = nil + nilResourceBatch := pdatautil.MetricsFromOldInternalMetrics(dataold.MetricDataFromOtlp(nilResource)) + + nilInstrumentation := dataold.MetricDataToOtlp(nilBatch6) + nilInstrumentation[0].InstrumentationLibraryMetrics[0] = nil + nilInstrumentationBatch := pdatautil.MetricsFromOldInternalMetrics(dataold.MetricDataFromOtlp(nilInstrumentation)) + + nilMetric := dataold.MetricDataToOtlp(nilBatch7) + nilMetric[0].InstrumentationLibraryMetrics[0].Metrics[0] = nil + nilMetricBatch := pdatautil.MetricsFromOldInternalMetrics(dataold.MetricDataFromOtlp(nilMetric)) + + setCumulative(&nilBatch1) + setCumulative(&nilBatch2) + setCumulative(&nilBatch3) + setCumulative(&nilBatch4) - 
setTemporality(&nilBatch1, otlp.MetricDescriptor_CUMULATIVE) - setTemporality(&nilBatch2, otlp.MetricDescriptor_CUMULATIVE) setDataPointToNil(&nilBatch1, typeMonotonicInt64) setType(&nilBatch2, typeMonotonicDouble) + setType(&nilBatch3, typeHistogram) + setType(&nilBatch4, typeSummary) - nilIntDataPointsBatch := pdatautil.MetricsFromOldInternalMetrics(nilBatch1) - nilDoubleDataPointsBatch := pdatautil.MetricsFromOldInternalMetrics(nilBatch2) - - // Success cases: 10 counter metrics, 2 points in each. Two TimeSeries in total - batch1 := testdataold.GenerateMetricDataManyMetricsSameResource(10) - setTemporality(&batch1, otlp.MetricDescriptor_CUMULATIVE) - scalarBatch := pdatautil.MetricsFromOldInternalMetrics(batch1) + nilIntDataPointsBatch := pdatautil.MetricsFromOldInternalMetrics((nilBatch1)) + nilDoubleDataPointsBatch := pdatautil.MetricsFromOldInternalMetrics((nilBatch2)) + nilHistogramDataPointsBatch := pdatautil.MetricsFromOldInternalMetrics((nilBatch3)) - // Partial Success cases - batch2 := testdataold.GenerateMetricDataManyMetricsSameResource(10) - setTemporality(&batch2, otlp.MetricDescriptor_CUMULATIVE) - failDesc := dataold.MetricDataToOtlp(batch2)[0].InstrumentationLibraryMetrics[0].Metrics[0].GetMetricDescriptor() - failDesc.Temporality = otlp.MetricDescriptor_INVALID_TEMPORALITY - partialBatch := pdatautil.MetricsFromOldInternalMetrics(batch2) + hist := dataold.MetricDataToOtlp(testdataold.GenerateMetricDataOneMetric()) + hist[0].InstrumentationLibraryMetrics[0].Metrics[0] = &otlp.Metric{ + MetricDescriptor: getDescriptor("hist_test", histogramComb, validCombinations), + HistogramDataPoints: []*otlp.HistogramDataPoint{getHistogramDataPoint( + lbs1, + time1, + floatVal1, + uint64(intVal1), + []float64{floatVal1}, + []uint64{uint64(intVal1)}, + ), + }, + } + histBatch := pdatautil.MetricsFromOldInternalMetrics((dataold.MetricDataFromOtlp(hist))) checkFunc := func(t *testing.T, r *http.Request, expected int) { body, err := ioutil.ReadAll(r.Body) if err 
!= nil { @@ -385,6 +520,13 @@ func Test_pushMetrics(t *testing.T) { assert.EqualValues(t, expected, len(wr.Timeseries)) } + summary := dataold.MetricDataToOtlp(testdataold.GenerateMetricDataOneMetric()) + summary[0].InstrumentationLibraryMetrics[0].Metrics[0] = &otlp.Metric{ + MetricDescriptor: getDescriptor("summary_test", summaryComb, validCombinations), + SummaryDataPoints: []*otlp.SummaryDataPoint{}, + } + summaryBatch := pdatautil.MetricsFromOldInternalMetrics(dataold.MetricDataFromOtlp(summary)) + tests := []struct { name string md *pdata.Metrics @@ -404,21 +546,39 @@ func Test_pushMetrics(t *testing.T) { true, }, { - "invalid_temporality_case", - &invalidTempBatch, + "nil_desc_case", + &nilDescBatch, nil, 0, http.StatusAccepted, - pdatautil.MetricCount(invalidTempBatch), + pdatautil.MetricCount(nilDescBatch), true, }, { - "nil_desc_case", - &nilDescBatch, + "nil_resourece_case", + &nilResourceBatch, nil, 0, http.StatusAccepted, - pdatautil.MetricCount(nilDescBatch), + pdatautil.MetricCount(nilResourceBatch), + false, + }, + { + "nil_instrumentation_case", + &nilInstrumentationBatch, + nil, + 0, + http.StatusAccepted, + pdatautil.MetricCount(nilInstrumentationBatch), + false, + }, + { + "nil_metric_case", + &nilMetricBatch, + nil, + 0, + http.StatusAccepted, + pdatautil.MetricCount(nilMetricBatch), true, }, { @@ -439,6 +599,24 @@ func Test_pushMetrics(t *testing.T) { pdatautil.MetricCount(nilDoubleDataPointsBatch), true, }, + { + "nil_histogram_point_case", + &nilHistogramDataPointsBatch, + nil, + 0, + http.StatusAccepted, + pdatautil.MetricCount(nilHistogramDataPointsBatch), + true, + }, + { + "nil_histogram_point_case", + &nilHistogramDataPointsBatch, + nil, + 0, + http.StatusAccepted, + pdatautil.MetricCount(nilHistogramDataPointsBatch), + true, + }, { "no_temp_case", &noTempBatch, @@ -466,13 +644,20 @@ func Test_pushMetrics(t *testing.T) { 0, false, }, - { - "partial_success_case", - &partialBatch, + {"histogram_case", + &histBatch, checkFunc, - 2, + 4, + 
http.StatusAccepted, + 0, + false, + }, + {"summary_case", + &summaryBatch, + checkFunc, + 0, http.StatusAccepted, - 1, + pdatautil.MetricCount(summaryBatch), true, }, } diff --git a/exporter/prometheusremotewriteexporter/helper.go b/exporter/prometheusremotewriteexporter/helper.go index a11b04b2624..a08e5c91f7b 100644 --- a/exporter/prometheusremotewriteexporter/helper.go +++ b/exporter/prometheusremotewriteexporter/helper.go @@ -30,6 +30,11 @@ import ( const ( nameStr = "__name__" + sumStr = "_sum" + countStr = "_count" + bucketStr = "_bucket" + leStr = "le" + pInfStr = "+Inf" totalStr = "total" delimeter = "_" keyStr = "key" diff --git a/exporter/prometheusremotewriteexporter/testutil_test.go b/exporter/prometheusremotewriteexporter/testutil_test.go index be18b56553c..f545b495c35 100644 --- a/exporter/prometheusremotewriteexporter/testutil_test.go +++ b/exporter/prometheusremotewriteexporter/testutil_test.go @@ -156,6 +156,25 @@ func getDoubleDataPoint(labels []*commonpb.StringKeyValue, value float64, ts uin } } +func getHistogramDataPoint(labels []*commonpb.StringKeyValue, ts uint64, sum float64, count uint64, bounds []float64, buckets []uint64) *otlp.HistogramDataPoint { + bks := []*otlp.HistogramDataPoint_Bucket{} + for _, c := range buckets { + bks = append(bks, &otlp.HistogramDataPoint_Bucket{ + Count: c, + Exemplar: nil, + }) + } + return &otlp.HistogramDataPoint{ + Labels: labels, + StartTimeUnixNano: 0, + TimeUnixNano: ts, + Count: count, + Sum: sum, + Buckets: bks, + ExplicitBounds: bounds, + } +} + // Prometheus TimeSeries func getPromLabels(lbs ...string) []prompb.Label { pbLbs := prompb.Labels{ @@ -188,12 +207,11 @@ func getTimeSeries(labels []prompb.Label, samples ...prompb.Sample) *prompb.Time } } -//setCumulative is for creating the dataold.MetricData to test with -func setTemporality(metricsData *dataold.MetricData, temp otlp.MetricDescriptor_Temporality) { +func setCumulative(metricsData *dataold.MetricData) { for _, r := range 
dataold.MetricDataToOtlp(*metricsData) { for _, instMetrics := range r.InstrumentationLibraryMetrics { for _, m := range instMetrics.Metrics { - m.MetricDescriptor.Temporality = temp + m.MetricDescriptor.Temporality = otlp.MetricDescriptor_CUMULATIVE } } } diff --git a/go.mod b/go.mod index 7b972792cd1..ae2a5ba4e72 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,6 @@ require ( github.com/orijtech/prometheus-go-metrics-exporter v0.0.5 github.com/ory/go-acc v0.2.5 github.com/pavius/impi v0.0.3 - github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.7.1 github.com/prometheus/common v0.11.1 github.com/prometheus/prometheus v1.8.2-0.20200626085723-c448ada63d83 diff --git a/service/defaultcomponents/defaults.go b/service/defaultcomponents/defaults.go index 6a90913e6c5..93c509c1ecb 100644 --- a/service/defaultcomponents/defaults.go +++ b/service/defaultcomponents/defaults.go @@ -25,6 +25,7 @@ import ( "go.opentelemetry.io/collector/exporter/opencensusexporter" "go.opentelemetry.io/collector/exporter/otlpexporter" "go.opentelemetry.io/collector/exporter/prometheusexporter" + "go.opentelemetry.io/collector/exporter/prometheusremotewriteexporter" "go.opentelemetry.io/collector/exporter/zipkinexporter" "go.opentelemetry.io/collector/extension/fluentbitextension" "go.opentelemetry.io/collector/extension/healthcheckextension" @@ -85,6 +86,7 @@ func Components() ( exporters, err := component.MakeExporterFactoryMap( opencensusexporter.NewFactory(), prometheusexporter.NewFactory(), + prometheusremotewriteexporter.NewFactory(), loggingexporter.NewFactory(), zipkinexporter.NewFactory(), jaegerexporter.NewFactory(), diff --git a/service/defaultcomponents/defaults_test.go b/service/defaultcomponents/defaults_test.go index 65af9b0e6f7..ab4b0e96d48 100644 --- a/service/defaultcomponents/defaults_test.go +++ b/service/defaultcomponents/defaults_test.go @@ -57,6 +57,7 @@ func TestDefaultComponents(t *testing.T) { expectedExporters := []configmodels.Type{ "opencensus", 
"prometheus", + "prometheusremotewrite", "logging", "zipkin", "jaeger",