From f1b6b1a8c034fc118484ddb6e9ee9836983f4a78 Mon Sep 17 00:00:00 2001 From: Ying Date: Thu, 15 Oct 2020 07:33:09 -0700 Subject: [PATCH 1/8] fix xray trace/span id transfer (#1264) * fix xray trace/span id transfer * add length limit on xray spanid --- .../internal/translator/parentspanid.go | 34 --------- .../internal/translator/translator.go | 74 +++++++++++++++++-- .../internal/translator/translator_test.go | 48 +++++++++++- 3 files changed, 114 insertions(+), 42 deletions(-) delete mode 100644 receiver/awsxrayreceiver/internal/translator/parentspanid.go diff --git a/receiver/awsxrayreceiver/internal/translator/parentspanid.go b/receiver/awsxrayreceiver/internal/translator/parentspanid.go deleted file mode 100644 index 128cac159185..000000000000 --- a/receiver/awsxrayreceiver/internal/translator/parentspanid.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package translator - -import ( - "go.opentelemetry.io/collector/consumer/pdata" - - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/awsxray" -) - -func addParentSpanID(seg *awsxray.Segment, parentID *string, span *pdata.Span) { - if parentID != nil { - // `seg` is an embedded subsegment. Please refer to: - // https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-subsegments - // for the difference between an embedded and an independent subsegment. - span.SetParentSpanID(pdata.NewSpanID([]byte(*parentID))) - } else if seg.ParentID != nil { - // `seg` is an independent subsegment - span.SetParentSpanID(pdata.NewSpanID([]byte(*seg.ParentID))) - } - // else: `seg` is the root segment with no parent segment. -} diff --git a/receiver/awsxrayreceiver/internal/translator/translator.go b/receiver/awsxrayreceiver/internal/translator/translator.go index 8e72e7c8a621..f18ad8223d2e 100644 --- a/receiver/awsxrayreceiver/internal/translator/translator.go +++ b/receiver/awsxrayreceiver/internal/translator/translator.go @@ -15,7 +15,9 @@ package translator import ( + "encoding/hex" "encoding/json" + "errors" "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/translator/conventions" @@ -134,16 +136,51 @@ func populateSpan( return err } + // decode trace id + var traceIDBytes []byte if seg.TraceID == nil { // if seg.TraceID is nil, then `seg` must be an embedded subsegment. 
- span.SetTraceID(pdata.NewTraceID([]byte(*traceID))) + traceIDBytes, err = decodeXRayTraceID(traceID) + if err != nil { + return err + } + } else { - span.SetTraceID(pdata.NewTraceID([]byte(*seg.TraceID))) + traceIDBytes, err = decodeXRayTraceID(seg.TraceID) + if err != nil { + return err + } + } - span.SetSpanID(pdata.NewSpanID([]byte(*seg.ID))) - addParentSpanID(seg, parentID, span) - addStartTime(seg.StartTime, span) + // decode parent id + var parentIDBytes []byte + if parentID != nil { + parentIDBytes, err = decodeXRaySpanID(parentID) + if err != nil { + return err + } + } else if seg.ParentID != nil { + parentIDBytes, err = decodeXRaySpanID(seg.ParentID) + if err != nil { + return err + } + } + + // decode span id + spanIDBytes, err := decodeXRaySpanID(seg.ID) + if err != nil { + return err + } + + span.SetTraceID(pdata.NewTraceID(traceIDBytes)) + span.SetSpanID(pdata.NewSpanID(spanIDBytes)) + + if parentIDBytes != nil { + span.SetParentSpanID(pdata.NewSpanID(parentIDBytes)) + } + + addStartTime(seg.StartTime, span) addEndTime(seg.EndTime, span) addBool(seg.InProgress, awsxray.AWSXRayInProgressAttribute, &attrs) addString(seg.User, conventions.AttributeEnduserID, &attrs) @@ -189,3 +226,30 @@ func totalSegmentsCount(seg awsxray.Segment) int { return 1 + subsegmentCount } + +/* +decodeXRayTraceID decodes the traceid from xraysdk +one example of xray format: "1-5f84c7a1-e7d1852db8c4fd35d88bf49a" +decodeXRayTraceID transfers it to "5f84c7a1e7d1852db8c4fd35d88bf49a" and decode it from hex +*/ +func decodeXRayTraceID(traceID *string) ([]byte, error) { + if traceID == nil { + return nil, errors.New("traceID is null") + } + if len(*traceID) < 35 { + return nil, errors.New("traceID length is wrong") + } + traceIDtoBeDecoded := (*traceID)[2:10] + (*traceID)[11:] + return hex.DecodeString(traceIDtoBeDecoded) +} + +// decodeXRaySpanID decodes the spanid from xraysdk +func decodeXRaySpanID(spanID *string) ([]byte, error) { + if spanID == nil { + return nil, errors.New("spanid is null") + } + if len(*spanID) != 16 { + return nil, errors.New("spanID length is wrong") + } + return hex.DecodeString(*spanID) +} diff --git a/receiver/awsxrayreceiver/internal/translator/translator_test.go b/receiver/awsxrayreceiver/internal/translator/translator_test.go index b05df5180b62..9722e6837e02 100644 --- a/receiver/awsxrayreceiver/internal/translator/translator_test.go +++ b/receiver/awsxrayreceiver/internal/translator/translator_test.go @@ -953,9 +953,11 @@ func initResourceSpans(expectedSeg *awsxray.Segment, for i, props := range propsPerSpan { sp := ls.Spans().At(i) - sp.SetSpanID(pdata.NewSpanID([]byte(props.spanID))) + spanIDBytes, _ := decodeXRaySpanID(&props.spanID) + sp.SetSpanID(pdata.NewSpanID(spanIDBytes)) if props.parentSpanID != nil { - sp.SetParentSpanID(pdata.NewSpanID([]byte(*props.parentSpanID))) + parentIDBytes, _ := decodeXRaySpanID(props.parentSpanID) + sp.SetParentSpanID(pdata.NewSpanID(parentIDBytes)) } sp.SetName(props.name) sp.SetStartTime(pdata.TimestampUnixNano(props.startTimeSec * float64(time.Second))) @@ -963,7 +965,8 @@ func initResourceSpans(expectedSeg *awsxray.Segment, sp.SetEndTime(pdata.TimestampUnixNano(*props.endTimeSec * float64(time.Second))) } sp.SetKind(props.spanKind) - sp.SetTraceID(pdata.NewTraceID([]byte(props.traceID))) + traceIDBytes, _ := decodeXRayTraceID(&props.traceID) + sp.SetTraceID(pdata.NewTraceID(traceIDBytes)) sp.Status().InitEmpty() sp.Status().SetMessage(props.spanStatus.message) sp.Status().SetCode(props.spanStatus.code) @@ -1060,3 +1063,42 @@ func 
compare2ResourceSpans(t *testing.T, testCase string, exp, act *pdata.Resour assert.Equal(t, exp, act, testCase+": actual ResourceSpans differ from the expected") } + +func TestDecodeXRayTraceID(t *testing.T) { + // normal + traceID := "1-5f84c7a1-e7d1852db8c4fd35d88bf49a" + traceIDBytes, err := decodeXRayTraceID(&traceID) + expectedTraceIDBytes := []byte("\x5f\x84\xc7\xa1\xe7\xd1\x85\x2d\xb8\xc4\xfd\x35\xd8\x8b\xf4\x9a") + if assert.NoError(t, err) { + assert.Equal(t, traceIDBytes, expectedTraceIDBytes) + } + + // invalid format + traceID = "1-5f84c7a1-e7d1852db" + _, err = decodeXRayTraceID(&traceID) + assert.Error(t, err) + + // null point + _, err = decodeXRayTraceID(nil) + assert.Error(t, err) +} + +func TestDecodeXRaySpanID(t *testing.T) { + // normal + spanID := "defdfd9912dc5a56" + spanIDBytes, err := decodeXRaySpanID(&spanID) + expectedSpanIDBytes := []byte("\xde\xfd\xfd\x99\x12\xdc\x5a\x56") + if assert.NoError(t, err) { + assert.Equal(t, spanIDBytes, expectedSpanIDBytes) + } + + // invalid format + spanID = "12345566" + _, err = decodeXRaySpanID(&spanID) + assert.Error(t, err) + + // null point + _, err = decodeXRaySpanID(nil) + assert.Error(t, err) + +} From c36d7a036365c75c0be29198319a34f41a907a99 Mon Sep 17 00:00:00 2001 From: Akash Suresh Date: Thu, 15 Oct 2020 10:33:45 -0400 Subject: [PATCH 2/8] sfxexporter: Update disk.summary_utilization translation rule to accomodate new labels (#1258) Update translation rules to mean over "mode" and "mountpoint" which are recently added labels to the filesystems scraper in hostmetrics receiver. This will ensure utilization is not double counted across mountpoints of a single device. Also, add "type" (also a newly added label) to existing translation rules to sum across fs types as well apart from device name. 
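To make the intent concrete, here is a condensed sketch of the aggregation order (illustration only; the authoritative rules are the ones added to exporter/signalfxexporter/translation/constants.go in the diff below, and the metric and label names are taken from the description above). The new rules first average away the per-mode/per-mountpoint duplication for a device, and only then sum across devices and filesystem types:

    # sketch of the two-stage aggregation described above (illustration only)
    - action: aggregate_metric
      metric_name: disk.summary_total
      aggregation_method: avg        # collapse duplicate mode/mountpoint series for the same device
      without_dimensions:
      - mode
      - mountpoint
    - action: aggregate_metric
      metric_name: disk.summary_total
      aggregation_method: sum        # then sum across devices and filesystem types
      without_dimensions:
      - state
      - device
      - type

For example (hypothetical numbers), a device mounted at two mountpoints reports the same usage once per mountpoint; summing directly across all dimensions would double it, while averaging over "mode" and "mountpoint" first keeps a single value per device before the sum across "device" and "type".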
--- exporter/signalfxexporter/factory_test.go | 2 +- .../signalfxexporter/translation/constants.go | 16 +++++++++++++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/exporter/signalfxexporter/factory_test.go b/exporter/signalfxexporter/factory_test.go index 8d3e731f2892..eef8377c4b94 100644 --- a/exporter/signalfxexporter/factory_test.go +++ b/exporter/signalfxexporter/factory_test.go @@ -180,7 +180,7 @@ func TestCreateMetricsExporterWithDefaultTranslaitonRules(t *testing.T) { // Validate that default translation rules are loaded // Expected values has to be updated once default config changed - assert.Equal(t, 49, len(config.TranslationRules)) + assert.Equal(t, 51, len(config.TranslationRules)) assert.Equal(t, translation.ActionRenameDimensionKeys, config.TranslationRules[0].Action) assert.Equal(t, 33, len(config.TranslationRules[0].Mapping)) } diff --git a/exporter/signalfxexporter/translation/constants.go b/exporter/signalfxexporter/translation/constants.go index 5fe9e8cf7ca0..2347fc1464e6 100644 --- a/exporter/signalfxexporter/translation/constants.go +++ b/exporter/signalfxexporter/translation/constants.go @@ -376,12 +376,19 @@ translation_rules: - action: copy_metrics mapping: system.filesystem.usage: disk.summary_total +- action: aggregate_metric + metric_name: disk.summary_total + aggregation_method: avg + without_dimensions: + - mode + - mountpoint - action: aggregate_metric metric_name: disk.summary_total aggregation_method: sum without_dimensions: - state - device + - type # convert filesystem metrics - action: split_metric @@ -401,12 +408,19 @@ translation_rules: # df_complex.used_total - action: copy_metrics mapping: - df_complex.used: df_complex.used_total + df_complex.used: df_complex.used_total +- action: aggregate_metric + metric_name: df_complex.used_total + aggregation_method: avg + without_dimensions: + - mode + - mountpoint - action: aggregate_metric metric_name: df_complex.used_total aggregation_method: sum without_dimensions: - device + - type # disk utilization - action: calculate_new_metric From 5c5ca6e037d7dddcdc00e1665ae6c0e4edd4f3a8 Mon Sep 17 00:00:00 2001 From: Eric Mustin Date: Thu, 15 Oct 2020 18:43:39 +0200 Subject: [PATCH 3/8] exporters/datadog: remove trace support for windows for now (#1274) This PR removes windows support for trace export for the datadog exporter for windows as it relies on packages which use cgo/gcc, which are causing the `windows-test` build step to fail https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/1228. It should fix the failing build on master, cc @tigrannajaryan @mx-psi @james-bebbington --- exporter/datadogexporter/README.md | 4 ++++ exporter/datadogexporter/model.go | 2 ++ exporter/datadogexporter/translate_traces.go | 2 ++ exporter/datadogexporter/translate_traces_test.go | 2 ++ 4 files changed, 10 insertions(+) diff --git a/exporter/datadogexporter/README.md b/exporter/datadogexporter/README.md index fb0d10627777..3e420ed2da9c 100644 --- a/exporter/datadogexporter/README.md +++ b/exporter/datadogexporter/README.md @@ -22,3 +22,7 @@ datadog: The hostname, environment, service and version can be set in the configuration for unified service tagging. See the sample configuration file under the `example` folder for other available options. 
+ +## Trace Export Configuration + +_Note: Trace Export is not supported on windows at the moment_ \ No newline at end of file diff --git a/exporter/datadogexporter/model.go b/exporter/datadogexporter/model.go index 9db89010475e..63c0088deffa 100644 --- a/exporter/datadogexporter/model.go +++ b/exporter/datadogexporter/model.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// +build !windows + package datadogexporter import ( diff --git a/exporter/datadogexporter/translate_traces.go b/exporter/datadogexporter/translate_traces.go index da3eaf2978ad..9d454d3ea511 100644 --- a/exporter/datadogexporter/translate_traces.go +++ b/exporter/datadogexporter/translate_traces.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// +build !windows + package datadogexporter import ( diff --git a/exporter/datadogexporter/translate_traces_test.go b/exporter/datadogexporter/translate_traces_test.go index 6cae786a2dd1..3558f93fd1e5 100644 --- a/exporter/datadogexporter/translate_traces_test.go +++ b/exporter/datadogexporter/translate_traces_test.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// +build !windows + package datadogexporter import ( From 9349e4c1b64e68fee8d04f2a21fa3e13032ff781 Mon Sep 17 00:00:00 2001 From: Antoine Toulme Date: Thu, 15 Oct 2020 10:51:59 -0700 Subject: [PATCH 4/8] Support receiving logs with splunkhec receiver (#1268) Adds the ability for the splunkhec receiver to ingest logs. --- exporter/splunkhecexporter/client.go | 6 +- exporter/splunkhecexporter/client_test.go | 6 +- exporter/splunkhecexporter/exporter.go | 4 +- exporter/splunkhecexporter/exporter_test.go | 4 +- .../splunkhecexporter/logdata_to_splunk.go | 10 +- .../logdata_to_splunk_test.go | 46 +- .../splunkhecexporter/metricdata_to_splunk.go | 12 +- .../metricdata_to_splunk_test.go | 20 +- .../splunkhecexporter/tracedata_to_splunk.go | 18 +- .../tracedata_to_splunk_test.go | 12 +- internal/common/splunk/common.go | 24 +- internal/common/splunk/common_test.go | 44 +- receiver/splunkhecreceiver/config.go | 5 + receiver/splunkhecreceiver/factory.go | 22 +- receiver/splunkhecreceiver/factory_test.go | 24 + receiver/splunkhecreceiver/go.mod | 6 + receiver/splunkhecreceiver/receiver.go | 303 +++++++++++ receiver/splunkhecreceiver/receiver_test.go | 484 ++++++++++++++++++ .../splunkhecreceiver/splunk_to_logdata.go | 149 ++++++ .../splunk_to_logdata_test.go | 221 ++++++++ 20 files changed, 1335 insertions(+), 85 deletions(-) create mode 100644 receiver/splunkhecreceiver/receiver.go create mode 100644 receiver/splunkhecreceiver/receiver_test.go create mode 100644 receiver/splunkhecreceiver/splunk_to_logdata.go create mode 100644 receiver/splunkhecreceiver/splunk_to_logdata_test.go diff --git a/exporter/splunkhecexporter/client.go b/exporter/splunkhecexporter/client.go index 7e54a3e60297..88eb2203f5f7 100644 --- a/exporter/splunkhecexporter/client.go +++ b/exporter/splunkhecexporter/client.go @@ -118,7 +118,7 @@ func (c *client) pushTraceData( return numDroppedSpans, nil } -func (c *client) sendSplunkEvents(ctx context.Context, splunkEvents []*splunkEvent) error { +func (c *client) sendSplunkEvents(ctx context.Context, splunkEvents []*splunk.Event) error { body, compressed, err := encodeBodyEvents(&c.zippers, splunkEvents, c.config.DisableCompression) if err != nil { return consumererror.Permanent(err) 
@@ -173,7 +173,7 @@ func (c *client) pushLogData(ctx context.Context, ld pdata.Logs) (numDroppedLogs return numDroppedLogs, nil } -func encodeBodyEvents(zippers *sync.Pool, evs []*splunkEvent, disableCompression bool) (bodyReader io.Reader, compressed bool, err error) { +func encodeBodyEvents(zippers *sync.Pool, evs []*splunk.Event, disableCompression bool) (bodyReader io.Reader, compressed bool, err error) { buf := new(bytes.Buffer) encoder := json.NewEncoder(buf) for _, e := range evs { @@ -186,7 +186,7 @@ func encodeBodyEvents(zippers *sync.Pool, evs []*splunkEvent, disableCompression return getReader(zippers, buf, disableCompression) } -func encodeBody(zippers *sync.Pool, dps []*splunk.Metric, disableCompression bool) (bodyReader io.Reader, compressed bool, err error) { +func encodeBody(zippers *sync.Pool, dps []*splunk.Event, disableCompression bool) (bodyReader io.Reader, compressed bool, err error) { buf := new(bytes.Buffer) encoder := json.NewEncoder(buf) for _, e := range dps { diff --git a/exporter/splunkhecexporter/client_test.go b/exporter/splunkhecexporter/client_test.go index fa69e3648950..2dd608d903bd 100644 --- a/exporter/splunkhecexporter/client_test.go +++ b/exporter/splunkhecexporter/client_test.go @@ -409,7 +409,7 @@ func TestInvalidJson(t *testing.T) { syncPool := sync.Pool{New: func() interface{} { return gzip.NewWriter(nil) }} - evs := []*splunkEvent{ + evs := []*splunk.Event{ { Event: badEvent, }, @@ -429,7 +429,7 @@ func TestInvalidJsonClient(t *testing.T) { badEvent := badJSON{ Foo: math.Inf(1), } - evs := []*splunkEvent{ + evs := []*splunk.Event{ { Event: badEvent, }, @@ -454,6 +454,6 @@ func TestInvalidURLClient(t *testing.T) { }}, config: &Config{}, } - err := c.sendSplunkEvents(context.Background(), []*splunkEvent{}) + err := c.sendSplunkEvents(context.Background(), []*splunk.Event{}) assert.EqualError(t, err, "Permanent error: parse \"//in%20va%20lid\": invalid URL escape \"%20\"") } diff --git a/exporter/splunkhecexporter/exporter.go b/exporter/splunkhecexporter/exporter.go index cadbb69878d3..c9536f7fdd10 100644 --- a/exporter/splunkhecexporter/exporter.go +++ b/exporter/splunkhecexporter/exporter.go @@ -29,6 +29,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/pdata" "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/splunk" ) const ( @@ -105,7 +107,7 @@ func buildClient(options *exporterOptions, config *Config, logger *zap.Logger) * "Connection": "keep-alive", "Content-Type": "application/json", "User-Agent": "OpenTelemetry-Collector Splunk Exporter/v0.0.1", - "Authorization": "Splunk " + config.Token, + "Authorization": splunk.SplunkHECTokenHeader + " " + config.Token, }, config: config, } diff --git a/exporter/splunkhecexporter/exporter_test.go b/exporter/splunkhecexporter/exporter_test.go index 1f1bc36f6255..566677651e37 100644 --- a/exporter/splunkhecexporter/exporter_test.go +++ b/exporter/splunkhecexporter/exporter_test.go @@ -98,7 +98,7 @@ func TestConsumeMetricsData(t *testing.T) { t.Fatal("Small batch should not be compressed") } firstPayload := strings.Split(string(body), "\n\r\n\r")[0] - var metric splunk.Metric + var metric splunk.Event err = json.Unmarshal([]byte(firstPayload), &metric) if err != nil { t.Fatal(err) @@ -249,7 +249,7 @@ func TestConsumeLogsData(t *testing.T) { t.Fatal("Small batch should not be compressed") } firstPayload := strings.Split(string(body), "\n\r\n\r")[0] - var event splunkEvent + var event splunk.Event err = 
json.Unmarshal([]byte(firstPayload), &event) if err != nil { t.Fatal(err) diff --git a/exporter/splunkhecexporter/logdata_to_splunk.go b/exporter/splunkhecexporter/logdata_to_splunk.go index 5310e1143c73..a9358d5197c4 100644 --- a/exporter/splunkhecexporter/logdata_to_splunk.go +++ b/exporter/splunkhecexporter/logdata_to_splunk.go @@ -24,9 +24,9 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/splunk" ) -func logDataToSplunk(logger *zap.Logger, ld pdata.Logs, config *Config) ([]*splunkEvent, int) { +func logDataToSplunk(logger *zap.Logger, ld pdata.Logs, config *Config) ([]*splunk.Event, int) { numDroppedLogs := 0 - splunkEvents := make([]*splunkEvent, 0) + splunkEvents := make([]*splunk.Event, 0) rls := ld.ResourceLogs() for i := 0; i < rls.Len(); i++ { rl := rls.At(i) @@ -60,14 +60,14 @@ func logDataToSplunk(logger *zap.Logger, ld pdata.Logs, config *Config) ([]*splu return splunkEvents, numDroppedLogs } -func mapLogRecordToSplunkEvent(lr pdata.LogRecord, config *Config, logger *zap.Logger) *splunkEvent { +func mapLogRecordToSplunkEvent(lr pdata.LogRecord, config *Config, logger *zap.Logger) *splunk.Event { if lr.Body().IsNil() { return nil } var host string var source string var sourcetype string - fields := map[string]string{} + fields := map[string]interface{}{} lr.Attributes().ForEach(func(k string, v pdata.AttributeValue) { if v.Type() != pdata.AttributeValueSTRING { logger.Debug("Failed to convert log record attribute value to Splunk property value, value is not a string", zap.String("key", k)) @@ -97,7 +97,7 @@ func mapLogRecordToSplunkEvent(lr pdata.LogRecord, config *Config, logger *zap.L } eventValue := convertAttributeValue(lr.Body(), logger) - return &splunkEvent{ + return &splunk.Event{ Time: nanoTimestampToEpochMilliseconds(lr.Timestamp()), Host: host, Source: source, diff --git a/exporter/splunkhecexporter/logdata_to_splunk_test.go b/exporter/splunkhecexporter/logdata_to_splunk_test.go index b1c932e43c06..11818670febf 100644 --- a/exporter/splunkhecexporter/logdata_to_splunk_test.go +++ b/exporter/splunkhecexporter/logdata_to_splunk_test.go @@ -34,7 +34,7 @@ func Test_logDataToSplunk(t *testing.T) { name string logDataFn func() pdata.Logs configDataFn func() *Config - wantSplunkEvents []*splunkEvent + wantSplunkEvents []*splunk.Event wantNumDroppedLogs int }{ { @@ -56,8 +56,8 @@ func Test_logDataToSplunk(t *testing.T) { SourceType: "sourcetype", } }, - wantSplunkEvents: []*splunkEvent{ - commonLogSplunkEvent("mylog", ts, map[string]string{"custom": "custom"}, "myhost", "myapp", "myapp-type"), + wantSplunkEvents: []*splunk.Event{ + commonLogSplunkEvent("mylog", ts, map[string]interface{}{"custom": "custom"}, "myhost", "myapp", "myapp-type"), }, wantNumDroppedLogs: 0, }, @@ -80,8 +80,8 @@ func Test_logDataToSplunk(t *testing.T) { SourceType: "sourcetype", } }, - wantSplunkEvents: []*splunkEvent{ - commonLogSplunkEvent("mylog", ts, map[string]string{}, "myhost", "myapp", "myapp-type"), + wantSplunkEvents: []*splunk.Event{ + commonLogSplunkEvent("mylog", ts, map[string]interface{}{}, "myhost", "myapp", "myapp-type"), }, wantNumDroppedLogs: 0, }, @@ -101,8 +101,8 @@ func Test_logDataToSplunk(t *testing.T) { SourceType: "sourcetype", } }, - wantSplunkEvents: []*splunkEvent{ - commonLogSplunkEvent("mylog", ts, map[string]string{"custom": "custom"}, "unknown", "source", "sourcetype"), + wantSplunkEvents: []*splunk.Event{ + commonLogSplunkEvent("mylog", ts, map[string]interface{}{"custom": "custom"}, "unknown", "source", "sourcetype"), }, 
wantNumDroppedLogs: 0, }, @@ -119,7 +119,7 @@ func Test_logDataToSplunk(t *testing.T) { SourceType: "sourcetype", } }, - wantSplunkEvents: []*splunkEvent{}, + wantSplunkEvents: []*splunk.Event{}, wantNumDroppedLogs: 1, }, { @@ -141,8 +141,8 @@ func Test_logDataToSplunk(t *testing.T) { SourceType: "sourcetype", } }, - wantSplunkEvents: []*splunkEvent{ - commonLogSplunkEvent(float64(42), ts, map[string]string{"custom": "custom"}, "myhost", "myapp", "myapp-type"), + wantSplunkEvents: []*splunk.Event{ + commonLogSplunkEvent(float64(42), ts, map[string]interface{}{"custom": "custom"}, "myhost", "myapp", "myapp-type"), }, wantNumDroppedLogs: 0, }, @@ -165,8 +165,8 @@ func Test_logDataToSplunk(t *testing.T) { SourceType: "sourcetype", } }, - wantSplunkEvents: []*splunkEvent{ - commonLogSplunkEvent(int64(42), ts, map[string]string{"custom": "custom"}, "myhost", "myapp", "myapp-type"), + wantSplunkEvents: []*splunk.Event{ + commonLogSplunkEvent(int64(42), ts, map[string]interface{}{"custom": "custom"}, "myhost", "myapp", "myapp-type"), }, wantNumDroppedLogs: 0, }, @@ -189,8 +189,8 @@ func Test_logDataToSplunk(t *testing.T) { SourceType: "sourcetype", } }, - wantSplunkEvents: []*splunkEvent{ - commonLogSplunkEvent(true, ts, map[string]string{"custom": "custom"}, "myhost", "myapp", "myapp-type"), + wantSplunkEvents: []*splunk.Event{ + commonLogSplunkEvent(true, ts, map[string]interface{}{"custom": "custom"}, "myhost", "myapp", "myapp-type"), }, wantNumDroppedLogs: 0, }, @@ -216,8 +216,8 @@ func Test_logDataToSplunk(t *testing.T) { SourceType: "sourcetype", } }, - wantSplunkEvents: []*splunkEvent{ - commonLogSplunkEvent(map[string]interface{}{"23": float64(45), "foo": "bar"}, ts, map[string]string{"custom": "custom"}, "myhost", "myapp", "myapp-type"), + wantSplunkEvents: []*splunk.Event{ + commonLogSplunkEvent(map[string]interface{}{"23": float64(45), "foo": "bar"}, ts, map[string]interface{}{"custom": "custom"}, "myhost", "myapp", "myapp-type"), }, wantNumDroppedLogs: 0, }, @@ -240,8 +240,8 @@ func Test_logDataToSplunk(t *testing.T) { SourceType: "sourcetype", } }, - wantSplunkEvents: []*splunkEvent{ - commonLogSplunkEvent(nil, ts, map[string]string{"custom": "custom"}, "myhost", "myapp", "myapp-type"), + wantSplunkEvents: []*splunk.Event{ + commonLogSplunkEvent(nil, ts, map[string]interface{}{"custom": "custom"}, "myhost", "myapp", "myapp-type"), }, wantNumDroppedLogs: 0, }, @@ -266,8 +266,8 @@ func Test_logDataToSplunk(t *testing.T) { SourceType: "sourcetype", } }, - wantSplunkEvents: []*splunkEvent{ - commonLogSplunkEvent([]interface{}{"foo"}, ts, map[string]string{"custom": "custom"}, "myhost", "myapp", "myapp-type"), + wantSplunkEvents: []*splunk.Event{ + commonLogSplunkEvent([]interface{}{"foo"}, ts, map[string]interface{}{"custom": "custom"}, "myhost", "myapp", "myapp-type"), }, wantNumDroppedLogs: 0, }, @@ -300,12 +300,12 @@ func makeLog(record pdata.LogRecord) pdata.Logs { func commonLogSplunkEvent( event interface{}, ts pdata.TimestampUnixNano, - fields map[string]string, + fields map[string]interface{}, host string, source string, sourcetype string, -) *splunkEvent { - return &splunkEvent{ +) *splunk.Event { + return &splunk.Event{ Time: nanoTimestampToEpochMilliseconds(ts), Host: host, Event: event, diff --git a/exporter/splunkhecexporter/metricdata_to_splunk.go b/exporter/splunkhecexporter/metricdata_to_splunk.go index 93a8197e5fb5..6c8f91548359 100644 --- a/exporter/splunkhecexporter/metricdata_to_splunk.go +++ b/exporter/splunkhecexporter/metricdata_to_splunk.go @@ -31,10 +31,6 @@ 
import ( ) const ( - // hecEventMetricType is the type of HEC event. Set to metric, as per https://docs.splunk.com/Documentation/Splunk/8.0.3/Metrics/GetMetricsInOther. - hecEventMetricType = "metric" - // hostnameLabel is the hostname label name. - // unknownHostName is the default host name when no hostname label is passed. unknownHostName = "unknown" // separator for metric values. @@ -56,11 +52,11 @@ var ( infinityBoundSFxDimValue = float64ToDimValue(math.Inf(1)) ) -func metricDataToSplunk(logger *zap.Logger, data pdata.Metrics, config *Config) ([]*splunk.Metric, int, error) { +func metricDataToSplunk(logger *zap.Logger, data pdata.Metrics, config *Config) ([]*splunk.Event, int, error) { ocmds := internaldata.MetricsToOC(data) numDroppedTimeSeries := 0 _, numPoints := data.MetricAndDataPointCount() - splunkMetrics := make([]*splunk.Metric, 0, numPoints) + splunkMetrics := make([]*splunk.Event, 0, numPoints) for _, ocmd := range ocmds { var host string if ocmd.Resource != nil { @@ -99,13 +95,13 @@ func metricDataToSplunk(logger *zap.Logger, data pdata.Metrics, config *Config) for i, desc := range metric.MetricDescriptor.GetLabelKeys() { fields[desc.Key] = timeSeries.LabelValues[i].Value } - sm := &splunk.Metric{ + sm := &splunk.Event{ Time: timestampToEpochMilliseconds(tsPoint.GetTimestamp()), Host: host, Source: config.Source, SourceType: config.SourceType, Index: config.Index, - Event: hecEventMetricType, + Event: splunk.HecEventMetricType, Fields: fields, } splunkMetrics = append(splunkMetrics, sm) diff --git a/exporter/splunkhecexporter/metricdata_to_splunk_test.go b/exporter/splunkhecexporter/metricdata_to_splunk_test.go index c5374c5d451a..c2436f4f3798 100644 --- a/exporter/splunkhecexporter/metricdata_to_splunk_test.go +++ b/exporter/splunkhecexporter/metricdata_to_splunk_test.go @@ -63,7 +63,7 @@ func Test_metricDataToSplunk(t *testing.T) { tests := []struct { name string metricsDataFn func() consumerdata.MetricsData - wantSplunkMetrics []*splunk.Metric + wantSplunkMetrics []*splunk.Event wantNumDroppedTimeseries int }{ { @@ -78,7 +78,7 @@ func Test_metricDataToSplunk(t *testing.T) { }, } }, - wantSplunkMetrics: []*splunk.Metric{ + wantSplunkMetrics: []*splunk.Event{ commonSplunkMetric("gauge_double_with_dims", tsMSecs, []string{}, []string{}, doubleVal), commonSplunkMetric("gauge_int_with_dims", tsMSecs, []string{}, []string{}, int64Val), commonSplunkMetric("cumulative_double_with_dims", tsMSecs, []string{}, []string{}, doubleVal), @@ -97,7 +97,7 @@ func Test_metricDataToSplunk(t *testing.T) { }, } }, - wantSplunkMetrics: []*splunk.Metric{ + wantSplunkMetrics: []*splunk.Event{ commonSplunkMetric("gauge_double_with_dims", tsMSecs, keys, values, doubleVal), commonSplunkMetric("gauge_int_with_dims", tsMSecs, keys, values, int64Val), commonSplunkMetric("cumulative_double_with_dims", tsMSecs, keys, values, doubleVal), @@ -126,7 +126,7 @@ func Test_metricDataToSplunk(t *testing.T) { }, } }, - wantSplunkMetrics: []*splunk.Metric{ + wantSplunkMetrics: []*splunk.Event{ commonSplunkMetric( "gauge_double_with_dims", tsMSecs, @@ -173,7 +173,7 @@ func Test_metricDataToSplunk(t *testing.T) { } } -func sortMetrics(metrics []*splunk.Metric) { +func sortMetrics(metrics []*splunk.Event) { sort.Slice(metrics, func(p, q int) bool { firstField := getFieldValue(metrics[p]) secondField := getFieldValue(metrics[q]) @@ -181,7 +181,7 @@ func sortMetrics(metrics []*splunk.Metric) { }) } -func getFieldValue(metric *splunk.Metric) string { +func getFieldValue(metric *splunk.Event) string { for k := range 
metric.Fields { if strings.HasPrefix(k, "metric_name:") { return k @@ -196,14 +196,14 @@ func commonSplunkMetric( keys []string, values []string, val interface{}, -) *splunk.Metric { +) *splunk.Event { fields := map[string]interface{}{fmt.Sprintf("metric_name:%s", metricName): val} for i, k := range keys { fields[k] = values[i] } - return &splunk.Metric{ + return &splunk.Event{ Time: ts, Host: "unknown", Event: "metric", @@ -217,12 +217,12 @@ func expectedFromDistribution( keys []string, values []string, distributionTimeSeries *metricspb.TimeSeries, -) []*splunk.Metric { +) []*splunk.Event { distributionValue := distributionTimeSeries.Points[0].GetDistributionValue() // Three additional data points: one for count, one for sum and one for sum of squared deviation. const extraDataPoints = 3 - dps := make([]*splunk.Metric, 0, len(distributionValue.Buckets)+extraDataPoints) + dps := make([]*splunk.Event, 0, len(distributionValue.Buckets)+extraDataPoints) dps = append(dps, commonSplunkMetric(metricName, ts, keys, values, diff --git a/exporter/splunkhecexporter/tracedata_to_splunk.go b/exporter/splunkhecexporter/tracedata_to_splunk.go index 251deb5a26d7..10ea0b38ccdd 100644 --- a/exporter/splunkhecexporter/tracedata_to_splunk.go +++ b/exporter/splunkhecexporter/tracedata_to_splunk.go @@ -19,22 +19,14 @@ import ( "go.opentelemetry.io/collector/translator/conventions" "go.opentelemetry.io/collector/translator/internaldata" "go.uber.org/zap" -) -type splunkEvent struct { - Time float64 `json:"time"` // epoch time - Host string `json:"host"` // hostname - Source string `json:"source,omitempty"` // optional description of the source of the event; typically the app's name - SourceType string `json:"sourcetype,omitempty"` // optional name of a Splunk parsing configuration; this is usually inferred by Splunk - Index string `json:"index,omitempty"` // optional name of the Splunk index to store the event in; not required if the token has a default index set in Splunk - Event interface{} `json:"event"` // Payload of the event. - Fields map[string]string `json:"fields,omitempty"` // Fields of the event. 
-} + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/splunk" +) -func traceDataToSplunk(logger *zap.Logger, data pdata.Traces, config *Config) ([]*splunkEvent, int) { +func traceDataToSplunk(logger *zap.Logger, data pdata.Traces, config *Config) ([]*splunk.Event, int) { octds := internaldata.TraceDataToOC(data) numDroppedSpans := 0 - splunkEvents := make([]*splunkEvent, 0, data.SpanCount()) + splunkEvents := make([]*splunk.Event, 0, data.SpanCount()) for _, octd := range octds { var host string if octd.Resource != nil { @@ -51,7 +43,7 @@ func traceDataToSplunk(logger *zap.Logger, data pdata.Traces, config *Config) ([ numDroppedSpans++ continue } - se := &splunkEvent{ + se := &splunk.Event{ Time: timestampToEpochMilliseconds(span.StartTime), Host: host, Source: config.Source, diff --git a/exporter/splunkhecexporter/tracedata_to_splunk_test.go b/exporter/splunkhecexporter/tracedata_to_splunk_test.go index 1b73a0d67124..ad66fcaac382 100644 --- a/exporter/splunkhecexporter/tracedata_to_splunk_test.go +++ b/exporter/splunkhecexporter/tracedata_to_splunk_test.go @@ -24,6 +24,8 @@ import ( "go.opentelemetry.io/collector/translator/internaldata" "go.uber.org/zap" "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/splunk" ) func Test_traceDataToSplunk(t *testing.T) { @@ -35,7 +37,7 @@ func Test_traceDataToSplunk(t *testing.T) { tests := []struct { name string traceDataFn func() consumerdata.TraceData - wantSplunkEvents []*splunkEvent + wantSplunkEvents []*splunk.Event wantNumDroppedSpans int }{ { @@ -47,7 +49,7 @@ func Test_traceDataToSplunk(t *testing.T) { }, } }, - wantSplunkEvents: []*splunkEvent{ + wantSplunkEvents: []*splunk.Event{ commonSplunkEvent("myspan", ts), }, wantNumDroppedSpans: 0, @@ -61,7 +63,7 @@ func Test_traceDataToSplunk(t *testing.T) { }, } }, - wantSplunkEvents: []*splunkEvent{}, + wantSplunkEvents: []*splunk.Event{}, wantNumDroppedSpans: 1, }, } @@ -91,12 +93,12 @@ func makeSpan(name string, ts *timestamppb.Timestamp) *v1.Span { func commonSplunkEvent( name string, ts *timestamppb.Timestamp, -) *splunkEvent { +) *splunk.Event { trunceableName := &v1.TruncatableString{ Value: name, } span := v1.Span{Name: trunceableName, StartTime: ts} - return &splunkEvent{ + return &splunk.Event{ Time: timestampToEpochMilliseconds(ts), Host: "unknown", Event: &span, diff --git a/internal/common/splunk/common.go b/internal/common/splunk/common.go index 1fe74b6f3ac4..c7b45bd6e32f 100644 --- a/internal/common/splunk/common.go +++ b/internal/common/splunk/common.go @@ -22,26 +22,36 @@ const ( SFxEventCategoryKey = "com.splunk.signalfx.event_category" SFxEventPropertiesKey = "com.splunk.signalfx.event_properties" SourcetypeLabel = "com.splunk.sourcetype" + SplunkHECTokenHeader = "Splunk" + SplunkHecTokenLabel = "com.splunk.hec.access_token" + // HecEventMetricType is the type of HEC event. Set to metric, as per https://docs.splunk.com/Documentation/Splunk/8.0.3/Metrics/GetMetricsInOther. + HecEventMetricType = "metric" ) +// AccessTokenPassthroughConfig configures passing through access tokens. type AccessTokenPassthroughConfig struct { - // Whether to associate datapoints with an organization access token received in request. + // AccessTokenPassthrough indicates whether to associate datapoints with an organization access token received in request. 
AccessTokenPassthrough bool `mapstructure:"access_token_passthrough"` } -// Metric represents a metric in Splunk HEC format -type Metric struct { +// Event represents a metric in Splunk HEC format +type Event struct { Time float64 `json:"time"` // epoch time Host string `json:"host"` // hostname Source string `json:"source,omitempty"` // optional description of the source of the event; typically the app's name SourceType string `json:"sourcetype,omitempty"` // optional name of a Splunk parsing configuration; this is usually inferred by Splunk Index string `json:"index,omitempty"` // optional name of the Splunk index to store the event in; not required if the token has a default index set in Splunk - Event string `json:"event"` // type of event: this is a metric. - Fields map[string]interface{} `json:"fields"` // metric data + Event interface{} `json:"event"` // type of event: set to "metric" or nil if the event represents a metric, or is the payload of the event. + Fields map[string]interface{} `json:"fields,omitempty"` // dimensions and metric data } -// GetValues extracts metric key value pairs from a Splunk HEC metric. -func (m Metric) GetValues() map[string]interface{} { +// IsMetric returns true if the Splunk event is a metric. +func (m Event) IsMetric() bool { + return m.Event == HecEventMetricType || (m.Event == nil && len(m.GetMetricValues()) > 0) +} + +// GetMetricValues extracts metric key value pairs from a Splunk HEC metric. +func (m Event) GetMetricValues() map[string]interface{} { values := map[string]interface{}{} for k, v := range m.Fields { if strings.HasPrefix(k, "metric_name:") { diff --git a/internal/common/splunk/common_test.go b/internal/common/splunk/common_test.go index dae92dbbbd60..6a90e2c8dce4 100644 --- a/internal/common/splunk/common_test.go +++ b/internal/common/splunk/common_test.go @@ -21,12 +21,48 @@ import ( ) func TestGetValues(t *testing.T) { - metric := Metric{ + metric := Event{ Fields: map[string]interface{}{}, } - assert.Equal(t, map[string]interface{}{}, metric.GetValues()) + assert.Equal(t, map[string]interface{}{}, metric.GetMetricValues()) metric.Fields["metric_name:foo"] = "bar" - assert.Equal(t, map[string]interface{}{"foo": "bar"}, metric.GetValues()) + assert.Equal(t, map[string]interface{}{"foo": "bar"}, metric.GetMetricValues()) metric.Fields["metric_name:foo2"] = "foobar" - assert.Equal(t, map[string]interface{}{"foo": "bar", "foo2": "foobar"}, metric.GetValues()) + assert.Equal(t, map[string]interface{}{"foo": "bar", "foo2": "foobar"}, metric.GetMetricValues()) +} + +func TestIsMetric(t *testing.T) { + ev := Event{ + Event: map[string]interface{}{}, + } + assert.False(t, ev.IsMetric()) + metric := Event{ + Event: "metric", + } + assert.True(t, metric.IsMetric()) + arr := Event{ + Event: []interface{}{"foo", "bar"}, + } + assert.False(t, arr.IsMetric()) + yo := Event{ + Event: "yo", + } + assert.False(t, yo.IsMetric()) +} + +func TestIsMetric_WithoutEventField(t *testing.T) { + fieldsOnly := Event{ + Fields: map[string]interface{}{ + "foo": "bar", + }, + } + assert.False(t, fieldsOnly.IsMetric()) + fieldsWithMetrics := Event{ + Fields: map[string]interface{}{ + "foo": "bar", + "metric_name:foo": 123, + "foobar": "foobar", + }, + } + assert.True(t, fieldsWithMetrics.IsMetric()) } diff --git a/receiver/splunkhecreceiver/config.go b/receiver/splunkhecreceiver/config.go index e5ec6abb8a11..2c4d04f1bd3a 100644 --- a/receiver/splunkhecreceiver/config.go +++ b/receiver/splunkhecreceiver/config.go @@ -21,6 +21,11 @@ import ( 
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/splunk" ) +const ( + // hecPath is the default HEC path on the Splunk instance. + hecPath = "/services/collector" +) + // Config defines configuration for the SignalFx receiver. type Config struct { configmodels.ReceiverSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct diff --git a/receiver/splunkhecreceiver/factory.go b/receiver/splunkhecreceiver/factory.go index 22c38879057f..6b72b211bab8 100644 --- a/receiver/splunkhecreceiver/factory.go +++ b/receiver/splunkhecreceiver/factory.go @@ -45,7 +45,9 @@ func NewFactory() component.ReceiverFactory { return receiverhelper.NewFactory( typeStr, createDefaultConfig, - receiverhelper.WithMetrics(createMetricsReceiver)) + receiverhelper.WithMetrics(createMetricsReceiver), + receiverhelper.WithTraces(createTraceReceiver), + receiverhelper.WithLogs(createLogsReceiver)) } // CreateDefaultConfig creates the default configuration for Splunk HEC receiver. @@ -106,3 +108,21 @@ func createMetricsReceiver( return nil, configerror.ErrDataTypeIsNotSupported } + +// createLogsReceiver creates a logs receiver based on provided config. +func createLogsReceiver( + _ context.Context, + params component.ReceiverCreateParams, + cfg configmodels.Receiver, + consumer consumer.LogsConsumer, +) (component.LogsReceiver, error) { + + rCfg := cfg.(*Config) + + err := rCfg.validate() + if err != nil { + return nil, err + } + + return New(params.Logger, *rCfg, consumer) +} diff --git a/receiver/splunkhecreceiver/factory_test.go b/receiver/splunkhecreceiver/factory_test.go index 6be9357ead7c..3adf792b0430 100644 --- a/receiver/splunkhecreceiver/factory_test.go +++ b/receiver/splunkhecreceiver/factory_test.go @@ -37,6 +37,11 @@ func TestCreateReceiver(t *testing.T) { cfg := createDefaultConfig().(*Config) cfg.Endpoint = "localhost:1" // Endpoint is required, not going to be used here. 
+ mockLogsConsumer := exportertest.NewNopLogsExporter() + lReceiver, err := createLogsReceiver(context.Background(), component.ReceiverCreateParams{Logger: zap.NewNop()}, cfg, mockLogsConsumer) + assert.Nil(t, err, "receiver creation failed") + assert.NotNil(t, lReceiver, "receiver creation failed") + mockMetricsConsumer := exportertest.NewNopMetricsExporter() mReceiver, err := createMetricsReceiver(context.Background(), component.ReceiverCreateParams{Logger: zap.NewNop()}, cfg, mockMetricsConsumer) assert.Equal(t, err, configerror.ErrDataTypeIsNotSupported) @@ -87,3 +92,22 @@ func TestValidateBadEndpoint(t *testing.T) { err := config.validate() assert.EqualError(t, err, "endpoint port is not a number: strconv.ParseInt: parsing \"abr\": invalid syntax") } + +func TestCreateNilNextConsumer(t *testing.T) { + cfg := createDefaultConfig().(*Config) + cfg.Endpoint = "localhost:1" + + mReceiver, err := createLogsReceiver(context.Background(), component.ReceiverCreateParams{Logger: zap.NewNop()}, cfg, nil) + assert.EqualError(t, err, "nil metricsConsumer") + assert.Nil(t, mReceiver, "receiver creation failed") +} + +func TestCreateBadEndpoint(t *testing.T) { + cfg := createDefaultConfig().(*Config) + cfg.Endpoint = "localhost:abc" + + mockLogsConsumer := exportertest.NewNopLogsExporter() + mReceiver, err := createLogsReceiver(context.Background(), component.ReceiverCreateParams{Logger: zap.NewNop()}, cfg, mockLogsConsumer) + assert.EqualError(t, err, "endpoint port is not a number: strconv.ParseInt: parsing \"abc\": invalid syntax") + assert.Nil(t, mReceiver, "receiver creation failed") +} diff --git a/receiver/splunkhecreceiver/go.mod b/receiver/splunkhecreceiver/go.mod index 85be27669ac3..c0bb79ed895b 100644 --- a/receiver/splunkhecreceiver/go.mod +++ b/receiver/splunkhecreceiver/go.mod @@ -3,11 +3,17 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunk go 1.14 require ( + github.com/census-instrumentation/opencensus-proto v0.3.0 + github.com/golang/protobuf v1.4.2 + github.com/gorilla/mux v1.8.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter v0.0.0-00010101000000-000000000000 github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.0.0-00010101000000-000000000000 github.com/stretchr/testify v1.6.1 + go.opencensus.io v0.22.4 go.opentelemetry.io/collector v0.12.1-0.20201012183541-526f34200197 go.uber.org/zap v1.16.0 google.golang.org/grpc/examples v0.0.0-20200728194956-1c32b02682df // indirect + gotest.tools v2.2.0+incompatible ) replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter => ../../exporter/splunkhecexporter diff --git a/receiver/splunkhecreceiver/receiver.go b/receiver/splunkhecreceiver/receiver.go new file mode 100644 index 000000000000..8a61fe4b5a85 --- /dev/null +++ b/receiver/splunkhecreceiver/receiver.go @@ -0,0 +1,303 @@ +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package splunkhecreceiver + +import ( + "compress/gzip" + "context" + "encoding/json" + "errors" + "fmt" + "net" + "net/http" + "sync" + "time" + + "github.com/gorilla/mux" + "go.opencensus.io/trace" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/translator/conventions" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/splunk" +) + +const ( + defaultServerTimeout = 20 * time.Second + + responseOK = "OK" + responseInvalidMethod = `Only "POST" method is supported` + responseInvalidContentType = `"Content-Type" must be "application/json"` + responseInvalidEncoding = `"Content-Encoding" must be "gzip" or empty` + responseErrGzipReader = "Error on gzip body" + responseErrUnmarshalBody = "Failed to unmarshal message body" + responseErrInternalServerError = "Internal Server Error" + responseErrUnsupportedMetricEvent = "Unsupported metric event" + + // Centralizing some HTTP and related string constants. + jsonContentType = "application/json" + gzipEncoding = "gzip" + httpContentTypeHeader = "Content-Type" + httpContentEncodingHeader = "Content-Encoding" +) + +var ( + errNilNextConsumer = errors.New("nil metricsConsumer") + errEmptyEndpoint = errors.New("empty endpoint") + + okRespBody = initJSONResponse(responseOK) + invalidMethodRespBody = initJSONResponse(responseInvalidMethod) + invalidContentRespBody = initJSONResponse(responseInvalidContentType) + invalidEncodingRespBody = initJSONResponse(responseInvalidEncoding) + errGzipReaderRespBody = initJSONResponse(responseErrGzipReader) + errUnmarshalBodyRespBody = initJSONResponse(responseErrUnmarshalBody) + errInternalServerError = initJSONResponse(responseErrInternalServerError) + errUnsupportedMetricEvent = initJSONResponse(responseErrUnsupportedMetricEvent) +) + +// splunkReceiver implements the component.MetricsReceiver for Splunk HEC metric protocol. +type splunkReceiver struct { + sync.Mutex + logger *zap.Logger + config *Config + logConsumer consumer.LogsConsumer + server *http.Server +} + +var _ component.MetricsReceiver = (*splunkReceiver)(nil) + +// New creates the Splunk HEC receiver with the given configuration. +func New( + logger *zap.Logger, + config Config, + nextConsumer consumer.LogsConsumer, +) (component.MetricsReceiver, error) { + if nextConsumer == nil { + return nil, errNilNextConsumer + } + + if config.Endpoint == "" { + return nil, errEmptyEndpoint + } + + r := &splunkReceiver{ + logger: logger, + config: &config, + logConsumer: nextConsumer, + server: &http.Server{ + Addr: config.Endpoint, + // TODO: Evaluate what properties should be configurable, for now + // set some hard-coded values. + ReadHeaderTimeout: defaultServerTimeout, + WriteTimeout: defaultServerTimeout, + }, + } + + return r, nil +} + +// StartMetricsReception tells the receiver to start its processing. +// By convention the consumer of the received data is set when the receiver +// instance is created. 
+func (r *splunkReceiver) Start(_ context.Context, host component.Host) error { + r.Lock() + defer r.Unlock() + + var ln net.Listener + // set up the listener + ln, err := r.config.HTTPServerSettings.ToListener() + if err != nil { + return fmt.Errorf("failed to bind to address %s: %w", r.config.Endpoint, err) + } + + mx := mux.NewRouter() + mx.HandleFunc(hecPath, r.handleReq) + + r.server = r.config.HTTPServerSettings.ToServer(mx) + + // TODO: Evaluate what properties should be configurable, for now + // set some hard-coded values. + r.server.ReadHeaderTimeout = defaultServerTimeout + r.server.WriteTimeout = defaultServerTimeout + + go func() { + if errHTTP := r.server.Serve(ln); errHTTP != nil { + host.ReportFatalError(errHTTP) + } + }() + + return err +} + +// StopMetricsReception tells the receiver that should stop reception, +// giving it a chance to perform any necessary clean-up. +func (r *splunkReceiver) Shutdown(context.Context) error { + r.Lock() + defer r.Unlock() + + err := r.server.Close() + + return err +} + +func (r *splunkReceiver) handleReq(resp http.ResponseWriter, req *http.Request) { + transport := "http" + if r.config.TLSSetting != nil { + transport = "https" + } + ctx := obsreport.ReceiverContext(req.Context(), r.config.Name(), transport, r.config.Name()) + ctx = obsreport.StartMetricsReceiveOp(ctx, r.config.Name(), transport) + + if req.Method != http.MethodPost { + r.failRequest(ctx, resp, http.StatusBadRequest, invalidMethodRespBody, nil) + return + } + + if req.Header.Get(httpContentTypeHeader) != jsonContentType { + r.failRequest(ctx, resp, http.StatusUnsupportedMediaType, invalidContentRespBody, nil) + return + } + + encoding := req.Header.Get(httpContentEncodingHeader) + if encoding != "" && encoding != gzipEncoding { + r.failRequest(ctx, resp, http.StatusUnsupportedMediaType, invalidEncodingRespBody, nil) + return + } + + bodyReader := req.Body + if encoding == gzipEncoding { + var err error + bodyReader, err = gzip.NewReader(bodyReader) + if err != nil { + r.failRequest(ctx, resp, http.StatusBadRequest, errGzipReaderRespBody, err) + return + } + } + + if req.ContentLength == 0 { + resp.Write(okRespBody) + return + } + + messagesReceived := 0 + dec := json.NewDecoder(bodyReader) + + var events []*splunk.Event + + var decodeErr error + for dec.More() { + var msg splunk.Event + err := dec.Decode(&msg) + if err != nil { + r.failRequest(ctx, resp, http.StatusBadRequest, errUnmarshalBodyRespBody, err) + return + } + if msg.IsMetric() { + // Currently unsupported. 
+ r.failRequest(ctx, resp, http.StatusBadRequest, errUnsupportedMetricEvent, err) + return + } + + messagesReceived++ + events = append(events, &msg) + } + + customizer := func(resource pdata.Resource) {} + if r.config.AccessTokenPassthrough { + if accessToken := req.Header.Get(splunk.SplunkHECTokenHeader); accessToken != "" { + customizer = func(resource pdata.Resource) { + resource.Attributes().InsertString(splunk.SplunkHecTokenLabel, accessToken) + } + } + } + + ld, err := SplunkHecToLogData(r.logger, events, customizer) + if err != nil { + r.failRequest(ctx, resp, http.StatusBadRequest, errUnmarshalBodyRespBody, err) + return + } + + decodeErr = r.logConsumer.ConsumeLogs(ctx, ld) + + obsreport.EndMetricsReceiveOp( + ctx, + typeStr, + messagesReceived, + messagesReceived, + decodeErr) + if decodeErr != nil { + r.failRequest(ctx, resp, http.StatusInternalServerError, errInternalServerError, decodeErr) + } else { + resp.WriteHeader(http.StatusAccepted) + resp.Write(okRespBody) + } +} + +func (r *splunkReceiver) failRequest( + ctx context.Context, + resp http.ResponseWriter, + httpStatusCode int, + jsonResponse []byte, + err error, +) { + resp.WriteHeader(httpStatusCode) + if len(jsonResponse) > 0 { + // The response needs to be written as a JSON string. + _, writeErr := resp.Write(jsonResponse) + if writeErr != nil { + r.logger.Warn( + "Error writing HTTP response message", + zap.Error(writeErr), + zap.String("receiver", r.config.Name())) + } + } + + msg := string(jsonResponse) + + reqSpan := trace.FromContext(ctx) + reqSpan.AddAttributes( + trace.Int64Attribute(conventions.AttributeHTTPStatusCode, int64(httpStatusCode)), + trace.StringAttribute(conventions.AttributeHTTPStatusText, msg)) + traceStatus := trace.Status{ + Code: trace.StatusCodeInvalidArgument, + } + if httpStatusCode == http.StatusInternalServerError { + traceStatus.Code = trace.StatusCodeInternal + } + if err != nil { + traceStatus.Message = err.Error() + } + reqSpan.SetStatus(traceStatus) + reqSpan.End() + + r.logger.Debug( + "Splunk HEC receiver request failed", + zap.Int("http_status_code", httpStatusCode), + zap.String("msg", msg), + zap.Error(err), // It handles nil error + zap.String("receiver", r.config.Name())) +} + +func initJSONResponse(s string) []byte { + respBody, err := json.Marshal(s) + if err != nil { + // This is to be used in initialization so panic here is fine. + panic(err) + } + return respBody +} diff --git a/receiver/splunkhecreceiver/receiver_test.go b/receiver/splunkhecreceiver/receiver_test.go new file mode 100644 index 000000000000..4717795d0352 --- /dev/null +++ b/receiver/splunkhecreceiver/receiver_test.go @@ -0,0 +1,484 @@ +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package splunkhecreceiver + +import ( + "bytes" + "compress/gzip" + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/testutil" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/splunk" +) + +func Test_splunkhecreceiver_New(t *testing.T) { + defaultConfig := createDefaultConfig().(*Config) + emptyEndpointConfig := createDefaultConfig().(*Config) + emptyEndpointConfig.Endpoint = "" + type args struct { + config Config + logsConsumer consumer.LogsConsumer + } + tests := []struct { + name string + args args + wantErr error + }{ + { + name: "nil_nextConsumer", + args: args{ + config: *defaultConfig, + }, + wantErr: errNilNextConsumer, + }, + { + name: "empty_endpoint", + args: args{ + config: *emptyEndpointConfig, + logsConsumer: new(exportertest.SinkLogsExporter), + }, + wantErr: errEmptyEndpoint, + }, + { + name: "default_endpoint", + args: args{ + config: *defaultConfig, + logsConsumer: exportertest.NewNopLogsExporter(), + }, + }, + { + name: "happy_path", + args: args{ + config: Config{ + HTTPServerSettings: confighttp.HTTPServerSettings{ + Endpoint: "localhost:1234", + }, + }, + logsConsumer: exportertest.NewNopLogsExporter(), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := New(zap.NewNop(), tt.args.config, tt.args.logsConsumer) + assert.Equal(t, tt.wantErr, err) + if err == nil { + assert.NotNil(t, got) + } else { + assert.Nil(t, got) + } + }) + } +} + +func Test_splunkhecReceiver_handleReq(t *testing.T) { + config := createDefaultConfig().(*Config) + config.Endpoint = "localhost:0" // Actually not creating the endpoint + + currentTime := float64(time.Now().UnixNano()) / 1e6 + splunkMsg := buildSplunkHecMsg(currentTime, "foo", 3) + + tests := []struct { + name string + req *http.Request + assertResponse func(t *testing.T, status int, body string) + }{ + { + name: "incorrect_method", + req: httptest.NewRequest("PUT", "http://localhost", nil), + assertResponse: func(t *testing.T, status int, body string) { + assert.Equal(t, http.StatusBadRequest, status) + assert.Equal(t, responseInvalidMethod, body) + }, + }, + { + name: "incorrect_content_type", + req: func() *http.Request { + req := httptest.NewRequest("POST", "http://localhost", nil) + req.Header.Set("Content-Type", "application/not-json") + return req + }(), + assertResponse: func(t *testing.T, status int, body string) { + assert.Equal(t, http.StatusUnsupportedMediaType, status) + assert.Equal(t, responseInvalidContentType, body) + }, + }, + { + name: "metric_unsupported", + req: func() *http.Request { + metricMsg := buildSplunkHecMsg(currentTime, "foo", 3) + metricMsg.Event = "metric" + msgBytes, err := json.Marshal(metricMsg) + require.NoError(t, err) + req := httptest.NewRequest("POST", "http://localhost", bytes.NewReader(msgBytes)) + req.Header.Set("Content-Type", "application/json") + return req + }(), + assertResponse: func(t *testing.T, status int, body string) { + assert.Equal(t, http.StatusBadRequest, status) + 
assert.Equal(t, responseErrUnsupportedMetricEvent, body) + }, + }, + { + name: "incorrect_content_encoding", + req: func() *http.Request { + req := httptest.NewRequest("POST", "http://localhost", nil) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Content-Encoding", "superzipper") + return req + }(), + assertResponse: func(t *testing.T, status int, body string) { + assert.Equal(t, http.StatusUnsupportedMediaType, status) + assert.Equal(t, responseInvalidEncoding, body) + }, + }, + { + name: "bad_data_in_body", + req: func() *http.Request { + req := httptest.NewRequest("POST", "http://localhost", bytes.NewReader([]byte{1, 2, 3, 4})) + req.Header.Set("Content-Type", "application/json") + return req + }(), + assertResponse: func(t *testing.T, status int, body string) { + assert.Equal(t, http.StatusBadRequest, status) + assert.Equal(t, responseErrUnmarshalBody, body) + }, + }, + { + name: "empty_body", + req: func() *http.Request { + req := httptest.NewRequest("POST", "http://localhost", bytes.NewReader(nil)) + req.Header.Set("Content-Type", "application/json") + return req + }(), + assertResponse: func(t *testing.T, status int, body string) { + assert.Equal(t, http.StatusOK, status) + assert.Equal(t, responseOK, body) + }, + }, + { + name: "msg_accepted", + req: func() *http.Request { + msgBytes, err := json.Marshal(splunkMsg) + require.NoError(t, err) + req := httptest.NewRequest("POST", "http://localhost", bytes.NewReader(msgBytes)) + req.Header.Set("Content-Type", "application/json") + return req + }(), + assertResponse: func(t *testing.T, status int, body string) { + assert.Equal(t, http.StatusAccepted, status) + assert.Equal(t, responseOK, body) + }, + }, + { + name: "msg_accepted_gzipped", + req: func() *http.Request { + msgBytes, err := json.Marshal(splunkMsg) + require.NoError(t, err) + + var buf bytes.Buffer + gzipWriter := gzip.NewWriter(&buf) + _, err = gzipWriter.Write(msgBytes) + require.NoError(t, err) + require.NoError(t, gzipWriter.Close()) + + req := httptest.NewRequest("POST", "http://localhost", &buf) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Content-Encoding", "gzip") + return req + }(), + assertResponse: func(t *testing.T, status int, body string) { + assert.Equal(t, http.StatusAccepted, status) + assert.Equal(t, responseOK, body) + }, + }, + { + name: "bad_gzipped_msg", + req: func() *http.Request { + msgBytes, err := json.Marshal(splunkMsg) + require.NoError(t, err) + + req := httptest.NewRequest("POST", "http://localhost", bytes.NewReader(msgBytes)) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Content-Encoding", "gzip") + return req + }(), + assertResponse: func(t *testing.T, status int, body string) { + assert.Equal(t, http.StatusBadRequest, status) + assert.Equal(t, responseErrGzipReader, body) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sink := new(exportertest.SinkLogsExporter) + rcv, err := New(zap.NewNop(), *config, sink) + assert.NoError(t, err) + + r := rcv.(*splunkReceiver) + w := httptest.NewRecorder() + r.handleReq(w, tt.req) + + resp := w.Result() + respBytes, err := ioutil.ReadAll(resp.Body) + assert.NoError(t, err) + + var bodyStr string + assert.NoError(t, json.Unmarshal(respBytes, &bodyStr)) + + tt.assertResponse(t, resp.StatusCode, bodyStr) + }) + } +} + +func Test_consumer_err(t *testing.T) { + currentTime := float64(time.Now().UnixNano()) / 1e6 + splunkMsg := buildSplunkHecMsg(currentTime, "foo", 3) + config := 
createDefaultConfig().(*Config) + config.Endpoint = "localhost:0" // Actually not creating the endpoint + sink := new(exportertest.SinkLogsExporter) + sink.SetConsumeLogError(errors.New("bad consumer")) + rcv, err := New(zap.NewNop(), *config, sink) + assert.NoError(t, err) + + r := rcv.(*splunkReceiver) + w := httptest.NewRecorder() + msgBytes, err := json.Marshal(splunkMsg) + require.NoError(t, err) + req := httptest.NewRequest("POST", "http://localhost", bytes.NewReader(msgBytes)) + req.Header.Set("Content-Type", "application/json") + r.handleReq(w, req) + + resp := w.Result() + respBytes, err := ioutil.ReadAll(resp.Body) + assert.NoError(t, err) + + var bodyStr string + assert.NoError(t, json.Unmarshal(respBytes, &bodyStr)) + + assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) + assert.Equal(t, "Internal Server Error", bodyStr) +} + +func Test_splunkhecReceiver_TLS(t *testing.T) { + addr := testutil.GetAvailableLocalAddress(t) + cfg := createDefaultConfig().(*Config) + cfg.Endpoint = addr + cfg.TLSSetting = &configtls.TLSServerSetting{ + TLSSetting: configtls.TLSSetting{ + CertFile: "./testdata/testcert.crt", + KeyFile: "./testdata/testkey.key", + }, + } + sink := new(exportertest.SinkLogsExporter) + r, err := New(zap.NewNop(), *cfg, sink) + require.NoError(t, err) + defer r.Shutdown(context.Background()) + + // NewNopHost swallows errors so using NewErrorWaitingHost to catch any potential errors starting the + // receiver. + mh := componenttest.NewErrorWaitingHost() + require.NoError(t, r.Start(context.Background(), mh), "should not have failed to start log reception") + + // If there are errors reported through host.ReportFatalError() this will retrieve it. + receivedError, receivedErr := mh.WaitForFatalError(500 * time.Millisecond) + require.NoError(t, receivedErr, "should not have failed to start log reception") + require.False(t, receivedError) + t.Log("Event Reception Started") + + now := time.Now() + msecInt64 := now.UnixNano() / 1e6 + sec := float64(msecInt64) / 1e3 + lr := pdata.NewLogRecord() + lr.InitEmpty() + lr.SetTimestamp(pdata.TimestampUnixNano(int64(sec * 1e9))) + + lr.Body().SetStringVal("foo") + logs := pdata.NewLogs() + rl := pdata.NewResourceLogs() + rl.InitEmpty() + rl.Resource().InitEmpty() + rl.Resource().Attributes().InsertString("host.hostname", "") + rl.Resource().Attributes().InsertString("service.name", "") + rl.Resource().Attributes().InsertString("com.splunk.sourcetype", "") + ill := pdata.NewInstrumentationLibraryLogs() + ill.InitEmpty() + ill.Logs().Append(lr) + rl.InstrumentationLibraryLogs().Append(ill) + logs.ResourceLogs().Append(rl) + want := logs + + t.Log("Sending Splunk HEC data Request") + + body, err := json.Marshal(buildSplunkHecMsg(sec, "foo", 0)) + require.NoError(t, err, fmt.Sprintf("failed to marshal Splunk message: %v", err)) + + url := fmt.Sprintf("https://%s%s", addr, hecPath) + + req, err := http.NewRequest("POST", url, bytes.NewReader(body)) + require.NoErrorf(t, err, "should have no errors with new request: %v", err) + req.Header.Set("Content-Type", "application/json") + + caCert, err := ioutil.ReadFile("./testdata/testcert.crt") + require.NoErrorf(t, err, "failed to load certificate: %v", err) + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: caCertPool, + }, + }, + } + + resp, err := client.Do(req) + require.NoErrorf(t, err, "should not have failed when sending to splunk HEC receiver %v", 
err) + assert.Equal(t, http.StatusAccepted, resp.StatusCode) + t.Log("Splunk HEC Request Received") + + got := sink.AllLogs() + require.Equal(t, 1, len(got)) + assert.Equal(t, want, got[0]) +} + +func Test_splunkhecReceiver_AccessTokenPassthrough(t *testing.T) { + tests := []struct { + name string + passthrough bool + token pdata.AttributeValue + }{ + { + name: "No token provided and passthrough false", + passthrough: false, + token: pdata.NewAttributeValueNull(), + }, + { + name: "No token provided and passthrough true", + passthrough: true, + token: pdata.NewAttributeValueNull(), + }, + { + name: "token provided and passthrough false", + passthrough: false, + token: pdata.NewAttributeValueString("myToken"), + }, + { + name: "token provided and passthrough true", + passthrough: true, + token: pdata.NewAttributeValueString("myToken"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config := createDefaultConfig().(*Config) + config.Endpoint = "localhost:0" + config.AccessTokenPassthrough = tt.passthrough + + sink := new(exportertest.SinkLogsExporter) + rcv, err := New(zap.NewNop(), *config, sink) + assert.NoError(t, err) + + currentTime := float64(time.Now().UnixNano()) / 1e6 + splunkhecMsg := buildSplunkHecMsg(currentTime, "foo", 3) + msgBytes, _ := json.Marshal(splunkhecMsg) + req := httptest.NewRequest("POST", "http://localhost", bytes.NewReader(msgBytes)) + req.Header.Set("Content-Type", "application/json") + if tt.token.Type() != pdata.AttributeValueNULL { + req.Header.Set("Splunk", tt.token.StringVal()) + } + + r := rcv.(*splunkReceiver) + w := httptest.NewRecorder() + r.handleReq(w, req) + + resp := w.Result() + respBytes, err := ioutil.ReadAll(resp.Body) + assert.NoError(t, err) + + var bodyStr string + assert.NoError(t, json.Unmarshal(respBytes, &bodyStr)) + + assert.Equal(t, http.StatusAccepted, resp.StatusCode) + assert.Equal(t, responseOK, bodyStr) + + got := sink.AllLogs() + + resource := got[0].ResourceLogs().At(0).Resource() + if resource.IsNil() { + resource.InitEmpty() + } + tokenLabel, exists := resource.Attributes().Get("com.splunk.hec.access_token") + + if tt.passthrough { + if tt.token.Type() == pdata.AttributeValueNULL { + assert.False(t, exists) + } else { + assert.Equal(t, tt.token.StringVal(), tokenLabel.StringVal()) + } + } else { + assert.Empty(t, tokenLabel) + } + }) + } +} + +func buildSplunkHecMsg(time float64, value string, dimensions uint) *splunk.Event { + ev := &splunk.Event{ + Time: time, + Event: value, + Fields: map[string]interface{}{}, + } + for dim := uint(0); dim < dimensions; dim++ { + ev.Fields[fmt.Sprintf("k%d", dim)] = fmt.Sprintf("v%d", dim) + } + + return ev +} + +type badReqBody struct{} + +var _ io.ReadCloser = (*badReqBody)(nil) + +func (b badReqBody) Read(p []byte) (n int, err error) { + return 0, errors.New("badReqBody: can't read it") +} + +func (b badReqBody) Close() error { + return nil +} diff --git a/receiver/splunkhecreceiver/splunk_to_logdata.go b/receiver/splunkhecreceiver/splunk_to_logdata.go new file mode 100644 index 000000000000..b5a276d344a2 --- /dev/null +++ b/receiver/splunkhecreceiver/splunk_to_logdata.go @@ -0,0 +1,149 @@ +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package splunkhecreceiver + +import ( + "errors" + "sort" + + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/translator/conventions" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/splunk" +) + +const ( + cannotConvertValue = "cannot convert field value to attribute" +) + +// SplunkHecToLogData transforms splunk events into logs +func SplunkHecToLogData(logger *zap.Logger, events []*splunk.Event, resourceCustomizer func(pdata.Resource)) (pdata.Logs, error) { + ld := pdata.NewLogs() + rls := ld.ResourceLogs() + rls.Resize(len(events)) + + for i, event := range events { + rl := rls.At(i) + rl.InitEmpty() + logRecord := pdata.NewLogRecord() + logRecord.InitEmpty() + + // The SourceType field is the most logical "name" of the event. + logRecord.SetName(event.SourceType) + logRecord.Body().InitEmpty() + attrValue, err := convertInterfaceToAttributeValue(logger, event.Event) + if err != nil { + logger.Debug("Unsupported value conversion", zap.Any("value", event.Event)) + return ld, errors.New(cannotConvertValue) + } + attrValue.CopyTo(logRecord.Body()) + + // Splunk timestamps are in seconds so convert to nanos by multiplying + // by 1 billion. + logRecord.SetTimestamp(pdata.TimestampUnixNano(event.Time * 1e9)) + + rl.Resource().InitEmpty() + attrs := rl.Resource().Attributes() + attrs.InitEmptyWithCapacity(3) + attrs.InsertString(conventions.AttributeHostHostname, event.Host) + attrs.InsertString(conventions.AttributeServiceName, event.Source) + attrs.InsertString(splunk.SourcetypeLabel, event.SourceType) + resourceCustomizer(rl.Resource()) + //TODO consider setting the index field as well for pass through scenarios. 
+ keys := make([]string, 0, len(event.Fields)) + for k := range event.Fields { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + val := event.Fields[key] + attrValue, err := convertInterfaceToAttributeValue(logger, val) + if err != nil { + return ld, err + } + logRecord.Attributes().Insert(key, attrValue) + } + ill := pdata.NewInstrumentationLibraryLogs() + ill.InitEmpty() + ill.Logs().Append(logRecord) + rl.InstrumentationLibraryLogs().Append(ill) + } + + return ld, nil +} + +func convertInterfaceToAttributeValue(logger *zap.Logger, originalValue interface{}) (pdata.AttributeValue, error) { + if originalValue == nil { + return pdata.NewAttributeValueNull(), nil + } else if value, ok := originalValue.(string); ok { + return pdata.NewAttributeValueString(value), nil + } else if value, ok := originalValue.(int64); ok { + return pdata.NewAttributeValueInt(value), nil + } else if value, ok := originalValue.(float64); ok { + return pdata.NewAttributeValueDouble(value), nil + } else if value, ok := originalValue.(bool); ok { + return pdata.NewAttributeValueBool(value), nil + } else if value, ok := originalValue.(map[string]interface{}); ok { + mapContents, err := convertToAttributeMap(logger, value) + if err != nil { + return pdata.NewAttributeValueNull(), err + } + mapValue := pdata.NewAttributeValueMap() + mapValue.SetMapVal(mapContents) + return mapValue, nil + } else if value, ok := originalValue.([]interface{}); ok { + arrValue := pdata.NewAttributeValueArray() + arrContents, err := convertToArrayVal(logger, value) + if err != nil { + return pdata.NewAttributeValueNull(), err + } + arrValue.SetArrayVal(arrContents) + return arrValue, nil + } else { + logger.Debug("Unsupported value conversion", zap.Any("value", originalValue)) + return pdata.NewAttributeValueNull(), errors.New(cannotConvertValue) + } +} + +func convertToArrayVal(logger *zap.Logger, value []interface{}) (pdata.AnyValueArray, error) { + arr := pdata.NewAnyValueArray() + for _, elt := range value { + translatedElt, err := convertInterfaceToAttributeValue(logger, elt) + if err != nil { + return arr, err + } + arr.Append(translatedElt) + } + return arr, nil +} + +func convertToAttributeMap(logger *zap.Logger, value map[string]interface{}) (pdata.AttributeMap, error) { + attrMap := pdata.NewAttributeMap() + keys := make([]string, 0, len(value)) + for k := range value { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := value[k] + translatedElt, err := convertInterfaceToAttributeValue(logger, v) + if err != nil { + return attrMap, err + } + attrMap.Insert(k, translatedElt) + } + return attrMap, nil +} diff --git a/receiver/splunkhecreceiver/splunk_to_logdata_test.go b/receiver/splunkhecreceiver/splunk_to_logdata_test.go new file mode 100644 index 000000000000..97dd84192e86 --- /dev/null +++ b/receiver/splunkhecreceiver/splunk_to_logdata_test.go @@ -0,0 +1,221 @@ +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package splunkhecreceiver + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/consumer/pdata" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/splunk" +) + +func Test_SplunkHecToLogData(t *testing.T) { + + tests := []struct { + name string + event splunk.Event + output pdata.ResourceLogsSlice + wantErr error + }{ + { + name: "happy_path", + event: splunk.Event{ + Time: 0.123, + Host: "localhost", + Source: "mysource", + SourceType: "mysourcetype", + Index: "myindex", + Event: "value", + Fields: map[string]interface{}{ + "foo": "bar", + }, + }, + output: func() pdata.ResourceLogsSlice { + return createLogsSlice("value") + }(), + wantErr: nil, + }, + { + name: "double", + event: splunk.Event{ + Time: 0.123, + Host: "localhost", + Source: "mysource", + SourceType: "mysourcetype", + Index: "myindex", + Event: 12.3, + Fields: map[string]interface{}{ + "foo": "bar", + }, + }, + output: func() pdata.ResourceLogsSlice { + logsSlice := createLogsSlice("value") + logsSlice.At(0).InstrumentationLibraryLogs().At(0).Logs().At(0).Body().SetDoubleVal(12.3) + return logsSlice + }(), + wantErr: nil, + }, + { + name: "array", + event: splunk.Event{ + Time: 0.123, + Host: "localhost", + Source: "mysource", + SourceType: "mysourcetype", + Index: "myindex", + Event: []interface{}{"foo", "bar"}, + Fields: map[string]interface{}{ + "foo": "bar", + }, + }, + output: func() pdata.ResourceLogsSlice { + logsSlice := createLogsSlice("value") + arr := pdata.NewAnyValueArray() + arr.Append(pdata.NewAttributeValueString("foo")) + arr.Append(pdata.NewAttributeValueString("bar")) + logsSlice.At(0).InstrumentationLibraryLogs().At(0).Logs().At(0).Body().SetArrayVal(arr) + return logsSlice + }(), + wantErr: nil, + }, + { + name: "complex_structure", + event: splunk.Event{ + Time: 0.123, + Host: "localhost", + Source: "mysource", + SourceType: "mysourcetype", + Index: "myindex", + Event: map[string]interface{}{"foos": []interface{}{"foo", "bar", "foobar"}, "bool": false, "someInt": int64(12)}, + Fields: map[string]interface{}{ + "foo": "bar", + }, + }, + output: func() pdata.ResourceLogsSlice { + logsSlice := createLogsSlice("value") + attMap := pdata.NewAttributeMap() + foos := pdata.NewAnyValueArray() + foos.Append(pdata.NewAttributeValueString("foo")) + foos.Append(pdata.NewAttributeValueString("bar")) + foos.Append(pdata.NewAttributeValueString("foobar")) + foosArr := pdata.NewAttributeValueArray() + foosArr.SetArrayVal(foos) + attMap.InsertBool("bool", false) + attMap.Insert("foos", foosArr) + attMap.InsertInt("someInt", 12) + logsSlice.At(0).InstrumentationLibraryLogs().At(0).Logs().At(0).Body().SetMapVal(attMap) + return logsSlice + }(), + wantErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := SplunkHecToLogData(zap.NewNop(), []*splunk.Event{&tt.event}, func(resource pdata.Resource) {}) + assert.Equal(t, tt.wantErr, err) + assert.Equal(t, tt.output.Len(), result.ResourceLogs().Len()) + assert.Equal(t, tt.output.At(0), result.ResourceLogs().At(0)) + }) + } +} + +func createLogsSlice(body string) pdata.ResourceLogsSlice { + lrs := pdata.NewResourceLogsSlice() + lrs.Resize(1) + lr := lrs.At(0) + lr.InitEmpty() + lr.Resource().InitEmpty() + logRecord := pdata.NewLogRecord() + logRecord.InitEmpty() + + logRecord.SetName("mysourcetype") + logRecord.Body().SetStringVal(body) + logRecord.SetTimestamp(pdata.TimestampUnixNano(123000000)) + 
lr.Resource().Attributes().InsertString("host.hostname", "localhost") + lr.Resource().Attributes().InsertString("service.name", "mysource") + lr.Resource().Attributes().InsertString("com.splunk.sourcetype", "mysourcetype") + logRecord.Attributes().InsertString("foo", "bar") + ill := pdata.NewInstrumentationLibraryLogs() + ill.InitEmpty() + ill.Logs().Append(logRecord) + lr.InstrumentationLibraryLogs().Append(ill) + + return lrs +} + +func Test_ConvertAttributeValueNull(t *testing.T) { + value, err := convertInterfaceToAttributeValue(zap.NewNop(), nil) + assert.NoError(t, err) + assert.Equal(t, pdata.NewAttributeValueNull(), value) +} + +func Test_ConvertAttributeValueString(t *testing.T) { + value, err := convertInterfaceToAttributeValue(zap.NewNop(), "foo") + assert.NoError(t, err) + assert.Equal(t, pdata.NewAttributeValueString("foo"), value) +} + +func Test_ConvertAttributeValueBool(t *testing.T) { + value, err := convertInterfaceToAttributeValue(zap.NewNop(), false) + assert.NoError(t, err) + assert.Equal(t, pdata.NewAttributeValueBool(false), value) +} + +func Test_ConvertAttributeValueFloat(t *testing.T) { + value, err := convertInterfaceToAttributeValue(zap.NewNop(), 12.3) + assert.NoError(t, err) + assert.Equal(t, pdata.NewAttributeValueDouble(12.3), value) +} + +func Test_ConvertAttributeValueMap(t *testing.T) { + value, err := convertInterfaceToAttributeValue(zap.NewNop(), map[string]interface{}{"foo": "bar"}) + assert.NoError(t, err) + atts := pdata.NewAttributeValueMap() + attMap := pdata.NewAttributeMap() + attMap.InsertString("foo", "bar") + atts.SetMapVal(attMap) + assert.Equal(t, atts, value) +} + +func Test_ConvertAttributeValueArray(t *testing.T) { + value, err := convertInterfaceToAttributeValue(zap.NewNop(), []interface{}{"foo"}) + assert.NoError(t, err) + arr := pdata.NewAttributeValueArray() + arrValue := pdata.NewAnyValueArray() + arrValue.Append(pdata.NewAttributeValueString("foo")) + arr.SetArrayVal(arrValue) + assert.Equal(t, arr, value) +} + +func Test_ConvertAttributeValueInvalid(t *testing.T) { + value, err := convertInterfaceToAttributeValue(zap.NewNop(), splunk.Event{}) + assert.Error(t, err) + assert.Equal(t, pdata.NewAttributeValueNull(), value) +} + +func Test_ConvertAttributeValueInvalidInMap(t *testing.T) { + value, err := convertInterfaceToAttributeValue(zap.NewNop(), map[string]interface{}{"foo": splunk.Event{}}) + assert.Error(t, err) + assert.Equal(t, pdata.NewAttributeValueNull(), value) +} + +func Test_ConvertAttributeValueInvalidInArray(t *testing.T) { + value, err := convertInterfaceToAttributeValue(zap.NewNop(), []interface{}{splunk.Event{}}) + assert.Error(t, err) + assert.Equal(t, pdata.NewAttributeValueNull(), value) +} From 23c4329a734adeba947e8f80f91e911c1217eb4c Mon Sep 17 00:00:00 2001 From: "Rayhan Hossain (Mukla.C)" Date: Fri, 16 Oct 2020 10:40:09 -0500 Subject: [PATCH 5/8] awsecscontainermetrics: update CpuUtilized metric calculation (#1283) We had our internal discussion and we decided to report `CpuUtilized` metric in percentage. We studied our new calculation and verified with domain expert. [Update] As the reviewer's suggestion, will create a separate PR for the following one. Earlier we decided to add task and container metadata as resource attributes. However, CloudWatch `awsemf` exporter reads dimensions only from metric labels. As this exporter is major target for `awsecscontainermetrics` receiver, we need to add these metadata as metric labels. I discussed this in our OTel Metric SIG meeting. 
We also discussed internally and agreed to add them as metric labels as well. This change will address both of these issues. **Link to tracking Issue:** #1282 --- .../awsecscontainermetrics/accumulator.go | 10 ++++++ .../awsecscontainermetrics/constant.go | 2 ++ .../awsecscontainermetrics/docker_stats.go | 9 +++-- .../awsecscontainermetrics/ecs_metrics.go | 1 + .../awsecscontainermetrics/metrics_helper.go | 13 ++++--- .../metrics_helper_test.go | 34 +++++++++++++++---- .../awsecscontainermetrics/translator.go | 3 +- .../awsecscontainermetrics/translator_test.go | 2 +- 8 files changed, 59 insertions(+), 15 deletions(-) diff --git a/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/accumulator.go b/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/accumulator.go index 135a219bbbf9..b2b3a9b861eb 100644 --- a/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/accumulator.go +++ b/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/accumulator.go @@ -40,6 +40,10 @@ func (acc *metricDataAccumulator) getMetricsData(containerStatsMap map[string]Co containerMetrics.MemoryReserved = *containerMetadata.Limits.Memory containerMetrics.CPUReserved = *containerMetadata.Limits.CPU + if containerMetrics.CPUReserved > 0 { + containerMetrics.CPUUtilized = (containerMetrics.CPUUtilized / containerMetrics.CPUReserved) + } + containerResource := containerResource(containerMetadata) for k, v := range taskResource.Labels { containerResource.Labels[k] = v @@ -65,6 +69,12 @@ func (acc *metricDataAccumulator) getMetricsData(containerStatsMap map[string]Co taskMetrics.CPUReserved = *metadata.Limits.CPU } + // taskMetrics.CPUReserved cannot be zero. In ECS, user needs to set CPU limit + // at least in one place (either in task level or in container level). If the + // task level CPULimit is not present, we calculate it from the summation of + // all container CPU limits. 
+ taskMetrics.CPUUtilized = ((taskMetrics.CPUUsageInVCPU / taskMetrics.CPUReserved) * 100) + acc.accumulate( taskResource, convertToOCMetrics(TaskPrefix, taskMetrics, nil, nil, timestamp), diff --git a/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/constant.go b/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/constant.go index a3d1ecd2a3c8..a0128c1594ab 100644 --- a/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/constant.go +++ b/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/constant.go @@ -48,6 +48,7 @@ const ( AttributeCPUOnlines = "cpu.onlines" AttributeCPUReserved = "cpu.reserved" AttributeCPUUtilized = "cpu.utilized" + AttributeCPUUsageInVCPU = "cpu.usage.vcpu" AttributeNetworkRateRx = "network.rate.rx" AttributeNetworkRateTx = "network.rate.tx" @@ -70,4 +71,5 @@ const ( UnitBytesPerSec = "Bytes/Sec" UnitCount = "Count" UnitVCpu = "vCPU" + UnitPercent = "Percent" ) diff --git a/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/docker_stats.go b/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/docker_stats.go index a3c665001f8c..b3f46e60a3ca 100644 --- a/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/docker_stats.go +++ b/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/docker_stats.go @@ -14,16 +14,21 @@ package awsecscontainermetrics +import "time" + // ContainerStats defines the structure for container stats type ContainerStats struct { - Name string `json:"name"` - ID string `json:"id"` + Name string `json:"name"` + ID string `json:"id"` + Read time.Time `json:"read"` + PreviousRead time.Time `json:"preread"` Memory MemoryStats `json:"memory_stats,omitempty"` Disk DiskStats `json:"blkio_stats,omitempty"` Network map[string]NetworkStats `json:"networks,omitempty"` NetworkRate NetworkRateStats `json:"network_rate_stats,omitempty"` CPU CPUStats `json:"cpu_stats,omitempty"` + PreviousCPU CPUStats `json:"precpu_stats,omitempty"` } // MemoryStats defines the memory stats diff --git a/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/ecs_metrics.go b/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/ecs_metrics.go index 5f1c5f1ab3a0..88ca3c50fe7c 100644 --- a/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/ecs_metrics.go +++ b/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/ecs_metrics.go @@ -30,6 +30,7 @@ type ECSMetrics struct { NumOfCPUCores uint64 CPUReserved float64 CPUUtilized float64 + CPUUsageInVCPU float64 NetworkRateRxBytesPerSecond float64 NetworkRateTxBytesPerSecond float64 diff --git a/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/metrics_helper.go b/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/metrics_helper.go index 93280aa61e5c..ac1b224d8be5 100644 --- a/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/metrics_helper.go +++ b/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/metrics_helper.go @@ -17,11 +17,15 @@ package awsecscontainermetrics // getContainerMetrics generate ECS Container metrics from Container stats func getContainerMetrics(stats ContainerStats) ECSMetrics { memoryUtilizedInMb := (*stats.Memory.Usage - stats.Memory.Stats["cache"]) / BytesInMiB - numOfCores := (uint64)(len(stats.CPU.CPUUsage.PerCPUUsage)) + timeDiffSinceLastRead := (float64)(stats.Read.Sub(stats.PreviousRead).Nanoseconds()) - // TODO: match with ECS Agent calculation and modify if needed - cpuUtilized := (float64)(*stats.CPU.CPUUsage.TotalUsage / 
numOfCores / 1024) + cpuUsageInVCpu := float64(0) + if timeDiffSinceLastRead > 0 { + cpuDelta := (float64)(*stats.CPU.CPUUsage.TotalUsage - *stats.PreviousCPU.CPUUsage.TotalUsage) + cpuUsageInVCpu = cpuDelta / timeDiffSinceLastRead + } + cpuUtilized := cpuUsageInVCpu * 100 netStatArray := getNetworkStats(stats.Network) @@ -41,6 +45,7 @@ func getContainerMetrics(stats ContainerStats) ECSMetrics { m.NumOfCPUCores = numOfCores m.CPUOnlineCpus = *stats.CPU.OnlineCpus m.SystemCPUUsage = *stats.CPU.SystemCPUUsage + m.CPUUsageInVCPU = cpuUsageInVCpu m.CPUUtilized = cpuUtilized if stats.NetworkRate == (NetworkRateStats{}) { @@ -119,8 +124,8 @@ func aggregateTaskMetrics(taskMetrics *ECSMetrics, conMetrics ECSMetrics) { taskMetrics.NumOfCPUCores += conMetrics.NumOfCPUCores taskMetrics.CPUOnlineCpus += conMetrics.CPUOnlineCpus taskMetrics.SystemCPUUsage += conMetrics.SystemCPUUsage - taskMetrics.CPUUtilized += conMetrics.CPUUtilized taskMetrics.CPUReserved += conMetrics.CPUReserved + taskMetrics.CPUUsageInVCPU += conMetrics.CPUUsageInVCPU taskMetrics.NetworkRateRxBytesPerSecond += conMetrics.NetworkRateRxBytesPerSecond taskMetrics.NetworkRateTxBytesPerSecond += conMetrics.NetworkRateTxBytesPerSecond diff --git a/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/metrics_helper_test.go b/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/metrics_helper_test.go index c05651d02017..7d63ad99d2c2 100644 --- a/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/metrics_helper_test.go +++ b/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/metrics_helper_test.go @@ -16,6 +16,7 @@ package awsecscontainermetrics import ( "testing" + "time" "github.com/stretchr/testify/require" ) @@ -77,14 +78,33 @@ func TestGetContainerAndTaskMetrics(t *testing.T) { CPUUtilized: &v, CPUReserved: &v, } + + previousCPUUsage := CPUUsage{ + TotalUsage: &v, + UsageInKernelmode: &v, + UsageInUserMode: &v, + PerCPUUsage: percpu, + } + + previousCPUStats := CPUStats{ + CPUUsage: previousCPUUsage, + OnlineCpus: &v, + SystemCPUUsage: &v, + CPUUtilized: &v, + CPUReserved: &v, + } + containerStats := ContainerStats{ - Name: "test", - ID: "001", - Memory: mem, - Disk: disk, - Network: net, - NetworkRate: netRate, - CPU: cpuStats, + Name: "test", + ID: "001", + Read: time.Now(), + PreviousRead: time.Now().Add(-10 * time.Second), + Memory: mem, + Disk: disk, + Network: net, + NetworkRate: netRate, + CPU: cpuStats, + PreviousCPU: previousCPUStats, } containerMetrics := getContainerMetrics(containerStats) diff --git a/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/translator.go b/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/translator.go index f6c368f6ab6e..dea5f28e2987 100644 --- a/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/translator.go +++ b/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/translator.go @@ -34,8 +34,9 @@ func convertToOCMetrics(prefix string, m ECSMetrics, labelKeys []*metricspb.Labe intGauge(prefix+AttributeCPUCores, UnitCount, &m.NumOfCPUCores, labelKeys, labelValues), intGauge(prefix+AttributeCPUOnlines, UnitCount, &m.CPUOnlineCpus, labelKeys, labelValues), intCumulative(prefix+AttributeCPUSystemUsage, UnitNanoSecond, &m.SystemCPUUsage, labelKeys, labelValues), - doubleGauge(prefix+AttributeCPUUtilized, UnitVCpu, &m.CPUUtilized, labelKeys, labelValues), + doubleGauge(prefix+AttributeCPUUtilized, UnitPercent, &m.CPUUtilized, labelKeys, labelValues), doubleGauge(prefix+AttributeCPUReserved, UnitVCpu, 
&m.CPUReserved, labelKeys, labelValues), + doubleGauge(prefix+AttributeCPUUsageInVCPU, UnitVCpu, &m.CPUUsageInVCPU, labelKeys, labelValues), doubleGauge(prefix+AttributeNetworkRateRx, UnitBytesPerSec, &m.NetworkRateRxBytesPerSecond, labelKeys, labelValues), doubleGauge(prefix+AttributeNetworkRateTx, UnitBytesPerSec, &m.NetworkRateTxBytesPerSecond, labelKeys, labelValues), diff --git a/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/translator_test.go b/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/translator_test.go index fab2ea892b7c..b6813cc254b9 100644 --- a/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/translator_test.go +++ b/receiver/awsecscontainermetricsreceiver/awsecscontainermetrics/translator_test.go @@ -39,7 +39,7 @@ func TestConvertToOTMetrics(t *testing.T) { } metrics := convertToOCMetrics("container.", m, labelKeys, labelValues, timestamp) - require.EqualValues(t, 25, len(metrics)) + require.EqualValues(t, 26, len(metrics)) } func TestIntGauge(t *testing.T) { From 08a97dcf345b3629ec158c2502dfcb3f8d428440 Mon Sep 17 00:00:00 2001 From: Jay Camp Date: Fri, 16 Oct 2020 13:45:13 -0400 Subject: [PATCH 6/8] sapmexporter: correlation enabled check inversed (#1278) Added tests to cover case. --- exporter/sapmexporter/exporter.go | 2 +- exporter/sapmexporter/exporter_test.go | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/exporter/sapmexporter/exporter.go b/exporter/sapmexporter/exporter.go index 8243810ffed4..a05fd17a9b8d 100644 --- a/exporter/sapmexporter/exporter.go +++ b/exporter/sapmexporter/exporter.go @@ -56,7 +56,7 @@ func newSAPMExporter(cfg *Config, params component.ExporterCreateParams) (sapmEx var tracker *Tracker - if !cfg.Correlation.Enabled { + if cfg.Correlation.Enabled { tracker = NewTracker(cfg, params) } diff --git a/exporter/sapmexporter/exporter_test.go b/exporter/sapmexporter/exporter_test.go index 4ce5b33b4ba7..da6da8913500 100644 --- a/exporter/sapmexporter/exporter_test.go +++ b/exporter/sapmexporter/exporter_test.go @@ -54,6 +54,32 @@ func TestCreateTraceExporter(t *testing.T) { assert.NoError(t, te.Shutdown(context.Background()), "trace exporter shutdown failed") } +func TestCreateTraceExporterWithCorrelationEnabled(t *testing.T) { + cfg := createDefaultConfig().(*Config) + cfg.Endpoint = "localhost:1234" + cfg.Correlation.Enabled = true + cfg.Correlation.Endpoint = "http://localhost" + params := component.ExporterCreateParams{Logger: zap.NewNop()} + + te, err := newSAPMExporter(cfg, params) + assert.Nil(t, err) + assert.NotNil(t, te, "failed to create trace exporter") + + assert.NotNil(t, te.tracker, "correlation tracker should have been set") +} + +func TestCreateTraceExporterWithCorrelationDisabled(t *testing.T) { + cfg := createDefaultConfig().(*Config) + cfg.Endpoint = "localhost:1234" + params := component.ExporterCreateParams{Logger: zap.NewNop()} + + te, err := newSAPMExporter(cfg, params) + assert.Nil(t, err) + assert.NotNil(t, te, "failed to create trace exporter") + + assert.Nil(t, te.tracker, "tracker correlation should not be created") +} + func TestCreateTraceExporterWithInvalidConfig(t *testing.T) { config := &Config{} params := component.ExporterCreateParams{Logger: zap.NewNop()} From e7b29c17f3d975b8801481771d22477644086541 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Fri, 16 Oct 2020 14:00:19 -0700 Subject: [PATCH 7/8] Run make gotidy on master branch (#1287) Signed-off-by: Bogdan Drutu --- exporter/datadogexporter/go.mod | 9 +++---- 
exporter/datadogexporter/go.sum | 3 +++ exporter/sapmexporter/go.sum | 5 ---- go.sum | 31 ------------------------ receiver/splunkhecreceiver/go.mod | 4 ---- receiver/stanzareceiver/go.sum | 1 + testbed/go.sum | 39 +++++++++++++++++++++++++++++-- 7 files changed, 44 insertions(+), 48 deletions(-) diff --git a/exporter/datadogexporter/go.mod b/exporter/datadogexporter/go.mod index a468a47ca368..bed909bf3d44 100644 --- a/exporter/datadogexporter/go.mod +++ b/exporter/datadogexporter/go.mod @@ -5,16 +5,13 @@ go 1.15 require ( github.com/DataDog/datadog-agent v0.0.0-20200417180928-f454c60bc16f github.com/DataDog/viper v1.8.0 // indirect - github.com/census-instrumentation/opencensus-proto v0.3.0 github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect - github.com/gogo/protobuf v1.3.1 - github.com/klauspost/compress v1.10.10 + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/stretchr/testify v1.6.1 github.com/zorkian/go-datadog-api v2.29.0+incompatible // indirect + go.opencensus.io v0.22.4 go.opentelemetry.io/collector v0.12.1-0.20201012183541-526f34200197 go.uber.org/zap v1.16.0 - gopkg.in/zorkian/go-datadog-api.v2 v2.29.0 - go.opencensus.io v0.22.4 - google.golang.org/protobuf v1.25.0 gopkg.in/DataDog/dd-trace-go.v1 v1.26.0 + gopkg.in/zorkian/go-datadog-api.v2 v2.29.0 ) diff --git a/exporter/datadogexporter/go.sum b/exporter/datadogexporter/go.sum index eb13d55e2e05..e9954f193449 100644 --- a/exporter/datadogexporter/go.sum +++ b/exporter/datadogexporter/go.sum @@ -668,6 +668,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -1202,6 +1204,7 @@ golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPI golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= diff --git a/exporter/sapmexporter/go.sum b/exporter/sapmexporter/go.sum index 55d0b3a2117d..1386f5688468 100644 --- a/exporter/sapmexporter/go.sum +++ b/exporter/sapmexporter/go.sum @@ -95,7 +95,6 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible 
h1:TKdv8HiTLgE5wdJuEML90aBgNWso github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/Songmu/retry v0.1.0 h1:hPA5xybQsksLR/ry/+t/7cFajPW+dqjmjhzZhioBILA= github.com/Songmu/retry v0.1.0/go.mod h1:7sXIW7eseB9fq0FUvigRcQMVLR9tuHI0Scok+rkpAuA= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= @@ -286,7 +285,6 @@ github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= @@ -409,7 +407,6 @@ github.com/gofrs/flock v0.8.0 h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY= github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.3.0/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU= -github.com/gogo/googleapis v1.3.1 h1:CzMaKrvF6Qa7XtRii064vKBQiyvmY8H8vG1xa1/W1JA= github.com/gogo/googleapis v1.3.1/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU= github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= @@ -1023,8 +1020,6 @@ github.com/signalfx/gomemcache v0.0.0-20180823214636-4f7ef64c72a9/go.mod h1:Ytb8 github.com/signalfx/sapm-proto v0.4.0/go.mod h1:x3gtwJ1GRejtkghB4nYpwixh2zqJrLbPU959ZNhM0Fk= github.com/signalfx/sapm-proto v0.6.2 h1:2LtB8AUGVyP5lSlsaBjFTsHfZNK/zn+jzWl1tWwniRA= github.com/signalfx/sapm-proto v0.6.2/go.mod h1:AHtWypa5paGVlvDjSZw9Bh5GLgS62ee2U0UcsrLlLhU= -github.com/signalfx/signalfx-agent/pkg/apm v0.0.0-20201005151249-ce1a2e0a25e7 h1:+KSSs1oE/YFmd487gpPk79OcFo51tEiFqadFoE3RVvg= -github.com/signalfx/signalfx-agent/pkg/apm v0.0.0-20201005151249-ce1a2e0a25e7/go.mod h1:pNaqfprM2bSCBhE8sTT2NtasSWEsIJbrmnIF0ap/Cvg= github.com/signalfx/signalfx-agent/pkg/apm v0.0.0-20201009143858-d25fd073fb56 h1:XYBr6vxBtAufUs72S5LYkjCmCB7QM4kvX2jwufGCqhg= github.com/signalfx/signalfx-agent/pkg/apm v0.0.0-20201009143858-d25fd073fb56/go.mod h1:pNaqfprM2bSCBhE8sTT2NtasSWEsIJbrmnIF0ap/Cvg= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= diff --git a/go.sum b/go.sum index 50063c89c2ed..7d0b8a5747d3 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,6 @@ cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZ cloud.google.com/go v0.61.0/go.mod h1:XukKJg4Y7QsUu0Hxg3qQKUWR4VuWivmyMK2+rUyxAqw= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod 
h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.66.0 h1:DZeAkuQGQqnm9Xv36SbMJEU8aFBz4wL04UpMWPWwjzg= -cloud.google.com/go v0.66.0/go.mod h1:dgqGAjKCDxyhGTtC9dAREQGUJpkceNm1yt590Qno0Ko= cloud.google.com/go v0.67.0 h1:YIkzmqUfVGiGPpT98L8sVvUIkDno6UlrDxw4NR6z5ak= cloud.google.com/go v0.67.0/go.mod h1:YNan/mUhNZFrYUor0vqrsQ0Ffl7Xtm/ACOy/vsTS858= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -175,9 +173,6 @@ github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.34.32/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.34.32/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.35.2 h1:qK+noh6b9KW+5CP1NmmWsQCUbnzucSGrjHEs69MEl6A= github.com/aws/aws-sdk-go v1.35.2/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.35.7 h1:FHMhVhyc/9jljgFAcGkQDYjpC9btM0B8VfkLBfctdNE= github.com/aws/aws-sdk-go v1.35.7/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= @@ -198,7 +193,6 @@ github.com/bombsimon/wsl/v3 v3.1.0 h1:E5SRssoBgtVFPcYWUOFJEcgaySgdtTNYzsSKDOY7ss github.com/bombsimon/wsl/v3 v3.1.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= github.com/brianvoe/gofakeit v3.17.0+incompatible h1:C1+30+c0GtjgGDtRC+iePZeP1WMiwsWCELNJhmc7aIc= github.com/brianvoe/gofakeit v3.17.0+incompatible/go.mod h1:kfwdRA90vvNhPutZWfH7WPaDzUjz+CZFqG+rPkOjGOc= -github.com/bsm/sarama-cluster v2.1.13+incompatible h1:bqU3gMJbWZVxLZ9PGWVKP05yOmFXUlfw61RBwuE3PYU= github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= @@ -735,7 +729,6 @@ github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKe github.com/hetznercloud/hcloud-go v1.21.1 h1:LWNozxiZhKmeMqYbAS7KsAcPcxg47afCnTeLKmN+n7w= github.com/hetznercloud/hcloud-go v1.21.1/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg= github.com/honeycombio/libhoney-go v1.14.1 h1:TyFJrPua66YodzBf/eyi0nWEOL/YMhtegkAHVBocUa8= -github.com/honeycombio/libhoney-go v1.14.1 h1:TyFJrPua66YodzBf/eyi0nWEOL/YMhtegkAHVBocUa8= github.com/honeycombio/libhoney-go v1.14.1/go.mod h1:SPNdLieCW/Ryt6cAIeELQENobj8H9bYXezWkA5ZI+u4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= @@ -762,11 +755,6 @@ github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/ github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI= github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= github.com/jaegertracing/jaeger v1.15.1/go.mod h1:LUWPSnzNPGRubM8pk0inANGitpiMOOxihXx0+53llXI= -github.com/jaegertracing/jaeger v1.19.1/go.mod h1:2GVHuF9OIfRw2N6ZMoEgRGL+GJxvDLVtALDWxOINqDk= -github.com/jaegertracing/jaeger v1.19.1/go.mod h1:2GVHuF9OIfRw2N6ZMoEgRGL+GJxvDLVtALDWxOINqDk= -github.com/jaegertracing/jaeger v1.19.2 
h1:JX1ty1wlkk3JENyfXNMRAxGClwErTyzEKbQAFktYpOc= -github.com/jaegertracing/jaeger v1.19.2 h1:JX1ty1wlkk3JENyfXNMRAxGClwErTyzEKbQAFktYpOc= -github.com/jaegertracing/jaeger v1.19.2/go.mod h1:2GVHuF9OIfRw2N6ZMoEgRGL+GJxvDLVtALDWxOINqDk= github.com/jaegertracing/jaeger v1.20.0 h1:rnwhl7COrEj1/vYfumL84CoiwOEy2MLFJFcW1bqjxnA= github.com/jaegertracing/jaeger v1.20.0/go.mod h1:EFO94eQMRMI5KM4RIWcnl3rocmGEVt232TIG4Ua/4T0= github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= @@ -789,7 +777,6 @@ github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8 github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/joshdk/go-junit v0.0.0-20200702055522-6efcf4050909 h1:3BBiJ4qMIiesYqTk4zbLZnHrNIE3LYL1UUpaMIYGPSo= github.com/joshdk/go-junit v0.0.0-20200702055522-6efcf4050909/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= @@ -1205,7 +1192,6 @@ github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 h1:X886QgwZH5qr9HIQkk3m github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2/go.mod h1:tCQQqyJAVF1+mxNdqOi18sS/zaSrE6EMyWwRA2QTl70= github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 h1:WsShHmu12ZztYPfh9b+I+VjYD1o8iOHhB67WZCMEEE8= github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083/go.mod h1:adPDS6s7WaajdFBV9mQ7i0dKfQ8xiDnF9ZNETVPpp7c= -github.com/signalfx/golib/v3 v3.3.0 h1:vSXsAb73bdrlnjk5rnZ7y3t09Qzu9qfBEbXdcyBHsmE= github.com/signalfx/golib/v3 v3.3.0/go.mod h1:GzjWpV0skAXZn7+u9LnkOkiXAx9KKd5XZcd5r+RoF5o= github.com/signalfx/golib/v3 v3.3.13 h1:Q+WDU2CeOGAJ2uZtb3Ov5cIUKS6tyvR2KU87SjVlXg0= github.com/signalfx/golib/v3 v3.3.13/go.mod h1:LKKCrEw4rU8ZL/8dVwX5i1+kqm4utB7uaHQpRx587rs= @@ -1215,16 +1201,8 @@ github.com/signalfx/omnition-kinesis-producer v0.5.0/go.mod h1:5tt4Zb0FS0QRKXVGF github.com/signalfx/opencensus-go-exporter-kinesis v0.6.3 h1:ooYCDeKtuwmT+HNBkv/VjkPp97f4xAmA6COgHQS9+as= github.com/signalfx/opencensus-go-exporter-kinesis v0.6.3/go.mod h1:iKTZPIUUpRI9Hp2yAMb2qNXl6itkEd2pxAznG08Y6YU= github.com/signalfx/sapm-proto v0.4.0/go.mod h1:x3gtwJ1GRejtkghB4nYpwixh2zqJrLbPU959ZNhM0Fk= -github.com/signalfx/sapm-proto v0.6.0 h1:SpcBPBN7mllCaittxvTmXM4zCob79ytb9DEHsZ9jLF0= -github.com/signalfx/sapm-proto v0.6.0 h1:SpcBPBN7mllCaittxvTmXM4zCob79ytb9DEHsZ9jLF0= -github.com/signalfx/sapm-proto v0.6.0/go.mod h1:2uHysA4VySJpaZKEvsceKew2aNclf5Bu45bfbNwgbhI= -github.com/signalfx/sapm-proto v0.6.0/go.mod h1:2uHysA4VySJpaZKEvsceKew2aNclf5Bu45bfbNwgbhI= -github.com/signalfx/sapm-proto v0.6.2 h1:2LtB8AUGVyP5lSlsaBjFTsHfZNK/zn+jzWl1tWwniRA= github.com/signalfx/sapm-proto v0.6.2 h1:2LtB8AUGVyP5lSlsaBjFTsHfZNK/zn+jzWl1tWwniRA= github.com/signalfx/sapm-proto v0.6.2/go.mod h1:AHtWypa5paGVlvDjSZw9Bh5GLgS62ee2U0UcsrLlLhU= -github.com/signalfx/sapm-proto v0.6.2/go.mod h1:AHtWypa5paGVlvDjSZw9Bh5GLgS62ee2U0UcsrLlLhU= -github.com/signalfx/signalfx-agent/pkg/apm v0.0.0-20201005151249-ce1a2e0a25e7 h1:+KSSs1oE/YFmd487gpPk79OcFo51tEiFqadFoE3RVvg= -github.com/signalfx/signalfx-agent/pkg/apm v0.0.0-20201005151249-ce1a2e0a25e7/go.mod 
h1:pNaqfprM2bSCBhE8sTT2NtasSWEsIJbrmnIF0ap/Cvg= github.com/signalfx/signalfx-agent/pkg/apm v0.0.0-20201009143858-d25fd073fb56 h1:XYBr6vxBtAufUs72S5LYkjCmCB7QM4kvX2jwufGCqhg= github.com/signalfx/signalfx-agent/pkg/apm v0.0.0-20201009143858-d25fd073fb56/go.mod h1:pNaqfprM2bSCBhE8sTT2NtasSWEsIJbrmnIF0ap/Cvg= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -1412,10 +1390,7 @@ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+ go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -1529,8 +1504,6 @@ golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200927032502-5d4f70055728 h1:5wtQIAulKU5AbLQOkjxl32UufnIOqgBX72pS0AV14H0= golang.org/x/net v0.0.0-20200927032502-5d4f70055728/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1733,8 +1706,6 @@ golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200822203824-307de81be3f4/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c h1:AQsh/7arPVFDBraQa8x7GoVnwnGg1kM7J2ySI0kF5WU= -golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20200929161345-d7fc70abf50f h1:18s2P7JILnVhIF2+ZtGJQ9czV5bvTsb13/UGtNPDbjA= golang.org/x/tools v0.0.0-20200929161345-d7fc70abf50f/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1817,8 +1788,6 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto 
v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200914193844-75d14daec038 h1:SnvTpXhVDJGFxzZiHbMUZTh3VjU2Vx2feJ7Zfl5+OIY=
-google.golang.org/genproto v0.0.0-20200914193844-75d14daec038/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200929141702-51c3e5b607fe h1:6SgESkjJknFUnsfQ2yxQbmTAi37BxhwS/riq+VdLo9c=
google.golang.org/genproto v0.0.0-20200929141702-51c3e5b607fe/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
diff --git a/receiver/splunkhecreceiver/go.mod b/receiver/splunkhecreceiver/go.mod
index c0bb79ed895b..43751915ec54 100644
--- a/receiver/splunkhecreceiver/go.mod
+++ b/receiver/splunkhecreceiver/go.mod
@@ -3,17 +3,13 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunk
go 1.14
require (
- github.com/census-instrumentation/opencensus-proto v0.3.0
- github.com/golang/protobuf v1.4.2
github.com/gorilla/mux v1.8.0
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter v0.0.0-00010101000000-000000000000
github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.0.0-00010101000000-000000000000
github.com/stretchr/testify v1.6.1
go.opencensus.io v0.22.4
go.opentelemetry.io/collector v0.12.1-0.20201012183541-526f34200197
go.uber.org/zap v1.16.0
google.golang.org/grpc/examples v0.0.0-20200728194956-1c32b02682df // indirect
- gotest.tools v2.2.0+incompatible
)
replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter => ../../exporter/splunkhecexporter
diff --git a/receiver/stanzareceiver/go.sum b/receiver/stanzareceiver/go.sum
index bc6bda422b00..d876bea096c3 100644
--- a/receiver/stanzareceiver/go.sum
+++ b/receiver/stanzareceiver/go.sum
@@ -807,6 +807,7 @@ github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/observiq/ctimefmt v1.0.0 h1:r7vTJ+Slkrt9fZ67mkf+mA6zAdR5nGIJRMTzkUyvilk=
github.com/observiq/ctimefmt v1.0.0/go.mod h1:mxi62//WbSpG/roCO1c6MqZ7zQTvjVtYheqHN3eOjvc=
+github.com/observiq/nanojack v0.0.0-20200910202758-a0af1c611319 h1:33Fh2cXMUKlnSxYAqt9+BHZRdK01/P6lJNfXttwmIjk=
github.com/observiq/nanojack v0.0.0-20200910202758-a0af1c611319/go.mod h1:f+QQxL9zFpO5q44o7rf+TOEtEmlMQUI9snW9ZADIku0=
github.com/observiq/stanza v0.12.0 h1:AOOKMxJyP/2U1ugorO4ufLKNyNrlDDWbqOS/9n6ZYDU=
github.com/observiq/stanza v0.12.0/go.mod h1:Tu5ukrGEoFVnk9Mz3yaRyQ6x7Y7IeqwCoLHd4JCySHU=
diff --git a/testbed/go.sum b/testbed/go.sum
index 93789ca01808..4a508437340f 100644
--- a/testbed/go.sum
+++ b/testbed/go.sum
@@ -100,8 +100,9 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWso
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/Songmu/retry v0.1.0 h1:hPA5xybQsksLR/ry/+t/7cFajPW+dqjmjhzZhioBILA=
github.com/Songmu/retry v0.1.0/go.mod h1:7sXIW7eseB9fq0FUvigRcQMVLR9tuHI0Scok+rkpAuA=
-github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
+github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
@@ -229,6 +230,8 @@ github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9 h1:NAvZb7gqQfLSNBPzVsvI7eZMosXtg2g2kxXrei90CtU=
+github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9/go.mod h1:glr97hP/JuXb+WMYCizc4PIFuzw1lCR97mwbe1VVXhQ=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
@@ -251,6 +254,10 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
+github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4 h1:fP04zlkPjAGpsduG7xN3rRkxjAqkJaIQnnkNYYw/pAk=
+github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4/go.mod h1:SBHk9aNQtiw4R4bEuzHjVmZikkUKCnO1v3lPQ21HZGk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
@@ -287,8 +294,9 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
+github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
+github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
@@ -408,6 +416,7 @@ github.com/gofrs/flock v0.8.0 h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY=
github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/googleapis v1.3.0/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU=
+github.com/gogo/googleapis v1.3.1/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU=
github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI=
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -481,6 +490,7 @@ github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0 h1:HVfrLniijszjS1
github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
+github.com/google/addlicense v0.0.0-20190510175307-22550fa7c1b0/go.mod h1:QtPG26W17m+OIQgE6gQ24gC1M6pUaMBAbFrTIDtwG/E=
github.com/google/addlicense v0.0.0-20200622132530-df58acafd6d5/go.mod h1:EMjYTRimagHs1FwlIqKyX3wAM0u3rA+McvlIIWmSamA=
github.com/google/addlicense v0.0.0-20200906110928-a0294312aa76 h1:JypWNzPMSgH5yL0NvFoAIsDRlKFgL0AsS3GO5bg4Pto=
github.com/google/addlicense v0.0.0-20200906110928-a0294312aa76/go.mod h1:EMjYTRimagHs1FwlIqKyX3wAM0u3rA+McvlIIWmSamA=
@@ -673,6 +683,12 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea h1:g2k+8WR7cHch4g0tBDhfiEvAp7fXxTNBiD1oC1Oxj3E=
+github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
+github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI=
+github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
+github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b h1:Rrp0ByJXEjhREMPGTt3aWYjoIsUGCbt21ekbeJcTWv0=
+github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
@@ -726,6 +742,7 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8=
github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/maratori/testpackage v1.0.1 h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKoALdbQ=
github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU=
@@ -869,6 +886,7 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
+github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8=
github.com/pavius/impi v0.0.3 h1:DND6MzU+BLABhOZXbELR3FU8b+zDgcq4dOCNLhiTYuI=
github.com/pavius/impi v0.0.3/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8=
github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
@@ -978,6 +996,7 @@ github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8
github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco=
github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
@@ -997,6 +1016,7 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc=
+github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/gopsutil v2.20.6+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/gopsutil v2.20.9+incompatible h1:msXs2frUV+O/JLva9EDLpuJ84PrFsdCTCQex8PUdtkQ=
github.com/shirou/gopsutil v2.20.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
@@ -1007,10 +1027,19 @@ github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJ
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
+github.com/signalfx/com_signalfx_metrics_protobuf v0.0.1/go.mod h1:QkslgLDW0N9qRi9qkxcNDaf812gg0kWcf3ZZORE5/FI=
github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 h1:X886QgwZH5qr9HIQkk3mWcNEhUxx6D8rUZumzLV4Wiw=
github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2/go.mod h1:tCQQqyJAVF1+mxNdqOi18sS/zaSrE6EMyWwRA2QTl70=
+github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 h1:WsShHmu12ZztYPfh9b+I+VjYD1o8iOHhB67WZCMEEE8=
+github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083/go.mod h1:adPDS6s7WaajdFBV9mQ7i0dKfQ8xiDnF9ZNETVPpp7c=
+github.com/signalfx/golib/v3 v3.3.13 h1:Q+WDU2CeOGAJ2uZtb3Ov5cIUKS6tyvR2KU87SjVlXg0=
+github.com/signalfx/golib/v3 v3.3.13/go.mod h1:LKKCrEw4rU8ZL/8dVwX5i1+kqm4utB7uaHQpRx587rs=
+github.com/signalfx/gomemcache v0.0.0-20180823214636-4f7ef64c72a9/go.mod h1:Ytb8KfCSyuwy/VILnROdgCvbQLA5ch0nkbG7lKT0BXw=
+github.com/signalfx/sapm-proto v0.4.0/go.mod h1:x3gtwJ1GRejtkghB4nYpwixh2zqJrLbPU959ZNhM0Fk=
github.com/signalfx/sapm-proto v0.6.2 h1:2LtB8AUGVyP5lSlsaBjFTsHfZNK/zn+jzWl1tWwniRA=
github.com/signalfx/sapm-proto v0.6.2/go.mod h1:AHtWypa5paGVlvDjSZw9Bh5GLgS62ee2U0UcsrLlLhU=
+github.com/signalfx/signalfx-agent/pkg/apm v0.0.0-20201009143858-d25fd073fb56 h1:XYBr6vxBtAufUs72S5LYkjCmCB7QM4kvX2jwufGCqhg=
+github.com/signalfx/signalfx-agent/pkg/apm v0.0.0-20201009143858-d25fd073fb56/go.mod h1:pNaqfprM2bSCBhE8sTT2NtasSWEsIJbrmnIF0ap/Cvg=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
@@ -1019,6 +1048,7 @@ github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
@@ -1112,6 +1142,7 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
github.com/valyala/fasthttp v1.15.1/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA=
github.com/valyala/quicktemplate v1.6.2/go.mod h1:mtEJpQtUiBV0SHhMX6RtiJtqxncgrfmjcUy5T68X8TM=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
+github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw=
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
github.com/vektra/mockery v0.0.0-20181123154057-e78b021dcbb5/go.mod h1:ppEjwdhyy7Y31EnHRDm1JkChoC7LXIJ7Ex0VYLWtZtQ=
github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM=
@@ -1128,6 +1159,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
@@ -1391,6 +1423,7 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190906203814-12febf440ab1/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1557,6 +1590,8 @@ gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlI
gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
+gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw=
+gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
From 1a20922f151f19cf3fe79e6d32935535072c2b5c Mon Sep 17 00:00:00 2001
From: Bogdan Drutu
Date: Fri, 16 Oct 2020 19:19:37 -0700
Subject: [PATCH 8/8] Remove OpenCensus from contrib testbed (#1253)
Signed-off-by: Bogdan Drutu
---
testbed/tests/metric_test.go | 10 +++++-----
testbed/tests/trace_test.go | 9 ---------
2 files changed, 5 insertions(+), 14 deletions(-)
diff --git a/testbed/tests/metric_test.go b/testbed/tests/metric_test.go
index 1047c84424a9..8df191c6ffde 100644
--- a/testbed/tests/metric_test.go
+++ b/testbed/tests/metric_test.go
@@ -32,12 +32,12 @@ func TestMetric10kDPS(t *testing.T) {
resourceSpec testbed.ResourceSpec
}{
{
- "OpenCensus",
- testbed.NewOCMetricDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
- testbed.NewOCDataReceiver(testbed.GetAvailablePort(t)),
+ "OTLP",
+ testbed.NewOTLPMetricDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
+ testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)),
testbed.ResourceSpec{
- ExpectedMaxCPU: 85,
- ExpectedMaxRAM: 70,
+ ExpectedMaxCPU: 50,
+ ExpectedMaxRAM: 80,
},
},
{
diff --git a/testbed/tests/trace_test.go b/testbed/tests/trace_test.go
index 6aa5dea2c194..a778de78d723 100644
--- a/testbed/tests/trace_test.go
+++ b/testbed/tests/trace_test.go
@@ -41,15 +41,6 @@ func TestTrace10kSPS(t *testing.T) {
receiver testbed.DataReceiver
resourceSpec testbed.ResourceSpec
}{
- {
- "OpenCensus",
- testbed.NewOCTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),
- testbed.NewOCDataReceiver(testbed.GetAvailablePort(t)),
- testbed.ResourceSpec{
- ExpectedMaxCPU: 39,
- ExpectedMaxRAM: 82,
- },
- },
{
"OTLP",
testbed.NewOTLPTraceDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)),