diff --git a/go.mod b/go.mod
index 57270b0904c..c1fd474ea16 100644
--- a/go.mod
+++ b/go.mod
@@ -48,9 +48,11 @@ require (
 	github.com/tcnksm/ghr v0.13.0
 	github.com/uber/jaeger-lib v2.2.0+incompatible
 	go.opencensus.io v0.22.3
+	go.uber.org/atomic v1.5.1
 	go.uber.org/zap v1.13.0
 	golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e
 	golang.org/x/sys v0.0.0-20200408040146-ea54a3c99b9b
+	golang.org/x/text v0.3.2
 	golang.org/x/tools v0.0.0-20200428211428-0c9eba77bc32 // indirect
 	google.golang.org/api v0.10.0 // indirect
 	google.golang.org/genproto v0.0.0-20200408120641-fbb3ad325eb7
diff --git a/testbed/testbed/child_process.go b/testbed/testbed/child_process.go
index c5868e8b58b..a7ee6b2df42 100644
--- a/testbed/testbed/child_process.go
+++ b/testbed/testbed/child_process.go
@@ -25,12 +25,12 @@ import (
 	"path"
 	"path/filepath"
 	"sync"
-	"sync/atomic"
 	"syscall"
 	"time"
 
 	"github.com/shirou/gopsutil/cpu"
 	"github.com/shirou/gopsutil/process"
+	"go.uber.org/atomic"
 )
 
 // ResourceSpec is a resource consumption specification.
@@ -95,10 +95,10 @@ type ChildProcess struct {
 	lastProcessTimes *cpu.TimesStat
 
 	// Current RAM RSS in MiBs
-	ramMiBCur uint32
+	ramMiBCur atomic.Uint32
 
 	// Current CPU percentage times 1000 (we use scaling since we have to use int for atomic operations).
-	cpuPercentX1000Cur uint32
+	cpuPercentX1000Cur atomic.Uint32
 
 	// Maximum CPU seen
 	cpuPercentMax float64
@@ -287,8 +287,8 @@ func (cp *ChildProcess) Stop() (stopped bool, err error) {
 	close(finished)
 
 	// Set resource consumption stats to 0
-	atomic.StoreUint32(&cp.ramMiBCur, 0)
-	atomic.StoreUint32(&cp.cpuPercentX1000Cur, 0)
+	cp.ramMiBCur.Store(0)
+	cp.cpuPercentX1000Cur.Store(0)
 
 	log.Printf("%s process stopped, exit code=%d", cp.name, cp.cmd.ProcessState.ExitCode())
 
@@ -369,7 +369,7 @@ func (cp *ChildProcess) fetchRAMUsage() {
 	}
 
 	// Store current usage.
-	atomic.StoreUint32(&cp.ramMiBCur, ramMiBCur)
+	cp.ramMiBCur.Store(ramMiBCur)
 }
 
 func (cp *ChildProcess) fetchCPUUsage() {
@@ -398,19 +398,19 @@
 	curCPUPercentageX1000 := uint32(cpuPercent * 1000)
 
 	// Store current usage.
-	atomic.StoreUint32(&cp.cpuPercentX1000Cur, curCPUPercentageX1000)
+	cp.cpuPercentX1000Cur.Store(curCPUPercentageX1000)
 }
 
 func (cp *ChildProcess) checkAllowedResourceUsage() error {
 	// Check if current CPU usage exceeds expected.
 	var errMsg string
-	if cp.resourceSpec.ExpectedMaxCPU != 0 && cp.cpuPercentX1000Cur/1000 > cp.resourceSpec.ExpectedMaxCPU {
+	if cp.resourceSpec.ExpectedMaxCPU != 0 && cp.cpuPercentX1000Cur.Load()/1000 > cp.resourceSpec.ExpectedMaxCPU {
 		errMsg = fmt.Sprintf("CPU consumption is %.1f%%, max expected is %d%%",
-			float64(cp.cpuPercentX1000Cur)/1000.0, cp.resourceSpec.ExpectedMaxCPU)
+			float64(cp.cpuPercentX1000Cur.Load())/1000.0, cp.resourceSpec.ExpectedMaxCPU)
 	}
 
 	// Check if current RAM usage exceeds expected.
-	if cp.resourceSpec.ExpectedMaxRAM != 0 && cp.ramMiBCur > cp.resourceSpec.ExpectedMaxRAM {
+	if cp.resourceSpec.ExpectedMaxRAM != 0 && cp.ramMiBCur.Load() > cp.resourceSpec.ExpectedMaxRAM {
 		errMsg = fmt.Sprintf("RAM consumption is %d MiB, max expected is %d MiB",
 			cp.ramMiBCur, cp.resourceSpec.ExpectedMaxRAM)
 	}
@@ -431,8 +431,8 @@ func (cp *ChildProcess) GetResourceConsumption() string {
 		return ""
 	}
 
-	curRSSMib := atomic.LoadUint32(&cp.ramMiBCur)
-	curCPUPercentageX1000 := atomic.LoadUint32(&cp.cpuPercentX1000Cur)
+	curRSSMib := cp.ramMiBCur.Load()
+	curCPUPercentageX1000 := cp.cpuPercentX1000Cur.Load()
 
 	return fmt.Sprintf("%s RAM (RES):%4d MiB, CPU:%4.1f%%", cp.name,
 		curRSSMib, float64(curCPUPercentageX1000)/1000.0)
diff --git a/testbed/testbed/data_providers.go b/testbed/testbed/data_providers.go
index 6d2f4bed0f2..6ea63afe9c9 100644
--- a/testbed/testbed/data_providers.go
+++ b/testbed/testbed/data_providers.go
@@ -22,13 +22,13 @@ import (
 	"log"
 	"math/rand"
 	"strconv"
-	"sync/atomic"
 	"time"
 
 	metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
 	resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
 	tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
 	"github.com/golang/protobuf/ptypes/timestamp"
+	"go.uber.org/atomic"
 
 	"go.opentelemetry.io/collector/consumer/pdata"
 	"go.opentelemetry.io/collector/internal/data"
@@ -37,40 +37,40 @@ import (
 	"go.opentelemetry.io/collector/translator/internaldata"
 )
 
-//DataProvider defines the interface for generators of test data used to drive various end-to-end tests.
+// DataProvider defines the interface for generators of test data used to drive various end-to-end tests.
 type DataProvider interface {
-	//SetLoadGeneratorCounters supplies pointers to LoadGenerator counters.
-	//The data provider implementation should increment these as it generates data.
-	SetLoadGeneratorCounters(batchesGenerated *uint64, dataItemsGenerated *uint64)
-	//GenerateTraces returns an internal Traces instance with an OTLP ResourceSpans slice populated with test data.
+	// SetLoadGeneratorCounters supplies pointers to LoadGenerator counters.
+	// The data provider implementation should increment these as it generates data.
+	SetLoadGeneratorCounters(batchesGenerated *atomic.Uint64, dataItemsGenerated *atomic.Uint64)
+	// GenerateTraces returns an internal Traces instance with an OTLP ResourceSpans slice populated with test data.
 	GenerateTraces() (pdata.Traces, bool)
-	//GenerateTracesOld returns a slice of OpenCensus Span instances populated with test data.
+	// GenerateTracesOld returns a slice of OpenCensus Span instances populated with test data.
 	GenerateTracesOld() ([]*tracepb.Span, bool)
-	//GenerateMetrics returns an internal MetricData instance with an OTLP ResourceMetrics slice of test data.
+	// GenerateMetrics returns an internal MetricData instance with an OTLP ResourceMetrics slice of test data.
 	GenerateMetrics() (data.MetricData, bool)
-	//GenerateMetricsOld returns a slice of OpenCensus Metric instances populated with test data.
+	// GenerateMetricsOld returns a slice of OpenCensus Metric instances populated with test data.
 	GenerateMetricsOld() ([]*metricspb.Metric, bool)
-	//GetGeneratedSpan returns the generated Span matching the provided traceId and spanId or else nil if no match found.
+	// GetGeneratedSpan returns the generated Span matching the provided traceId and spanId or else nil if no match found.
 	GetGeneratedSpan(traceID []byte, spanID []byte) *otlptrace.Span
 }
 
-//PerfTestDataProvider in an implementation of the DataProvider for use in performance tests.
-//Tracing IDs are based on the incremented batch and data items counters.
+// PerfTestDataProvider in an implementation of the DataProvider for use in performance tests.
+// Tracing IDs are based on the incremented batch and data items counters.
 type PerfTestDataProvider struct {
 	options            LoadOptions
-	batchesGenerated   *uint64
-	dataItemsGenerated *uint64
+	batchesGenerated   *atomic.Uint64
+	dataItemsGenerated *atomic.Uint64
 }
 
-//NewPerfTestDataProvider creates an instance of PerfTestDataProvider which generates test data based on the sizes
-//specified in the supplied LoadOptions.
+// NewPerfTestDataProvider creates an instance of PerfTestDataProvider which generates test data based on the sizes
+// specified in the supplied LoadOptions.
 func NewPerfTestDataProvider(options LoadOptions) *PerfTestDataProvider {
 	return &PerfTestDataProvider{
 		options: options,
 	}
 }
 
-func (dp *PerfTestDataProvider) SetLoadGeneratorCounters(batchesGenerated *uint64, dataItemsGenerated *uint64) {
+func (dp *PerfTestDataProvider) SetLoadGeneratorCounters(batchesGenerated *atomic.Uint64, dataItemsGenerated *atomic.Uint64) {
 	dp.batchesGenerated = batchesGenerated
 	dp.dataItemsGenerated = dataItemsGenerated
 }
@@ -78,12 +78,12 @@ func (dp *PerfTestDataProvider) SetLoadGeneratorCounters(batchesGenerated *uint6
 func (dp *PerfTestDataProvider) GenerateTracesOld() ([]*tracepb.Span, bool) {
 	var spans []*tracepb.Span
 
-	traceID := atomic.AddUint64(dp.batchesGenerated, 1)
+	traceID := dp.batchesGenerated.Inc()
 	for i := 0; i < dp.options.ItemsPerBatch; i++ {
 
 		startTime := time.Now()
 
-		spanID := atomic.AddUint64(dp.dataItemsGenerated, 1)
+		spanID := dp.dataItemsGenerated.Inc()
 
 		// Create a span.
 		span := &tracepb.Span{
@@ -126,13 +126,13 @@ func (dp *PerfTestDataProvider) GenerateTraces() (pdata.Traces, bool) {
 	spans := ilss.At(0).Spans()
 	spans.Resize(dp.options.ItemsPerBatch)
 
-	traceID := atomic.AddUint64(dp.batchesGenerated, 1)
+	traceID := dp.batchesGenerated.Inc()
 	for i := 0; i < dp.options.ItemsPerBatch; i++ {
 
 		startTime := time.Now()
 		endTime := startTime.Add(time.Duration(time.Millisecond))
 
-		spanID := atomic.AddUint64(dp.dataItemsGenerated, 1)
+		spanID := dp.dataItemsGenerated.Inc()
 
 		span := spans.At(i)
 
@@ -192,7 +192,7 @@ func (dp *PerfTestDataProvider) GenerateMetricsOld() ([]*metricspb.Metric, bool)
 		Resource: resource,
 	}
 
-	batchIndex := atomic.AddUint64(dp.batchesGenerated, 1)
+	batchIndex := dp.batchesGenerated.Inc()
 
 	// Generate data points for the metric. We generate timeseries each containing
 	// a single data points. This is the most typical payload composition since
@@ -201,7 +201,7 @@
 		timeseries := &metricspb.TimeSeries{}
 		startTime := time.Now()
 
-		value := atomic.AddUint64(dp.dataItemsGenerated, 1)
+		value := dp.dataItemsGenerated.Inc()
 
 		// Create a data point.
 		point := &metricspb.Point{
@@ -248,14 +248,14 @@ func (dp *PerfTestDataProvider) GenerateMetrics() (data.MetricData, bool) {
 		metricDescriptor.SetDescription("Load Generator Counter #" + strconv.Itoa(i))
 		metricDescriptor.SetType(pdata.MetricTypeInt64)
 
-		batchIndex := atomic.AddUint64(dp.batchesGenerated, 1)
+		batchIndex := dp.batchesGenerated.Inc()
 
 		// Generate data points for the metric.
 		metric.Int64DataPoints().Resize(dataPointsPerMetric)
 		for j := 0; j < dataPointsPerMetric; j++ {
 			dataPoint := metric.Int64DataPoints().At(j)
 			dataPoint.SetStartTime(pdata.TimestampUnixNano(uint64(time.Now().UnixNano())))
-			value := atomic.AddUint64(dp.dataItemsGenerated, 1)
+			value := dp.dataItemsGenerated.Inc()
 			dataPoint.SetValue(int64(value))
 			dataPoint.LabelsMap().InitFromMap(map[string]string{
 				"item_index": "item_" + strconv.Itoa(j),
@@ -283,22 +283,22 @@ func timeToTimestamp(t time.Time) *timestamp.Timestamp {
 	}
 }
 
-//GoldenDataProvider is an implementation of DataProvider for use in correctness tests.
-//Provided data from the "Golden" dataset generated using pairwise combinatorial testing techniques.
+// GoldenDataProvider is an implementation of DataProvider for use in correctness tests.
+// Provided data from the "Golden" dataset generated using pairwise combinatorial testing techniques.
 type GoldenDataProvider struct {
 	tracePairsFile     string
 	spanPairsFile      string
 	random             io.Reader
-	batchesGenerated   *uint64
-	dataItemsGenerated *uint64
+	batchesGenerated   *atomic.Uint64
+	dataItemsGenerated *atomic.Uint64
 	resourceSpans      []*otlptrace.ResourceSpans
 	spansIndex         int
 	spansMap           map[string]*otlptrace.Span
 }
 
-//NewGoldenDataProvider creates a new instance of GoldenDataProvider which generates test data based
-//on the pairwise combinations specified in the tracePairsFile and spanPairsFile input variables.
-//The supplied randomSeed is used to initialize the random number generator used in generating tracing IDs.
+// NewGoldenDataProvider creates a new instance of GoldenDataProvider which generates test data based
+// on the pairwise combinations specified in the tracePairsFile and spanPairsFile input variables.
+// The supplied randomSeed is used to initialize the random number generator used in generating tracing IDs.
 func NewGoldenDataProvider(tracePairsFile string, spanPairsFile string, randomSeed int64) *GoldenDataProvider {
 	return &GoldenDataProvider{
 		tracePairsFile: tracePairsFile,
@@ -307,7 +307,7 @@ func NewGoldenDataProvider(tracePairsFile string, spanPairsFile string, randomSe
 	}
 }
 
-func (dp *GoldenDataProvider) SetLoadGeneratorCounters(batchesGenerated *uint64, dataItemsGenerated *uint64) {
+func (dp *GoldenDataProvider) SetLoadGeneratorCounters(batchesGenerated *atomic.Uint64, dataItemsGenerated *atomic.Uint64) {
 	dp.batchesGenerated = batchesGenerated
 	dp.dataItemsGenerated = dataItemsGenerated
 }
@@ -321,7 +321,7 @@ func (dp *GoldenDataProvider) GenerateTraces() (pdata.Traces, bool) {
 			dp.resourceSpans = make([]*otlptrace.ResourceSpans, 0)
 		}
 	}
-	atomic.AddUint64(dp.batchesGenerated, 1)
+	dp.batchesGenerated.Inc()
 	if dp.spansIndex >= len(dp.resourceSpans) {
 		return pdata.TracesFromOtlp(make([]*otlptrace.ResourceSpans, 0)), true
 	}
@@ -332,7 +332,7 @@
 	for _, libSpans := range resourceSpans[0].InstrumentationLibrarySpans {
 		spanCount += uint64(len(libSpans.Spans))
 	}
-	atomic.AddUint64(dp.dataItemsGenerated, spanCount)
+	dp.dataItemsGenerated.Add(spanCount)
 	return pdata.TracesFromOtlp(resourceSpans), false
 }
 
diff --git a/testbed/testbed/load_generator.go b/testbed/testbed/load_generator.go
index f80e57b1c7e..ba30cd65168 100644
--- a/testbed/testbed/load_generator.go
+++ b/testbed/testbed/load_generator.go
@@ -18,14 +18,17 @@ import (
 	"fmt"
 	"log"
 	"sync"
-	"sync/atomic"
 	"time"
 
 	resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
+	"go.uber.org/atomic"
+	"golang.org/x/text/message"
 
 	"go.opentelemetry.io/collector/consumer/consumerdata"
 )
 
+var printer = message.NewPrinter(message.MatchLanguage("en"))
+
 // LoadGenerator is a simple load generator.
 type LoadGenerator struct {
 	sender DataSender
@@ -33,10 +36,10 @@ type LoadGenerator struct {
 	dataProvider DataProvider
 
 	// Number of batches of data items sent.
-	batchesSent uint64
+	batchesSent atomic.Uint64
 
 	// Number of data items (spans or metric data points) sent.
-	dataItemsSent uint64
+	dataItemsSent atomic.Uint64
 
 	stopOnce sync.Once
 	stopWait sync.WaitGroup
@@ -111,11 +114,11 @@ func (lg *LoadGenerator) Stop() {
 
 // GetStats returns the stats as a printable string.
 func (lg *LoadGenerator) GetStats() string {
-	return fmt.Sprintf("Sent:%5d items", atomic.LoadUint64(&lg.dataItemsSent))
+	return printer.Sprintf("Sent:%10d items", lg.DataItemsSent())
 }
 
 func (lg *LoadGenerator) DataItemsSent() uint64 {
-	return atomic.LoadUint64(&lg.dataItemsSent)
+	return lg.dataItemsSent.Load()
 }
 
 // IncDataItemsSent is used when a test bypasses the LoadGenerator and sends data
@@ -125,7 +128,7 @@ func (lg *LoadGenerator) DataItemsSent() uint64 {
 // reports to use their own counter and load generator and other sending sources
 // to contribute to this counter. This could be done as a future improvement.
 func (lg *LoadGenerator) IncDataItemsSent() {
-	atomic.AddUint64(&lg.dataItemsSent, 1)
+	lg.dataItemsSent.Inc()
 }
 
 func (lg *LoadGenerator) generate() {
diff --git a/testbed/testbed/mock_backend.go b/testbed/testbed/mock_backend.go
index 05c1a2dd34c..63e5b110cea 100644
--- a/testbed/testbed/mock_backend.go
+++ b/testbed/testbed/mock_backend.go
@@ -16,11 +16,12 @@ package testbed
 
 import (
 	"context"
-	"fmt"
 	"log"
 	"os"
 	"sync"
-	"sync/atomic"
+	"time"
+
+	"go.uber.org/atomic"
 
 	"go.opentelemetry.io/collector/consumer/consumerdata"
 	"go.opentelemetry.io/collector/consumer/pdata"
@@ -42,6 +43,7 @@ type MockBackend struct {
 	// Start/stop flags
 	isStarted bool
 	stopOnce  sync.Once
+	startedAt time.Time
 
 	// Recording fields.
 	isRecording bool
@@ -87,6 +89,7 @@ func (mb *MockBackend) Start() error {
 	}
 
 	mb.isStarted = true
+	mb.startedAt = time.Now()
 	return nil
 }
 
@@ -115,12 +118,13 @@ func (mb *MockBackend) EnableRecording() {
 }
 
 func (mb *MockBackend) GetStats() string {
-	return fmt.Sprintf("Received:%5d items", mb.DataItemsReceived())
+	received := mb.DataItemsReceived()
+	return printer.Sprintf("Received:%10d items (%d/sec)", received, int(float64(received)/time.Since(mb.startedAt).Seconds()))
 }
 
 // DataItemsReceived returns total number of received spans and metrics.
 func (mb *MockBackend) DataItemsReceived() uint64 {
-	return atomic.LoadUint64(&mb.tc.spansReceived) + atomic.LoadUint64(&mb.mc.metricsReceived)
+	return mb.tc.spansReceived.Load() + mb.mc.metricsReceived.Load()
 }
 
 // ClearReceivedItems clears the list of received traces and metrics. Note: counters
@@ -169,12 +173,12 @@ func (mb *MockBackend) ConsumeMetricOld(md consumerdata.MetricsData) {
 }
 
 type MockTraceConsumer struct {
-	spansReceived uint64
+	spansReceived atomic.Uint64
 	backend       *MockBackend
 }
 
 func (tc *MockTraceConsumer) ConsumeTraces(ctx context.Context, td pdata.Traces) error {
-	atomic.AddUint64(&tc.spansReceived, uint64(td.SpanCount()))
+	tc.spansReceived.Add(uint64(td.SpanCount()))
 
 	rs := td.ResourceSpans()
 	for i := 0; i < rs.Len(); i++ {
@@ -210,7 +214,7 @@ func (tc *MockTraceConsumer) ConsumeTraces(ctx context.Context, td pdata.Traces)
 }
 
 func (tc *MockTraceConsumer) ConsumeTraceData(ctx context.Context, td consumerdata.TraceData) error {
-	atomic.AddUint64(&tc.spansReceived, uint64(len(td.Spans)))
+	tc.spansReceived.Add(uint64(len(td.Spans)))
 
 	for _, span := range td.Spans {
 		var spanSeqnum int64
@@ -240,13 +244,13 @@ func (tc *MockTraceConsumer) ConsumeTraceData(ctx context.Context, td consumerda
 }
 
 type MockMetricConsumer struct {
-	metricsReceived uint64
+	metricsReceived atomic.Uint64
 	backend         *MockBackend
 }
 
 func (mc *MockMetricConsumer) ConsumeMetrics(_ context.Context, md pdata.Metrics) error {
 	_, dataPoints := pdatautil.MetricAndDataPointCount(md)
-	atomic.AddUint64(&mc.metricsReceived, uint64(dataPoints))
+	mc.metricsReceived.Add(uint64(dataPoints))
 	mc.backend.ConsumeMetric(md)
 	return nil
 }
@@ -259,7 +263,7 @@ func (mc *MockMetricConsumer) ConsumeMetricsData(ctx context.Context, md consume
 		}
 	}
 
-	atomic.AddUint64(&mc.metricsReceived, uint64(dataPoints))
+	mc.metricsReceived.Add(uint64(dataPoints))
 
 	mc.backend.ConsumeMetricOld(md)
 
diff --git a/testbed/testbed/mock_backend_test.go b/testbed/testbed/mock_backend_test.go
index 2239cb2341f..b96e0a4ced6 100644
--- a/testbed/testbed/mock_backend_test.go
+++ b/testbed/testbed/mock_backend_test.go
@@ -3,7 +3,7 @@
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
-//// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -52,12 +52,12 @@ func TestGeneratorAndBackend(t *testing.T) {
 
 		defer mb.Stop()
 
-		options := LoadOptions{DataItemsPerSecond: 10000, ItemsPerBatch: 10}
+		options := LoadOptions{DataItemsPerSecond: 10_000, ItemsPerBatch: 10}
 		dataProvider := NewPerfTestDataProvider(options)
 		lg, err := NewLoadGenerator(dataProvider, test.sender)
 		require.NoError(t, err, "Cannot start load generator")
 
-		assert.EqualValues(t, 0, lg.dataItemsSent)
+		assert.EqualValues(t, 0, lg.dataItemsSent.Load())
 
 		// Generate at 1000 SPS
 		lg.Start(LoadOptions{DataItemsPerSecond: 1000})
diff --git a/testbed/testbed/test_case.go b/testbed/testbed/test_case.go
index d8ca601159c..ae20946bf21 100644
--- a/testbed/testbed/test_case.go
+++ b/testbed/testbed/test_case.go
@@ -347,7 +347,7 @@ func (tc *TestCase) logStats() {
 }
 
 func (tc *TestCase) logStatsOnce() {
-	log.Printf("%s, %s, %s",
+	log.Printf("%s | %s | %s",
 		tc.agentProc.GetResourceConsumption(),
 		tc.LoadGenerator.GetStats(),
 		tc.MockBackend.GetStats())
diff --git a/testbed/tests/e2e_test.go b/testbed/tests/e2e_test.go
index 76c84877ff4..885f6b9441d 100644
--- a/testbed/tests/e2e_test.go
+++ b/testbed/tests/e2e_test.go
@@ -29,7 +29,7 @@ import (
 )
 
 func TestIdleMode(t *testing.T) {
-	options := testbed.LoadOptions{DataItemsPerSecond: 10000, ItemsPerBatch: 10}
+	options := testbed.LoadOptions{DataItemsPerSecond: 10_000, ItemsPerBatch: 10}
 	dataProvider := testbed.NewPerfTestDataProvider(options)
 	tc := testbed.NewTestCase(
 		t,
@@ -58,7 +58,7 @@ func TestBallastMemory(t *testing.T) {
 		{1000, 100},
 	}
 
-	options := testbed.LoadOptions{DataItemsPerSecond: 10000, ItemsPerBatch: 10}
+	options := testbed.LoadOptions{DataItemsPerSecond: 10_000, ItemsPerBatch: 10}
 	dataProvider := testbed.NewPerfTestDataProvider(options)
 	for _, test := range tests {
 		tc := testbed.NewTestCase(
diff --git a/testbed/tests/metric_test.go b/testbed/tests/metric_test.go
index be05470e5ba..8331b2e563a 100644
--- a/testbed/tests/metric_test.go
+++ b/testbed/tests/metric_test.go
@@ -24,7 +24,7 @@ import (
 )
 
 func TestMetricNoBackend10kDPSOpenCensus(t *testing.T) {
-	options := testbed.LoadOptions{DataItemsPerSecond: 10000, ItemsPerBatch: 10}
+	options := testbed.LoadOptions{DataItemsPerSecond: 10_000, ItemsPerBatch: 10}
 	dataProvider := testbed.NewPerfTestDataProvider(options)
 	tc := testbed.NewTestCase(
 		t,
@@ -40,7 +40,7 @@ func TestMetricNoBackend10kDPSOpenCensus(t *testing.T) {
 	tc.SetResourceLimits(testbed.ResourceSpec{ExpectedMaxCPU: 200, ExpectedMaxRAM: 200})
 	tc.StartAgent()
 
-	tc.StartLoad(testbed.LoadOptions{DataItemsPerSecond: 10000})
+	tc.StartLoad(testbed.LoadOptions{DataItemsPerSecond: 10_000})
 
 	tc.Sleep(tc.Duration)
 }
diff --git a/testbed/tests/scenarios.go b/testbed/tests/scenarios.go
index 1d33f9f6e0e..a99cdba9f55 100644
--- a/testbed/tests/scenarios.go
+++ b/testbed/tests/scenarios.go
@@ -26,11 +26,15 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"golang.org/x/text/message"
 
 	"go.opentelemetry.io/collector/testbed/testbed"
 )
 
-var performanceResultsSummary testbed.TestResultsSummary = &testbed.PerformanceResults{}
+var (
+	performanceResultsSummary testbed.TestResultsSummary = &testbed.PerformanceResults{}
+	printer                                              = message.NewPrinter(message.MatchLanguage("en"))
+)
 
 // createConfigYaml creates a collector config file that corresponds to the
 // sender and receiver used in the test and returns the config file name.
@@ -123,7 +127,7 @@ func Scenario10kItemsPerSecond(
 	require.NoError(t, err)
 
 	options := testbed.LoadOptions{
-		DataItemsPerSecond: 10000,
+		DataItemsPerSecond: 10_000,
 		ItemsPerBatch:      100,
 	}
 	agentProc := &testbed.ChildProcess{}