diff --git a/.chloggen/rm-direction-hostmetrics.yaml b/.chloggen/rm-direction-hostmetrics.yaml
new file mode 100755
index 000000000000..4c891517e982
--- /dev/null
+++ b/.chloggen/rm-direction-hostmetrics.yaml
@@ -0,0 +1,16 @@
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: breaking
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: hostmetricsreceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: "Remove the deprecated `emitMetricsWithDirectionAttribute` and `emitMetricsWithoutDirectionAttribute` feature gates"
+
+# One or more tracking issues related to the change
+issues: [14959]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
diff --git a/receiver/hostmetricsreceiver/README.md b/receiver/hostmetricsreceiver/README.md
index 6d7b56be9206..26092b25a31a 100644
--- a/receiver/hostmetricsreceiver/README.md
+++ b/receiver/hostmetricsreceiver/README.md
@@ -141,23 +141,6 @@ service:
receivers: [hostmetrics, hostmetrics/disk]
```
-### Feature gate configurations
-
-#### Transition from metrics with "direction" attribute
-
-The proposal to change metrics from being reported with a `direction` attribute has been reverted in the specification. As a result, the
-following feature gates will be removed in v0.62.0:
-
-- **receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute**
-- **receiver.hostmetricsreceiver.emitMetricsWithDirectionAttribute**
-
-For additional information, see https://github.com/open-telemetry/opentelemetry-specification/issues/2726.
-
-##### More information:
-
-- https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/11815
-- https://github.com/open-telemetry/opentelemetry-specification/pull/2617
-
[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta
[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol
diff --git a/receiver/hostmetricsreceiver/factory.go b/receiver/hostmetricsreceiver/factory.go
index baaf84026162..d47e8f9f77e1 100644
--- a/receiver/hostmetricsreceiver/factory.go
+++ b/receiver/hostmetricsreceiver/factory.go
@@ -21,9 +21,7 @@ import (
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config"
"go.opentelemetry.io/collector/consumer"
- "go.opentelemetry.io/collector/featuregate"
"go.opentelemetry.io/collector/receiver/scraperhelper"
- "go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/cpuscraper"
@@ -103,12 +101,6 @@ func createMetricsReceiver(
)
}
-func logDeprecatedFeatureGateForDirection(log *zap.Logger, gateID string) {
- log.Warn("WARNING: The " + gateID + " feature gate is deprecated and will be removed in the next release. The change to remove " +
- "the direction attribute has been reverted in the specification. See https://github.com/open-telemetry/opentelemetry-specification/issues/2726 " +
- "for additional details.")
-}
-
func createAddScraperOptions(
ctx context.Context,
set component.ReceiverCreateSettings,
@@ -131,13 +123,6 @@ func createAddScraperOptions(
return nil, fmt.Errorf("host metrics scraper factory not found for key: %q", key)
}
- if !featuregate.GetRegistry().IsEnabled(internal.EmitMetricsWithDirectionAttributeFeatureGateID) {
- logDeprecatedFeatureGateForDirection(set.Logger, internal.EmitMetricsWithDirectionAttributeFeatureGateID)
- }
- if featuregate.GetRegistry().IsEnabled(internal.EmitMetricsWithoutDirectionAttributeFeatureGateID) {
- logDeprecatedFeatureGateForDirection(set.Logger, internal.EmitMetricsWithoutDirectionAttributeFeatureGateID)
- }
-
return scraperControllerOptions, nil
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper.go b/receiver/hostmetricsreceiver/internal/scraper.go
index 768ef5078318..5ef990f50545 100644
--- a/receiver/hostmetricsreceiver/internal/scraper.go
+++ b/receiver/hostmetricsreceiver/internal/scraper.go
@@ -18,42 +18,9 @@ import (
"context"
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/collector/featuregate"
"go.opentelemetry.io/collector/receiver/scraperhelper"
)
-const (
- EmitMetricsWithDirectionAttributeFeatureGateID = "receiver.hostmetricsreceiver.emitMetricsWithDirectionAttribute"
- EmitMetricsWithoutDirectionAttributeFeatureGateID = "receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute"
-)
-
-var (
- emitMetricsWithDirectionAttributeFeatureGate = featuregate.Gate{
- ID: EmitMetricsWithDirectionAttributeFeatureGateID,
- Enabled: true,
- Description: "Some process host metrics reported are transitioning from being reported with a direction " +
- "attribute to being reported with the direction included in the metric name to adhere to the " +
- "OpenTelemetry specification. This feature gate controls emitting the old metrics with the direction " +
- "attribute. For more details, see: " +
- "https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/hostmetricsreceiver/README.md#feature-gate-configurations",
- }
-
- emitMetricsWithoutDirectionAttributeFeatureGate = featuregate.Gate{
- ID: EmitMetricsWithoutDirectionAttributeFeatureGateID,
- Enabled: false,
- Description: "Some process host metrics reported are transitioning from being reported with a direction " +
- "attribute to being reported with the direction included in the metric name to adhere to the " +
- "OpenTelemetry specification. This feature gate controls emitting the new metrics without the direction " +
- "attribute. For more details, see: " +
- "https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/hostmetricsreceiver/README.md#feature-gate-configurations",
- }
-)
-
-func init() {
- featuregate.GetRegistry().MustRegister(emitMetricsWithDirectionAttributeFeatureGate)
- featuregate.GetRegistry().MustRegister(emitMetricsWithoutDirectionAttributeFeatureGate)
-}
-
// ScraperFactory can create a MetricScraper.
type ScraperFactory interface {
// CreateDefaultConfig creates the default configuration for the Scraper.
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go
index e269595c7918..56861d68d2e5 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go
@@ -25,13 +25,11 @@ import (
"github.com/shirou/gopsutil/v3/disk"
"github.com/shirou/gopsutil/v3/host"
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/collector/featuregate"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver/scrapererror"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset"
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata"
)
@@ -50,17 +48,13 @@ type scraper struct {
excludeFS filterset.FilterSet
// for mocking
- bootTime func() (uint64, error)
- ioCounters func(names ...string) (map[string]disk.IOCountersStat, error)
- emitMetricsWithDirectionAttribute bool
- emitMetricsWithoutDirectionAttribute bool
+ bootTime func() (uint64, error)
+ ioCounters func(names ...string) (map[string]disk.IOCountersStat, error)
}
// newDiskScraper creates a Disk Scraper
func newDiskScraper(_ context.Context, settings component.ReceiverCreateSettings, cfg *Config) (*scraper, error) {
scraper := &scraper{settings: settings, config: cfg, bootTime: host.BootTime, ioCounters: disk.IOCounters}
- scraper.emitMetricsWithDirectionAttribute = featuregate.GetRegistry().IsEnabled(internal.EmitMetricsWithDirectionAttributeFeatureGateID)
- scraper.emitMetricsWithoutDirectionAttribute = featuregate.GetRegistry().IsEnabled(internal.EmitMetricsWithoutDirectionAttributeFeatureGateID)
var err error
@@ -116,27 +110,15 @@ func (s *scraper) scrape(_ context.Context) (pmetric.Metrics, error) {
func (s *scraper) recordDiskIOMetric(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) {
for device, ioCounter := range ioCounters {
- if s.emitMetricsWithDirectionAttribute {
- s.mb.RecordSystemDiskIoDataPoint(now, int64(ioCounter.ReadBytes), device, metadata.AttributeDirectionRead)
- s.mb.RecordSystemDiskIoDataPoint(now, int64(ioCounter.WriteBytes), device, metadata.AttributeDirectionWrite)
- }
- if s.emitMetricsWithoutDirectionAttribute {
- s.mb.RecordSystemDiskIoReadDataPoint(now, int64(ioCounter.ReadBytes), device)
- s.mb.RecordSystemDiskIoWriteDataPoint(now, int64(ioCounter.WriteBytes), device)
- }
+ s.mb.RecordSystemDiskIoDataPoint(now, int64(ioCounter.ReadBytes), device, metadata.AttributeDirectionRead)
+ s.mb.RecordSystemDiskIoDataPoint(now, int64(ioCounter.WriteBytes), device, metadata.AttributeDirectionWrite)
}
}
func (s *scraper) recordDiskOperationsMetric(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) {
for device, ioCounter := range ioCounters {
- if s.emitMetricsWithDirectionAttribute {
- s.mb.RecordSystemDiskOperationsDataPoint(now, int64(ioCounter.ReadCount), device, metadata.AttributeDirectionRead)
- s.mb.RecordSystemDiskOperationsDataPoint(now, int64(ioCounter.WriteCount), device, metadata.AttributeDirectionWrite)
- }
- if s.emitMetricsWithoutDirectionAttribute {
- s.mb.RecordSystemDiskOperationsReadDataPoint(now, int64(ioCounter.ReadCount), device)
- s.mb.RecordSystemDiskOperationsWriteDataPoint(now, int64(ioCounter.WriteCount), device)
- }
+ s.mb.RecordSystemDiskOperationsDataPoint(now, int64(ioCounter.ReadCount), device, metadata.AttributeDirectionRead)
+ s.mb.RecordSystemDiskOperationsDataPoint(now, int64(ioCounter.WriteCount), device, metadata.AttributeDirectionWrite)
}
}
@@ -148,14 +130,8 @@ func (s *scraper) recordDiskIOTimeMetric(now pcommon.Timestamp, ioCounters map[s
func (s *scraper) recordDiskOperationTimeMetric(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) {
for device, ioCounter := range ioCounters {
- if s.emitMetricsWithDirectionAttribute {
- s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(ioCounter.ReadTime)/1e3, device, metadata.AttributeDirectionRead)
- s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(ioCounter.WriteTime)/1e3, device, metadata.AttributeDirectionWrite)
- }
- if s.emitMetricsWithoutDirectionAttribute {
- s.mb.RecordSystemDiskOperationTimeReadDataPoint(now, float64(ioCounter.ReadTime)/1e3, device)
- s.mb.RecordSystemDiskOperationTimeWriteDataPoint(now, float64(ioCounter.WriteTime)/1e3, device)
- }
+ s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(ioCounter.ReadTime)/1e3, device, metadata.AttributeDirectionRead)
+ s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(ioCounter.WriteTime)/1e3, device, metadata.AttributeDirectionWrite)
}
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go
index 11af019d0a8d..501238ae5451 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go
@@ -39,13 +39,7 @@ func (s *scraper) recordDiskWeightedIOTimeMetric(now pcommon.Timestamp, ioCounte
func (s *scraper) recordDiskMergedMetric(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) {
for device, ioCounter := range ioCounters {
- if s.emitMetricsWithDirectionAttribute {
- s.mb.RecordSystemDiskMergedDataPoint(now, int64(ioCounter.MergedReadCount), device, metadata.AttributeDirectionRead)
- s.mb.RecordSystemDiskMergedDataPoint(now, int64(ioCounter.MergedWriteCount), device, metadata.AttributeDirectionWrite)
- }
- if s.emitMetricsWithoutDirectionAttribute {
- s.mb.RecordSystemDiskMergedReadDataPoint(now, int64(ioCounter.MergedReadCount), device)
- s.mb.RecordSystemDiskMergedWriteDataPoint(now, int64(ioCounter.MergedWriteCount), device)
- }
+ s.mb.RecordSystemDiskMergedDataPoint(now, int64(ioCounter.MergedReadCount), device, metadata.AttributeDirectionRead)
+ s.mb.RecordSystemDiskMergedDataPoint(now, int64(ioCounter.MergedWriteCount), device, metadata.AttributeDirectionWrite)
}
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go
index b43159c84366..505daa522696 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go
@@ -17,7 +17,6 @@ package diskscraper
import (
"context"
"errors"
- "runtime"
"testing"
"github.com/stretchr/testify/assert"
@@ -43,26 +42,12 @@ func TestScrape(t *testing.T) {
mutateScraper func(*scraper)
}
- metricsWithDirection := 3
- if runtime.GOOS == "linux" {
- metricsWithDirection++
- }
-
testCases := []testCase{
{
name: "Standard",
config: Config{Metrics: metadata.DefaultMetricsSettings()},
expectMetrics: metricsLen,
},
- {
- name: "With direction removed",
- config: Config{Metrics: metadata.DefaultMetricsSettings()},
- expectMetrics: metricsLen + metricsWithDirection,
- mutateScraper: func(s *scraper) {
- s.emitMetricsWithDirectionAttribute = false
- s.emitMetricsWithoutDirectionAttribute = true
- },
- },
{
name: "Validate Start Time",
config: Config{Metrics: metadata.DefaultMetricsSettings()},
@@ -154,32 +139,16 @@ func TestScrape(t *testing.T) {
switch metric.Name() {
case "system.disk.io":
assertInt64DiskMetricValid(t, metric, true, test.expectedStartTime)
- case "system.disk.io.read":
- assertInt64DiskMetricValid(t, metric, false, test.expectedStartTime)
- case "system.disk.io.write":
- assertInt64DiskMetricValid(t, metric, false, test.expectedStartTime)
case "system.disk.io_time":
assertDoubleDiskMetricValid(t, metric, false, test.expectedStartTime)
case "system.disk.operation_time":
assertDoubleDiskMetricValid(t, metric, true, test.expectedStartTime)
- case "system.disk.operation_time.read":
- assertDoubleDiskMetricValid(t, metric, false, test.expectedStartTime)
- case "system.disk.operation_time.write":
- assertDoubleDiskMetricValid(t, metric, false, test.expectedStartTime)
case "system.disk.operations":
assertInt64DiskMetricValid(t, metric, true, test.expectedStartTime)
- case "system.disk.operations.read":
- assertInt64DiskMetricValid(t, metric, false, test.expectedStartTime)
- case "system.disk.operations.write":
- assertInt64DiskMetricValid(t, metric, false, test.expectedStartTime)
case "system.disk.weighted.io.time":
assertDoubleDiskMetricValid(t, metric, false, test.expectedStartTime)
case "system.disk.merged":
assertInt64DiskMetricValid(t, metric, true, test.expectedStartTime)
- case "system.disk.merged.read":
- assertInt64DiskMetricValid(t, metric, false, test.expectedStartTime)
- case "system.disk.merged.write":
- assertInt64DiskMetricValid(t, metric, false, test.expectedStartTime)
case "system.disk.pending_operations":
assertDiskPendingOperationsMetricValid(t, metric)
case "system.disk.weighted_io_time":
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go
index 99e259ad76e0..7abdf9b215a0 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go
@@ -21,14 +21,12 @@ import (
"github.com/shirou/gopsutil/v3/host"
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/collector/featuregate"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver/scrapererror"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset"
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/perfcounters"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata"
)
@@ -65,16 +63,12 @@ type scraper struct {
skipScrape bool
// for mocking
- bootTime func() (uint64, error)
- emitMetricsWithDirectionAttribute bool
- emitMetricsWithoutDirectionAttribute bool
+ bootTime func() (uint64, error)
}
// newDiskScraper creates a Disk Scraper
func newDiskScraper(_ context.Context, settings component.ReceiverCreateSettings, cfg *Config) (*scraper, error) {
scraper := &scraper{settings: settings, config: cfg, perfCounterScraper: &perfcounters.PerfLibScraper{}, bootTime: host.BootTime}
- scraper.emitMetricsWithDirectionAttribute = featuregate.GetRegistry().IsEnabled(internal.EmitMetricsWithDirectionAttributeFeatureGateID)
- scraper.emitMetricsWithoutDirectionAttribute = featuregate.GetRegistry().IsEnabled(internal.EmitMetricsWithoutDirectionAttributeFeatureGateID)
var err error
@@ -150,27 +144,15 @@ func (s *scraper) scrape(ctx context.Context) (pmetric.Metrics, error) {
func (s *scraper) recordDiskIOMetric(now pcommon.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) {
for _, logicalDiskCounter := range logicalDiskCounterValues {
- if s.emitMetricsWithDirectionAttribute {
- s.mb.RecordSystemDiskIoDataPoint(now, logicalDiskCounter.Values[readBytesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirectionRead)
- s.mb.RecordSystemDiskIoDataPoint(now, logicalDiskCounter.Values[writeBytesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirectionWrite)
- }
- if s.emitMetricsWithoutDirectionAttribute {
- s.mb.RecordSystemDiskIoReadDataPoint(now, logicalDiskCounter.Values[readBytesPerSec], logicalDiskCounter.InstanceName)
- s.mb.RecordSystemDiskIoWriteDataPoint(now, logicalDiskCounter.Values[writeBytesPerSec], logicalDiskCounter.InstanceName)
- }
+ s.mb.RecordSystemDiskIoDataPoint(now, logicalDiskCounter.Values[readBytesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirectionRead)
+ s.mb.RecordSystemDiskIoDataPoint(now, logicalDiskCounter.Values[writeBytesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirectionWrite)
}
}
func (s *scraper) recordDiskOperationsMetric(now pcommon.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) {
for _, logicalDiskCounter := range logicalDiskCounterValues {
- if s.emitMetricsWithDirectionAttribute {
- s.mb.RecordSystemDiskOperationsDataPoint(now, logicalDiskCounter.Values[readsPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirectionRead)
- s.mb.RecordSystemDiskOperationsDataPoint(now, logicalDiskCounter.Values[writesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirectionWrite)
- }
- if s.emitMetricsWithoutDirectionAttribute {
- s.mb.RecordSystemDiskOperationsReadDataPoint(now, logicalDiskCounter.Values[readsPerSec], logicalDiskCounter.InstanceName)
- s.mb.RecordSystemDiskOperationsWriteDataPoint(now, logicalDiskCounter.Values[writesPerSec], logicalDiskCounter.InstanceName)
- }
+ s.mb.RecordSystemDiskOperationsDataPoint(now, logicalDiskCounter.Values[readsPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirectionRead)
+ s.mb.RecordSystemDiskOperationsDataPoint(now, logicalDiskCounter.Values[writesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirectionWrite)
}
}
@@ -183,14 +165,8 @@ func (s *scraper) recordDiskIOTimeMetric(now pcommon.Timestamp, logicalDiskCount
func (s *scraper) recordDiskOperationTimeMetric(now pcommon.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) {
for _, logicalDiskCounter := range logicalDiskCounterValues {
- if s.emitMetricsWithDirectionAttribute {
- s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(logicalDiskCounter.Values[avgDiskSecsPerRead])/1e7, logicalDiskCounter.InstanceName, metadata.AttributeDirectionRead)
- s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(logicalDiskCounter.Values[avgDiskSecsPerWrite])/1e7, logicalDiskCounter.InstanceName, metadata.AttributeDirectionWrite)
- }
- if s.emitMetricsWithoutDirectionAttribute {
- s.mb.RecordSystemDiskOperationTimeReadDataPoint(now, float64(logicalDiskCounter.Values[avgDiskSecsPerRead])/1e7, logicalDiskCounter.InstanceName)
- s.mb.RecordSystemDiskOperationTimeWriteDataPoint(now, float64(logicalDiskCounter.Values[avgDiskSecsPerWrite])/1e7, logicalDiskCounter.InstanceName)
- }
+ s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(logicalDiskCounter.Values[avgDiskSecsPerRead])/1e7, logicalDiskCounter.InstanceName, metadata.AttributeDirectionRead)
+ s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(logicalDiskCounter.Values[avgDiskSecsPerWrite])/1e7, logicalDiskCounter.InstanceName, metadata.AttributeDirectionWrite)
}
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/documentation.md
index c1f65aff425c..f7d25a926724 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/documentation.md
+++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/documentation.md
@@ -9,18 +9,10 @@ These are the metrics available for this scraper.
| Name | Description | Unit | Type | Attributes |
| ---- | ----------- | ---- | ---- | ---------- |
| **system.disk.io** | Disk bytes transferred. | By | Sum(Int) | |
-| **system.disk.io.read** | Disk bytes read. | By | Sum(Int) | |
-| **system.disk.io.write** | Disk bytes written. | By | Sum(Int) | |
| **system.disk.io_time** | Time disk spent activated. On Windows, this is calculated as the inverse of disk idle time. | s | Sum(Double) | |
| **system.disk.merged** | The number of disk reads/writes merged into single physical disk access operations. | {operations} | Sum(Int) | |
-| **system.disk.merged.read** | The number of disk reads merged into single physical disk access operations. | {operations} | Sum(Int) | |
-| **system.disk.merged.write** | The number of disk writes merged into single physical disk access operations. | {operations} | Sum(Int) | |
| **system.disk.operation_time** | Time spent in disk operations. | s | Sum(Double) | |
-| **system.disk.operation_time.read** | Time spent in disk reads. | s | Sum(Double) | |
-| **system.disk.operation_time.write** | Time spent in disk writes. | s | Sum(Double) | |
| **system.disk.operations** | Disk operations count. | {operations} | Sum(Int) | |
-| **system.disk.operations.read** | Disk reads count. | {operations} | Sum(Int) | |
-| **system.disk.operations.write** | Disk writes count. | {operations} | Sum(Int) | |
| **system.disk.pending_operations** | The queue size of pending I/O operations. | {operations} | Sum(Int) | |
| **system.disk.weighted_io_time** | Time disk spent activated multiplied by the queue length. | s | Sum(Double) | |
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics.go
index 1626f8bfaae2..c49d1e9106c3 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics.go
@@ -18,21 +18,13 @@ type MetricSettings struct {
// MetricsSettings provides settings for hostmetricsreceiver/disk metrics.
type MetricsSettings struct {
- SystemDiskIo MetricSettings `mapstructure:"system.disk.io"`
- SystemDiskIoRead MetricSettings `mapstructure:"system.disk.io.read"`
- SystemDiskIoWrite MetricSettings `mapstructure:"system.disk.io.write"`
- SystemDiskIoTime MetricSettings `mapstructure:"system.disk.io_time"`
- SystemDiskMerged MetricSettings `mapstructure:"system.disk.merged"`
- SystemDiskMergedRead MetricSettings `mapstructure:"system.disk.merged.read"`
- SystemDiskMergedWrite MetricSettings `mapstructure:"system.disk.merged.write"`
- SystemDiskOperationTime MetricSettings `mapstructure:"system.disk.operation_time"`
- SystemDiskOperationTimeRead MetricSettings `mapstructure:"system.disk.operation_time.read"`
- SystemDiskOperationTimeWrite MetricSettings `mapstructure:"system.disk.operation_time.write"`
- SystemDiskOperations MetricSettings `mapstructure:"system.disk.operations"`
- SystemDiskOperationsRead MetricSettings `mapstructure:"system.disk.operations.read"`
- SystemDiskOperationsWrite MetricSettings `mapstructure:"system.disk.operations.write"`
- SystemDiskPendingOperations MetricSettings `mapstructure:"system.disk.pending_operations"`
- SystemDiskWeightedIoTime MetricSettings `mapstructure:"system.disk.weighted_io_time"`
+ SystemDiskIo MetricSettings `mapstructure:"system.disk.io"`
+ SystemDiskIoTime MetricSettings `mapstructure:"system.disk.io_time"`
+ SystemDiskMerged MetricSettings `mapstructure:"system.disk.merged"`
+ SystemDiskOperationTime MetricSettings `mapstructure:"system.disk.operation_time"`
+ SystemDiskOperations MetricSettings `mapstructure:"system.disk.operations"`
+ SystemDiskPendingOperations MetricSettings `mapstructure:"system.disk.pending_operations"`
+ SystemDiskWeightedIoTime MetricSettings `mapstructure:"system.disk.weighted_io_time"`
}
func DefaultMetricsSettings() MetricsSettings {
@@ -40,42 +32,18 @@ func DefaultMetricsSettings() MetricsSettings {
SystemDiskIo: MetricSettings{
Enabled: true,
},
- SystemDiskIoRead: MetricSettings{
- Enabled: true,
- },
- SystemDiskIoWrite: MetricSettings{
- Enabled: true,
- },
SystemDiskIoTime: MetricSettings{
Enabled: true,
},
SystemDiskMerged: MetricSettings{
Enabled: true,
},
- SystemDiskMergedRead: MetricSettings{
- Enabled: true,
- },
- SystemDiskMergedWrite: MetricSettings{
- Enabled: true,
- },
SystemDiskOperationTime: MetricSettings{
Enabled: true,
},
- SystemDiskOperationTimeRead: MetricSettings{
- Enabled: true,
- },
- SystemDiskOperationTimeWrite: MetricSettings{
- Enabled: true,
- },
SystemDiskOperations: MetricSettings{
Enabled: true,
},
- SystemDiskOperationsRead: MetricSettings{
- Enabled: true,
- },
- SystemDiskOperationsWrite: MetricSettings{
- Enabled: true,
- },
SystemDiskPendingOperations: MetricSettings{
Enabled: true,
},
@@ -165,112 +133,6 @@ func newMetricSystemDiskIo(settings MetricSettings) metricSystemDiskIo {
return m
}
-type metricSystemDiskIoRead struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.disk.io.read metric with initial data.
-func (m *metricSystemDiskIoRead) init() {
- m.data.SetName("system.disk.io.read")
- m.data.SetDescription("Disk bytes read.")
- m.data.SetUnit("By")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemDiskIoRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("device", deviceAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemDiskIoRead) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemDiskIoRead) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemDiskIoRead(settings MetricSettings) metricSystemDiskIoRead {
- m := metricSystemDiskIoRead{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
-type metricSystemDiskIoWrite struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.disk.io.write metric with initial data.
-func (m *metricSystemDiskIoWrite) init() {
- m.data.SetName("system.disk.io.write")
- m.data.SetDescription("Disk bytes written.")
- m.data.SetUnit("By")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemDiskIoWrite) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("device", deviceAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemDiskIoWrite) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemDiskIoWrite) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemDiskIoWrite(settings MetricSettings) metricSystemDiskIoWrite {
- m := metricSystemDiskIoWrite{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
type metricSystemDiskIoTime struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -378,112 +240,6 @@ func newMetricSystemDiskMerged(settings MetricSettings) metricSystemDiskMerged {
return m
}
-type metricSystemDiskMergedRead struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.disk.merged.read metric with initial data.
-func (m *metricSystemDiskMergedRead) init() {
- m.data.SetName("system.disk.merged.read")
- m.data.SetDescription("The number of disk reads merged into single physical disk access operations.")
- m.data.SetUnit("{operations}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemDiskMergedRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("device", deviceAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemDiskMergedRead) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemDiskMergedRead) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemDiskMergedRead(settings MetricSettings) metricSystemDiskMergedRead {
- m := metricSystemDiskMergedRead{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
-type metricSystemDiskMergedWrite struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.disk.merged.write metric with initial data.
-func (m *metricSystemDiskMergedWrite) init() {
- m.data.SetName("system.disk.merged.write")
- m.data.SetDescription("The number of disk writes merged into single physical disk access operations.")
- m.data.SetUnit("{operations}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemDiskMergedWrite) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("device", deviceAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemDiskMergedWrite) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemDiskMergedWrite) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemDiskMergedWrite(settings MetricSettings) metricSystemDiskMergedWrite {
- m := metricSystemDiskMergedWrite{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
type metricSystemDiskOperationTime struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -538,112 +294,6 @@ func newMetricSystemDiskOperationTime(settings MetricSettings) metricSystemDiskO
return m
}
-type metricSystemDiskOperationTimeRead struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.disk.operation_time.read metric with initial data.
-func (m *metricSystemDiskOperationTimeRead) init() {
- m.data.SetName("system.disk.operation_time.read")
- m.data.SetDescription("Time spent in disk reads.")
- m.data.SetUnit("s")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemDiskOperationTimeRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, deviceAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetDoubleValue(val)
- dp.Attributes().PutStr("device", deviceAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemDiskOperationTimeRead) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemDiskOperationTimeRead) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemDiskOperationTimeRead(settings MetricSettings) metricSystemDiskOperationTimeRead {
- m := metricSystemDiskOperationTimeRead{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
-type metricSystemDiskOperationTimeWrite struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.disk.operation_time.write metric with initial data.
-func (m *metricSystemDiskOperationTimeWrite) init() {
- m.data.SetName("system.disk.operation_time.write")
- m.data.SetDescription("Time spent in disk writes.")
- m.data.SetUnit("s")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemDiskOperationTimeWrite) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, deviceAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetDoubleValue(val)
- dp.Attributes().PutStr("device", deviceAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemDiskOperationTimeWrite) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemDiskOperationTimeWrite) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemDiskOperationTimeWrite(settings MetricSettings) metricSystemDiskOperationTimeWrite {
- m := metricSystemDiskOperationTimeWrite{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
type metricSystemDiskOperations struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -698,112 +348,6 @@ func newMetricSystemDiskOperations(settings MetricSettings) metricSystemDiskOper
return m
}
-type metricSystemDiskOperationsRead struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.disk.operations.read metric with initial data.
-func (m *metricSystemDiskOperationsRead) init() {
- m.data.SetName("system.disk.operations.read")
- m.data.SetDescription("Disk reads count.")
- m.data.SetUnit("{operations}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemDiskOperationsRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("device", deviceAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemDiskOperationsRead) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemDiskOperationsRead) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemDiskOperationsRead(settings MetricSettings) metricSystemDiskOperationsRead {
- m := metricSystemDiskOperationsRead{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
-type metricSystemDiskOperationsWrite struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.disk.operations.write metric with initial data.
-func (m *metricSystemDiskOperationsWrite) init() {
- m.data.SetName("system.disk.operations.write")
- m.data.SetDescription("Disk writes count.")
- m.data.SetUnit("{operations}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemDiskOperationsWrite) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("device", deviceAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemDiskOperationsWrite) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemDiskOperationsWrite) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemDiskOperationsWrite(settings MetricSettings) metricSystemDiskOperationsWrite {
- m := metricSystemDiskOperationsWrite{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
type metricSystemDiskPendingOperations struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -913,26 +457,18 @@ func newMetricSystemDiskWeightedIoTime(settings MetricSettings) metricSystemDisk
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce metric representation defined in metadata and user settings.
type MetricsBuilder struct {
- startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
- metricsCapacity int // maximum observed number of metrics per resource.
- resourceCapacity int // maximum observed number of resource attributes.
- metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
- buildInfo component.BuildInfo // contains version information
- metricSystemDiskIo metricSystemDiskIo
- metricSystemDiskIoRead metricSystemDiskIoRead
- metricSystemDiskIoWrite metricSystemDiskIoWrite
- metricSystemDiskIoTime metricSystemDiskIoTime
- metricSystemDiskMerged metricSystemDiskMerged
- metricSystemDiskMergedRead metricSystemDiskMergedRead
- metricSystemDiskMergedWrite metricSystemDiskMergedWrite
- metricSystemDiskOperationTime metricSystemDiskOperationTime
- metricSystemDiskOperationTimeRead metricSystemDiskOperationTimeRead
- metricSystemDiskOperationTimeWrite metricSystemDiskOperationTimeWrite
- metricSystemDiskOperations metricSystemDiskOperations
- metricSystemDiskOperationsRead metricSystemDiskOperationsRead
- metricSystemDiskOperationsWrite metricSystemDiskOperationsWrite
- metricSystemDiskPendingOperations metricSystemDiskPendingOperations
- metricSystemDiskWeightedIoTime metricSystemDiskWeightedIoTime
+ startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
+ metricsCapacity int // maximum observed number of metrics per resource.
+ resourceCapacity int // maximum observed number of resource attributes.
+ metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
+ buildInfo component.BuildInfo // contains version information
+ metricSystemDiskIo metricSystemDiskIo
+ metricSystemDiskIoTime metricSystemDiskIoTime
+ metricSystemDiskMerged metricSystemDiskMerged
+ metricSystemDiskOperationTime metricSystemDiskOperationTime
+ metricSystemDiskOperations metricSystemDiskOperations
+ metricSystemDiskPendingOperations metricSystemDiskPendingOperations
+ metricSystemDiskWeightedIoTime metricSystemDiskWeightedIoTime
}
// metricBuilderOption applies changes to default metrics builder.
@@ -947,24 +483,16 @@ func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption {
func NewMetricsBuilder(settings MetricsSettings, buildInfo component.BuildInfo, options ...metricBuilderOption) *MetricsBuilder {
mb := &MetricsBuilder{
- startTime: pcommon.NewTimestampFromTime(time.Now()),
- metricsBuffer: pmetric.NewMetrics(),
- buildInfo: buildInfo,
- metricSystemDiskIo: newMetricSystemDiskIo(settings.SystemDiskIo),
- metricSystemDiskIoRead: newMetricSystemDiskIoRead(settings.SystemDiskIoRead),
- metricSystemDiskIoWrite: newMetricSystemDiskIoWrite(settings.SystemDiskIoWrite),
- metricSystemDiskIoTime: newMetricSystemDiskIoTime(settings.SystemDiskIoTime),
- metricSystemDiskMerged: newMetricSystemDiskMerged(settings.SystemDiskMerged),
- metricSystemDiskMergedRead: newMetricSystemDiskMergedRead(settings.SystemDiskMergedRead),
- metricSystemDiskMergedWrite: newMetricSystemDiskMergedWrite(settings.SystemDiskMergedWrite),
- metricSystemDiskOperationTime: newMetricSystemDiskOperationTime(settings.SystemDiskOperationTime),
- metricSystemDiskOperationTimeRead: newMetricSystemDiskOperationTimeRead(settings.SystemDiskOperationTimeRead),
- metricSystemDiskOperationTimeWrite: newMetricSystemDiskOperationTimeWrite(settings.SystemDiskOperationTimeWrite),
- metricSystemDiskOperations: newMetricSystemDiskOperations(settings.SystemDiskOperations),
- metricSystemDiskOperationsRead: newMetricSystemDiskOperationsRead(settings.SystemDiskOperationsRead),
- metricSystemDiskOperationsWrite: newMetricSystemDiskOperationsWrite(settings.SystemDiskOperationsWrite),
- metricSystemDiskPendingOperations: newMetricSystemDiskPendingOperations(settings.SystemDiskPendingOperations),
- metricSystemDiskWeightedIoTime: newMetricSystemDiskWeightedIoTime(settings.SystemDiskWeightedIoTime),
+ startTime: pcommon.NewTimestampFromTime(time.Now()),
+ metricsBuffer: pmetric.NewMetrics(),
+ buildInfo: buildInfo,
+ metricSystemDiskIo: newMetricSystemDiskIo(settings.SystemDiskIo),
+ metricSystemDiskIoTime: newMetricSystemDiskIoTime(settings.SystemDiskIoTime),
+ metricSystemDiskMerged: newMetricSystemDiskMerged(settings.SystemDiskMerged),
+ metricSystemDiskOperationTime: newMetricSystemDiskOperationTime(settings.SystemDiskOperationTime),
+ metricSystemDiskOperations: newMetricSystemDiskOperations(settings.SystemDiskOperations),
+ metricSystemDiskPendingOperations: newMetricSystemDiskPendingOperations(settings.SystemDiskPendingOperations),
+ metricSystemDiskWeightedIoTime: newMetricSystemDiskWeightedIoTime(settings.SystemDiskWeightedIoTime),
}
for _, op := range options {
op(mb)
@@ -1019,18 +547,10 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) {
ils.Scope().SetVersion(mb.buildInfo.Version)
ils.Metrics().EnsureCapacity(mb.metricsCapacity)
mb.metricSystemDiskIo.emit(ils.Metrics())
- mb.metricSystemDiskIoRead.emit(ils.Metrics())
- mb.metricSystemDiskIoWrite.emit(ils.Metrics())
mb.metricSystemDiskIoTime.emit(ils.Metrics())
mb.metricSystemDiskMerged.emit(ils.Metrics())
- mb.metricSystemDiskMergedRead.emit(ils.Metrics())
- mb.metricSystemDiskMergedWrite.emit(ils.Metrics())
mb.metricSystemDiskOperationTime.emit(ils.Metrics())
- mb.metricSystemDiskOperationTimeRead.emit(ils.Metrics())
- mb.metricSystemDiskOperationTimeWrite.emit(ils.Metrics())
mb.metricSystemDiskOperations.emit(ils.Metrics())
- mb.metricSystemDiskOperationsRead.emit(ils.Metrics())
- mb.metricSystemDiskOperationsWrite.emit(ils.Metrics())
mb.metricSystemDiskPendingOperations.emit(ils.Metrics())
mb.metricSystemDiskWeightedIoTime.emit(ils.Metrics())
for _, op := range rmo {
@@ -1057,16 +577,6 @@ func (mb *MetricsBuilder) RecordSystemDiskIoDataPoint(ts pcommon.Timestamp, val
mb.metricSystemDiskIo.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue.String())
}
-// RecordSystemDiskIoReadDataPoint adds a data point to system.disk.io.read metric.
-func (mb *MetricsBuilder) RecordSystemDiskIoReadDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- mb.metricSystemDiskIoRead.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
-}
-
-// RecordSystemDiskIoWriteDataPoint adds a data point to system.disk.io.write metric.
-func (mb *MetricsBuilder) RecordSystemDiskIoWriteDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- mb.metricSystemDiskIoWrite.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
-}
-
// RecordSystemDiskIoTimeDataPoint adds a data point to system.disk.io_time metric.
func (mb *MetricsBuilder) RecordSystemDiskIoTimeDataPoint(ts pcommon.Timestamp, val float64, deviceAttributeValue string) {
mb.metricSystemDiskIoTime.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
@@ -1077,46 +587,16 @@ func (mb *MetricsBuilder) RecordSystemDiskMergedDataPoint(ts pcommon.Timestamp,
mb.metricSystemDiskMerged.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue.String())
}
-// RecordSystemDiskMergedReadDataPoint adds a data point to system.disk.merged.read metric.
-func (mb *MetricsBuilder) RecordSystemDiskMergedReadDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- mb.metricSystemDiskMergedRead.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
-}
-
-// RecordSystemDiskMergedWriteDataPoint adds a data point to system.disk.merged.write metric.
-func (mb *MetricsBuilder) RecordSystemDiskMergedWriteDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- mb.metricSystemDiskMergedWrite.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
-}
-
// RecordSystemDiskOperationTimeDataPoint adds a data point to system.disk.operation_time metric.
func (mb *MetricsBuilder) RecordSystemDiskOperationTimeDataPoint(ts pcommon.Timestamp, val float64, deviceAttributeValue string, directionAttributeValue AttributeDirection) {
mb.metricSystemDiskOperationTime.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue.String())
}
-// RecordSystemDiskOperationTimeReadDataPoint adds a data point to system.disk.operation_time.read metric.
-func (mb *MetricsBuilder) RecordSystemDiskOperationTimeReadDataPoint(ts pcommon.Timestamp, val float64, deviceAttributeValue string) {
- mb.metricSystemDiskOperationTimeRead.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
-}
-
-// RecordSystemDiskOperationTimeWriteDataPoint adds a data point to system.disk.operation_time.write metric.
-func (mb *MetricsBuilder) RecordSystemDiskOperationTimeWriteDataPoint(ts pcommon.Timestamp, val float64, deviceAttributeValue string) {
- mb.metricSystemDiskOperationTimeWrite.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
-}
-
// RecordSystemDiskOperationsDataPoint adds a data point to system.disk.operations metric.
func (mb *MetricsBuilder) RecordSystemDiskOperationsDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue AttributeDirection) {
mb.metricSystemDiskOperations.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue.String())
}
-// RecordSystemDiskOperationsReadDataPoint adds a data point to system.disk.operations.read metric.
-func (mb *MetricsBuilder) RecordSystemDiskOperationsReadDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- mb.metricSystemDiskOperationsRead.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
-}
-
-// RecordSystemDiskOperationsWriteDataPoint adds a data point to system.disk.operations.write metric.
-func (mb *MetricsBuilder) RecordSystemDiskOperationsWriteDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- mb.metricSystemDiskOperationsWrite.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
-}
-
// RecordSystemDiskPendingOperationsDataPoint adds a data point to system.disk.pending_operations metric.
func (mb *MetricsBuilder) RecordSystemDiskPendingOperationsDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
mb.metricSystemDiskPendingOperations.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
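For readers skimming the generated API above: after this change the builder exposes only the direction-attributed disk metrics, and per-metric enablement is the remaining switch. Below is a minimal, illustrative Go sketch of driving the generated `MetricsBuilder` directly, using only identifiers visible in this diff (`DefaultMetricsSettings`, `NewMetricsBuilder`, `RecordSystemDiskIoDataPoint`, `EmitForResource`, `AttributeDirectionRead`/`Write`). The `main` scaffolding is hypothetical, and because the metadata package sits under `internal/`, such code only compiles from within this module; the real caller is the disk scraper shown earlier.

```go
package main

import (
	"time"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/pdata/pcommon"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata"
)

func main() {
	// Start from the defaults (every metric enabled) and optionally switch
	// individual metrics off; this per-metric toggle is what remains now
	// that the direction feature gates are gone.
	settings := metadata.DefaultMetricsSettings()
	settings.SystemDiskMerged.Enabled = false

	mb := metadata.NewMetricsBuilder(settings, component.BuildInfo{})
	now := pcommon.NewTimestampFromTime(time.Now())

	// Each counter pair is always recorded as two data points on the same
	// metric, distinguished by the direction attribute.
	mb.RecordSystemDiskIoDataPoint(now, 1024, "sda", metadata.AttributeDirectionRead)
	mb.RecordSystemDiskIoDataPoint(now, 512, "sda", metadata.AttributeDirectionWrite)

	// Move the recorded data points into the builder's internal buffer; in the
	// receiver, the scraper's scrape call returns the accumulated metrics.
	mb.EmitForResource()
}
```

A data point recorded for a disabled metric is simply dropped by the generated `recordDataPoint` guard (`if !m.settings.Enabled { return }`), which is what makes the per-metric settings in the scraper documentation effective.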
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/metadata.yaml b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/metadata.yaml
index 304f705d6048..35ab3e872a76 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/metadata.yaml
+++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/metadata.yaml
@@ -11,7 +11,6 @@ attributes:
enum: [read, write]
metrics:
-# produced when receiver.hostmetricsreceiver.emitMetricsWithDirectionAttribute feature gate is enabled
system.disk.io:
enabled: true
description: Disk bytes transferred.
@@ -21,27 +20,6 @@ metrics:
aggregation: cumulative
monotonic: true
attributes: [device, direction]
-# produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.disk.io.read:
- enabled: true
- description: Disk bytes read.
- unit: By
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
- attributes: [device]
-# produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.disk.io.write:
- enabled: true
- description: Disk bytes written.
- unit: By
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
- attributes: [device]
-# produced when receiver.hostmetricsreceiver.emitMetricsWithDirectionAttribute feature gate is enabled
system.disk.operations:
enabled: true
description: Disk operations count.
@@ -51,26 +29,6 @@ metrics:
aggregation: cumulative
monotonic: true
attributes: [device, direction]
-# produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.disk.operations.read:
- enabled: true
- description: Disk reads count.
- unit: "{operations}"
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
- attributes: [device]
-# produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.disk.operations.write:
- enabled: true
- description: Disk writes count.
- unit: "{operations}"
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
- attributes: [device]
system.disk.io_time:
enabled: true
@@ -81,7 +39,6 @@ metrics:
aggregation: cumulative
monotonic: true
attributes: [device]
-# produced when receiver.hostmetricsreceiver.emitMetricsWithDirectionAttribute feature gate is enabled
system.disk.operation_time:
enabled: true
description: Time spent in disk operations.
@@ -91,27 +48,6 @@ metrics:
aggregation: cumulative
monotonic: true
attributes: [device, direction]
-# produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.disk.operation_time.read:
- enabled: true
- description: Time spent in disk reads.
- unit: s
- sum:
- value_type: double
- aggregation: cumulative
- monotonic: true
- attributes: [device]
-# produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.disk.operation_time.write:
- enabled: true
- description: Time spent in disk writes.
- unit: s
- sum:
- value_type: double
- aggregation: cumulative
- monotonic: true
- attributes: [device]
-
system.disk.weighted_io_time:
enabled: true
description: Time disk spent activated multiplied by the queue length.
@@ -131,7 +67,6 @@ metrics:
aggregation: cumulative
monotonic: false
attributes: [device]
-# produced when receiver.hostmetricsreceiver.emitMetricsWithDirectionAttribute feature gate is enabled
system.disk.merged:
enabled: true
description: The number of disk reads/writes merged into single physical disk access operations.
@@ -141,23 +76,3 @@ metrics:
aggregation: cumulative
monotonic: true
attributes: [device, direction]
-# produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.disk.merged.read:
- enabled: true
- description: The number of disk reads merged into single physical disk access operations.
- unit: "{operations}"
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
- attributes: [device]
-# produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.disk.merged.write:
- enabled: true
- description: The number of disk writes merged into single physical disk access operations.
- unit: "{operations}"
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
- attributes: [device]
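For reference, a minimal sketch of how the disk scraper reports reads and writes once only the direction-attributed metrics remain. It assumes the disk scraper's generated metadata package exposes Record…DataPoint methods taking a device name and an AttributeDirection value, analogous to the network builder shown later in this patch; the device name and counter values are illustrative.

```go
// Sketch (not part of this patch): with the feature gates removed, disk reads and
// writes land on a single metric, split by the direction attribute, instead of
// separate *.read / *.write metrics.
func recordDiskOperations(mb *metadata.MetricsBuilder, now pcommon.Timestamp, device string, readOps, writeOps uint64) {
	mb.RecordSystemDiskOperationsDataPoint(now, int64(readOps), device, metadata.AttributeDirectionRead)
	mb.RecordSystemDiskOperationsDataPoint(now, int64(writeOps), device, metadata.AttributeDirectionWrite)
}
```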
diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/documentation.md
index 630d8b735d67..cae9f0432775 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/documentation.md
+++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/documentation.md
@@ -12,17 +12,9 @@ These are the metrics available for this scraper.
| system.network.conntrack.count | The count of entries in conntrack table. | {entries} | Sum(Int) | |
| system.network.conntrack.max | The limit for entries in the conntrack table. | {entries} | Sum(Int) | |
| **system.network.dropped** | The number of packets dropped. (Deprecated) | {packets} | Sum(Int) | |
-| **system.network.dropped.receive** | The number of packets dropped on receive. | {packets} | Sum(Int) | |
-| **system.network.dropped.transmit** | The number of packets dropped on transmit. | {packets} | Sum(Int) | |
| **system.network.errors** | The number of errors encountered. (Deprecated) | {errors} | Sum(Int) | |
-| **system.network.errors.receive** | The number of errors encountered on receive. | {errors} | Sum(Int) | |
-| **system.network.errors.transmit** | The number of errors encountered on transmit. | {errors} | Sum(Int) | |
| **system.network.io** | The number of bytes transmitted and received. (Deprecated) | By | Sum(Int) | |
-| **system.network.io.receive** | The number of bytes received. | By | Sum(Int) | |
-| **system.network.io.transmit** | The number of bytes transmitted. | By | Sum(Int) | |
| **system.network.packets** | The number of packets transferred. (Deprecated) | {packets} | Sum(Int) | |
-| **system.network.packets.receive** | The number of packets received. | {packets} | Sum(Int) | |
-| **system.network.packets.transmit** | The number of packets transmitted. | {packets} | Sum(Int) | |
**Highlighted metrics** are emitted by default. Other metrics are optional and not emitted by default.
Any metric can be enabled or disabled with the following scraper configuration:
diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics.go
index 2caaa91a47af..5f5ccf8ea8e3 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics.go
@@ -18,21 +18,13 @@ type MetricSettings struct {
// MetricsSettings provides settings for hostmetricsreceiver/network metrics.
type MetricsSettings struct {
- SystemNetworkConnections MetricSettings `mapstructure:"system.network.connections"`
- SystemNetworkConntrackCount MetricSettings `mapstructure:"system.network.conntrack.count"`
- SystemNetworkConntrackMax MetricSettings `mapstructure:"system.network.conntrack.max"`
- SystemNetworkDropped MetricSettings `mapstructure:"system.network.dropped"`
- SystemNetworkDroppedReceive MetricSettings `mapstructure:"system.network.dropped.receive"`
- SystemNetworkDroppedTransmit MetricSettings `mapstructure:"system.network.dropped.transmit"`
- SystemNetworkErrors MetricSettings `mapstructure:"system.network.errors"`
- SystemNetworkErrorsReceive MetricSettings `mapstructure:"system.network.errors.receive"`
- SystemNetworkErrorsTransmit MetricSettings `mapstructure:"system.network.errors.transmit"`
- SystemNetworkIo MetricSettings `mapstructure:"system.network.io"`
- SystemNetworkIoReceive MetricSettings `mapstructure:"system.network.io.receive"`
- SystemNetworkIoTransmit MetricSettings `mapstructure:"system.network.io.transmit"`
- SystemNetworkPackets MetricSettings `mapstructure:"system.network.packets"`
- SystemNetworkPacketsReceive MetricSettings `mapstructure:"system.network.packets.receive"`
- SystemNetworkPacketsTransmit MetricSettings `mapstructure:"system.network.packets.transmit"`
+ SystemNetworkConnections MetricSettings `mapstructure:"system.network.connections"`
+ SystemNetworkConntrackCount MetricSettings `mapstructure:"system.network.conntrack.count"`
+ SystemNetworkConntrackMax MetricSettings `mapstructure:"system.network.conntrack.max"`
+ SystemNetworkDropped MetricSettings `mapstructure:"system.network.dropped"`
+ SystemNetworkErrors MetricSettings `mapstructure:"system.network.errors"`
+ SystemNetworkIo MetricSettings `mapstructure:"system.network.io"`
+ SystemNetworkPackets MetricSettings `mapstructure:"system.network.packets"`
}
func DefaultMetricsSettings() MetricsSettings {
@@ -49,39 +41,15 @@ func DefaultMetricsSettings() MetricsSettings {
SystemNetworkDropped: MetricSettings{
Enabled: true,
},
- SystemNetworkDroppedReceive: MetricSettings{
- Enabled: true,
- },
- SystemNetworkDroppedTransmit: MetricSettings{
- Enabled: true,
- },
SystemNetworkErrors: MetricSettings{
Enabled: true,
},
- SystemNetworkErrorsReceive: MetricSettings{
- Enabled: true,
- },
- SystemNetworkErrorsTransmit: MetricSettings{
- Enabled: true,
- },
SystemNetworkIo: MetricSettings{
Enabled: true,
},
- SystemNetworkIoReceive: MetricSettings{
- Enabled: true,
- },
- SystemNetworkIoTransmit: MetricSettings{
- Enabled: true,
- },
SystemNetworkPackets: MetricSettings{
Enabled: true,
},
- SystemNetworkPacketsReceive: MetricSettings{
- Enabled: true,
- },
- SystemNetworkPacketsTransmit: MetricSettings{
- Enabled: true,
- },
}
}
@@ -343,112 +311,6 @@ func newMetricSystemNetworkDropped(settings MetricSettings) metricSystemNetworkD
return m
}
-type metricSystemNetworkDroppedReceive struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.network.dropped.receive metric with initial data.
-func (m *metricSystemNetworkDroppedReceive) init() {
- m.data.SetName("system.network.dropped.receive")
- m.data.SetDescription("The number of packets dropped on receive.")
- m.data.SetUnit("{packets}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemNetworkDroppedReceive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("device", deviceAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemNetworkDroppedReceive) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemNetworkDroppedReceive) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemNetworkDroppedReceive(settings MetricSettings) metricSystemNetworkDroppedReceive {
- m := metricSystemNetworkDroppedReceive{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
-type metricSystemNetworkDroppedTransmit struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.network.dropped.transmit metric with initial data.
-func (m *metricSystemNetworkDroppedTransmit) init() {
- m.data.SetName("system.network.dropped.transmit")
- m.data.SetDescription("The number of packets dropped on transmit.")
- m.data.SetUnit("{packets}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemNetworkDroppedTransmit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("device", deviceAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemNetworkDroppedTransmit) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemNetworkDroppedTransmit) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemNetworkDroppedTransmit(settings MetricSettings) metricSystemNetworkDroppedTransmit {
- m := metricSystemNetworkDroppedTransmit{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
type metricSystemNetworkErrors struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -503,112 +365,6 @@ func newMetricSystemNetworkErrors(settings MetricSettings) metricSystemNetworkEr
return m
}
-type metricSystemNetworkErrorsReceive struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.network.errors.receive metric with initial data.
-func (m *metricSystemNetworkErrorsReceive) init() {
- m.data.SetName("system.network.errors.receive")
- m.data.SetDescription("The number of errors encountered on receive.")
- m.data.SetUnit("{errors}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemNetworkErrorsReceive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("device", deviceAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemNetworkErrorsReceive) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemNetworkErrorsReceive) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemNetworkErrorsReceive(settings MetricSettings) metricSystemNetworkErrorsReceive {
- m := metricSystemNetworkErrorsReceive{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
-type metricSystemNetworkErrorsTransmit struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.network.errors.transmit metric with initial data.
-func (m *metricSystemNetworkErrorsTransmit) init() {
- m.data.SetName("system.network.errors.transmit")
- m.data.SetDescription("The number of errors encountered on transmit.")
- m.data.SetUnit("{errors}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemNetworkErrorsTransmit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("device", deviceAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemNetworkErrorsTransmit) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemNetworkErrorsTransmit) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemNetworkErrorsTransmit(settings MetricSettings) metricSystemNetworkErrorsTransmit {
- m := metricSystemNetworkErrorsTransmit{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
type metricSystemNetworkIo struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -663,112 +419,6 @@ func newMetricSystemNetworkIo(settings MetricSettings) metricSystemNetworkIo {
return m
}
-type metricSystemNetworkIoReceive struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.network.io.receive metric with initial data.
-func (m *metricSystemNetworkIoReceive) init() {
- m.data.SetName("system.network.io.receive")
- m.data.SetDescription("The number of bytes received.")
- m.data.SetUnit("By")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemNetworkIoReceive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("device", deviceAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemNetworkIoReceive) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemNetworkIoReceive) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemNetworkIoReceive(settings MetricSettings) metricSystemNetworkIoReceive {
- m := metricSystemNetworkIoReceive{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
-type metricSystemNetworkIoTransmit struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.network.io.transmit metric with initial data.
-func (m *metricSystemNetworkIoTransmit) init() {
- m.data.SetName("system.network.io.transmit")
- m.data.SetDescription("The number of bytes transmitted.")
- m.data.SetUnit("By")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemNetworkIoTransmit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("device", deviceAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemNetworkIoTransmit) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemNetworkIoTransmit) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemNetworkIoTransmit(settings MetricSettings) metricSystemNetworkIoTransmit {
- m := metricSystemNetworkIoTransmit{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
type metricSystemNetworkPackets struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -823,135 +473,21 @@ func newMetricSystemNetworkPackets(settings MetricSettings) metricSystemNetworkP
return m
}
-type metricSystemNetworkPacketsReceive struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.network.packets.receive metric with initial data.
-func (m *metricSystemNetworkPacketsReceive) init() {
- m.data.SetName("system.network.packets.receive")
- m.data.SetDescription("The number of packets received.")
- m.data.SetUnit("{packets}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemNetworkPacketsReceive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("device", deviceAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemNetworkPacketsReceive) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemNetworkPacketsReceive) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemNetworkPacketsReceive(settings MetricSettings) metricSystemNetworkPacketsReceive {
- m := metricSystemNetworkPacketsReceive{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
-type metricSystemNetworkPacketsTransmit struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.network.packets.transmit metric with initial data.
-func (m *metricSystemNetworkPacketsTransmit) init() {
- m.data.SetName("system.network.packets.transmit")
- m.data.SetDescription("The number of packets transmitted.")
- m.data.SetUnit("{packets}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemNetworkPacketsTransmit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("device", deviceAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemNetworkPacketsTransmit) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemNetworkPacketsTransmit) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemNetworkPacketsTransmit(settings MetricSettings) metricSystemNetworkPacketsTransmit {
- m := metricSystemNetworkPacketsTransmit{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce metric representation defined in metadata and user settings.
type MetricsBuilder struct {
- startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
- metricsCapacity int // maximum observed number of metrics per resource.
- resourceCapacity int // maximum observed number of resource attributes.
- metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
- buildInfo component.BuildInfo // contains version information
- metricSystemNetworkConnections metricSystemNetworkConnections
- metricSystemNetworkConntrackCount metricSystemNetworkConntrackCount
- metricSystemNetworkConntrackMax metricSystemNetworkConntrackMax
- metricSystemNetworkDropped metricSystemNetworkDropped
- metricSystemNetworkDroppedReceive metricSystemNetworkDroppedReceive
- metricSystemNetworkDroppedTransmit metricSystemNetworkDroppedTransmit
- metricSystemNetworkErrors metricSystemNetworkErrors
- metricSystemNetworkErrorsReceive metricSystemNetworkErrorsReceive
- metricSystemNetworkErrorsTransmit metricSystemNetworkErrorsTransmit
- metricSystemNetworkIo metricSystemNetworkIo
- metricSystemNetworkIoReceive metricSystemNetworkIoReceive
- metricSystemNetworkIoTransmit metricSystemNetworkIoTransmit
- metricSystemNetworkPackets metricSystemNetworkPackets
- metricSystemNetworkPacketsReceive metricSystemNetworkPacketsReceive
- metricSystemNetworkPacketsTransmit metricSystemNetworkPacketsTransmit
+ startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
+ metricsCapacity int // maximum observed number of metrics per resource.
+ resourceCapacity int // maximum observed number of resource attributes.
+ metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
+ buildInfo component.BuildInfo // contains version information
+ metricSystemNetworkConnections metricSystemNetworkConnections
+ metricSystemNetworkConntrackCount metricSystemNetworkConntrackCount
+ metricSystemNetworkConntrackMax metricSystemNetworkConntrackMax
+ metricSystemNetworkDropped metricSystemNetworkDropped
+ metricSystemNetworkErrors metricSystemNetworkErrors
+ metricSystemNetworkIo metricSystemNetworkIo
+ metricSystemNetworkPackets metricSystemNetworkPackets
}
// metricBuilderOption applies changes to default metrics builder.
@@ -966,24 +502,16 @@ func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption {
func NewMetricsBuilder(settings MetricsSettings, buildInfo component.BuildInfo, options ...metricBuilderOption) *MetricsBuilder {
mb := &MetricsBuilder{
- startTime: pcommon.NewTimestampFromTime(time.Now()),
- metricsBuffer: pmetric.NewMetrics(),
- buildInfo: buildInfo,
- metricSystemNetworkConnections: newMetricSystemNetworkConnections(settings.SystemNetworkConnections),
- metricSystemNetworkConntrackCount: newMetricSystemNetworkConntrackCount(settings.SystemNetworkConntrackCount),
- metricSystemNetworkConntrackMax: newMetricSystemNetworkConntrackMax(settings.SystemNetworkConntrackMax),
- metricSystemNetworkDropped: newMetricSystemNetworkDropped(settings.SystemNetworkDropped),
- metricSystemNetworkDroppedReceive: newMetricSystemNetworkDroppedReceive(settings.SystemNetworkDroppedReceive),
- metricSystemNetworkDroppedTransmit: newMetricSystemNetworkDroppedTransmit(settings.SystemNetworkDroppedTransmit),
- metricSystemNetworkErrors: newMetricSystemNetworkErrors(settings.SystemNetworkErrors),
- metricSystemNetworkErrorsReceive: newMetricSystemNetworkErrorsReceive(settings.SystemNetworkErrorsReceive),
- metricSystemNetworkErrorsTransmit: newMetricSystemNetworkErrorsTransmit(settings.SystemNetworkErrorsTransmit),
- metricSystemNetworkIo: newMetricSystemNetworkIo(settings.SystemNetworkIo),
- metricSystemNetworkIoReceive: newMetricSystemNetworkIoReceive(settings.SystemNetworkIoReceive),
- metricSystemNetworkIoTransmit: newMetricSystemNetworkIoTransmit(settings.SystemNetworkIoTransmit),
- metricSystemNetworkPackets: newMetricSystemNetworkPackets(settings.SystemNetworkPackets),
- metricSystemNetworkPacketsReceive: newMetricSystemNetworkPacketsReceive(settings.SystemNetworkPacketsReceive),
- metricSystemNetworkPacketsTransmit: newMetricSystemNetworkPacketsTransmit(settings.SystemNetworkPacketsTransmit),
+ startTime: pcommon.NewTimestampFromTime(time.Now()),
+ metricsBuffer: pmetric.NewMetrics(),
+ buildInfo: buildInfo,
+ metricSystemNetworkConnections: newMetricSystemNetworkConnections(settings.SystemNetworkConnections),
+ metricSystemNetworkConntrackCount: newMetricSystemNetworkConntrackCount(settings.SystemNetworkConntrackCount),
+ metricSystemNetworkConntrackMax: newMetricSystemNetworkConntrackMax(settings.SystemNetworkConntrackMax),
+ metricSystemNetworkDropped: newMetricSystemNetworkDropped(settings.SystemNetworkDropped),
+ metricSystemNetworkErrors: newMetricSystemNetworkErrors(settings.SystemNetworkErrors),
+ metricSystemNetworkIo: newMetricSystemNetworkIo(settings.SystemNetworkIo),
+ metricSystemNetworkPackets: newMetricSystemNetworkPackets(settings.SystemNetworkPackets),
}
for _, op := range options {
op(mb)
@@ -1041,17 +569,9 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) {
mb.metricSystemNetworkConntrackCount.emit(ils.Metrics())
mb.metricSystemNetworkConntrackMax.emit(ils.Metrics())
mb.metricSystemNetworkDropped.emit(ils.Metrics())
- mb.metricSystemNetworkDroppedReceive.emit(ils.Metrics())
- mb.metricSystemNetworkDroppedTransmit.emit(ils.Metrics())
mb.metricSystemNetworkErrors.emit(ils.Metrics())
- mb.metricSystemNetworkErrorsReceive.emit(ils.Metrics())
- mb.metricSystemNetworkErrorsTransmit.emit(ils.Metrics())
mb.metricSystemNetworkIo.emit(ils.Metrics())
- mb.metricSystemNetworkIoReceive.emit(ils.Metrics())
- mb.metricSystemNetworkIoTransmit.emit(ils.Metrics())
mb.metricSystemNetworkPackets.emit(ils.Metrics())
- mb.metricSystemNetworkPacketsReceive.emit(ils.Metrics())
- mb.metricSystemNetworkPacketsTransmit.emit(ils.Metrics())
for _, op := range rmo {
op(rm)
}
@@ -1091,61 +611,21 @@ func (mb *MetricsBuilder) RecordSystemNetworkDroppedDataPoint(ts pcommon.Timesta
mb.metricSystemNetworkDropped.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue.String())
}
-// RecordSystemNetworkDroppedReceiveDataPoint adds a data point to system.network.dropped.receive metric.
-func (mb *MetricsBuilder) RecordSystemNetworkDroppedReceiveDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- mb.metricSystemNetworkDroppedReceive.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
-}
-
-// RecordSystemNetworkDroppedTransmitDataPoint adds a data point to system.network.dropped.transmit metric.
-func (mb *MetricsBuilder) RecordSystemNetworkDroppedTransmitDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- mb.metricSystemNetworkDroppedTransmit.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
-}
-
// RecordSystemNetworkErrorsDataPoint adds a data point to system.network.errors metric.
func (mb *MetricsBuilder) RecordSystemNetworkErrorsDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue AttributeDirection) {
mb.metricSystemNetworkErrors.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue.String())
}
-// RecordSystemNetworkErrorsReceiveDataPoint adds a data point to system.network.errors.receive metric.
-func (mb *MetricsBuilder) RecordSystemNetworkErrorsReceiveDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- mb.metricSystemNetworkErrorsReceive.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
-}
-
-// RecordSystemNetworkErrorsTransmitDataPoint adds a data point to system.network.errors.transmit metric.
-func (mb *MetricsBuilder) RecordSystemNetworkErrorsTransmitDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- mb.metricSystemNetworkErrorsTransmit.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
-}
-
// RecordSystemNetworkIoDataPoint adds a data point to system.network.io metric.
func (mb *MetricsBuilder) RecordSystemNetworkIoDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue AttributeDirection) {
mb.metricSystemNetworkIo.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue.String())
}
-// RecordSystemNetworkIoReceiveDataPoint adds a data point to system.network.io.receive metric.
-func (mb *MetricsBuilder) RecordSystemNetworkIoReceiveDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- mb.metricSystemNetworkIoReceive.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
-}
-
-// RecordSystemNetworkIoTransmitDataPoint adds a data point to system.network.io.transmit metric.
-func (mb *MetricsBuilder) RecordSystemNetworkIoTransmitDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- mb.metricSystemNetworkIoTransmit.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
-}
-
// RecordSystemNetworkPacketsDataPoint adds a data point to system.network.packets metric.
func (mb *MetricsBuilder) RecordSystemNetworkPacketsDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue AttributeDirection) {
mb.metricSystemNetworkPackets.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue.String())
}
-// RecordSystemNetworkPacketsReceiveDataPoint adds a data point to system.network.packets.receive metric.
-func (mb *MetricsBuilder) RecordSystemNetworkPacketsReceiveDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- mb.metricSystemNetworkPacketsReceive.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
-}
-
-// RecordSystemNetworkPacketsTransmitDataPoint adds a data point to system.network.packets.transmit metric.
-func (mb *MetricsBuilder) RecordSystemNetworkPacketsTransmitDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string) {
- mb.metricSystemNetworkPacketsTransmit.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue)
-}
-
// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
// and metrics builder should update its startTime and reset it's internal state accordingly.
func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
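To make the surviving API concrete, a minimal usage sketch of the regenerated network MetricsBuilder; the builder construction, interface name, and byte counts are illustrative, while the method names and signatures are the ones kept above.

```go
// Sketch (not part of this patch): recording network I/O on the remaining
// direction-attributed metric and flushing it to the builder's buffer.
mb := metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings(), component.NewDefaultBuildInfo())
now := pcommon.NewTimestampFromTime(time.Now())
mb.RecordSystemNetworkIoDataPoint(now, int64(bytesSent), "eth0", metadata.AttributeDirectionTransmit)
mb.RecordSystemNetworkIoDataPoint(now, int64(bytesRecv), "eth0", metadata.AttributeDirectionReceive)
mb.EmitForResource() // appends the recorded data points to the internal metrics buffer
```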
diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/metadata.yaml b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/metadata.yaml
index 7dd8ac314e08..18d52d34f3fd 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/metadata.yaml
+++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/metadata.yaml
@@ -5,20 +5,16 @@ sem_conv_version: 1.9.0
attributes:
device:
description: Name of the network interface.
-
direction:
description: Direction of flow of bytes/operations (receive or transmit).
enum: [receive, transmit]
-
protocol:
description: Network protocol, e.g. TCP or UDP.
enum: [tcp]
-
state:
description: State of the network connection.
metrics:
- # produced when receiver.hostmetricsreceiver.emitMetricsWithDirectionAttribute feature gate is enabled
system.network.packets:
enabled: true
description: The number of packets transferred. (Deprecated)
@@ -28,30 +24,6 @@ metrics:
aggregation: cumulative
monotonic: true
attributes: [device, direction]
-
- # produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.network.packets.transmit:
- enabled: true
- description: The number of packets transmitted.
- unit: "{packets}"
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
- attributes: [device]
-
- # produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.network.packets.receive:
- enabled: true
- description: The number of packets received.
- unit: "{packets}"
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
- attributes: [device]
-
- # produced when receiver.hostmetricsreceiver.emitMetricsWithDirectionAttribute feature gate is enabled
system.network.dropped:
enabled: true
description: The number of packets dropped. (Deprecated)
@@ -61,30 +33,6 @@ metrics:
aggregation: cumulative
monotonic: true
attributes: [device, direction]
-
- # produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.network.dropped.transmit:
- enabled: true
- description: The number of packets dropped on transmit.
- unit: "{packets}"
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
- attributes: [device]
-
- # produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.network.dropped.receive:
- enabled: true
- description: The number of packets dropped on receive.
- unit: "{packets}"
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
- attributes: [device]
-
- # produced when receiver.hostmetricsreceiver.emitMetricsWithDirectionAttribute feature gate is enabled
system.network.errors:
enabled: true
description: The number of errors encountered. (Deprecated)
@@ -94,30 +42,6 @@ metrics:
aggregation: cumulative
monotonic: true
attributes: [device, direction]
-
- # produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.network.errors.transmit:
- enabled: true
- description: The number of errors encountered on transmit.
- unit: "{errors}"
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
- attributes: [device]
-
- # produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.network.errors.receive:
- enabled: true
- description: The number of errors encountered on receive.
- unit: "{errors}"
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
- attributes: [device]
-
- # produced when receiver.hostmetricsreceiver.emitMetricsWithDirectionAttribute feature gate is enabled
system.network.io:
enabled: true
description: The number of bytes transmitted and received. (Deprecated)
@@ -127,29 +51,6 @@ metrics:
aggregation: cumulative
monotonic: true
attributes: [device, direction]
-
- # produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.network.io.transmit:
- enabled: true
- description: The number of bytes transmitted.
- unit: "By"
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
- attributes: [device]
-
- # produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.network.io.receive:
- enabled: true
- description: The number of bytes received.
- unit: "By"
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
- attributes: [device]
-
system.network.connections:
enabled: true
description: The number of connections.
@@ -159,7 +60,6 @@ metrics:
aggregation: cumulative
monotonic: false
attributes: [protocol, state]
-
system.network.conntrack.count:
enabled: false
description: The count of entries in conntrack table.
@@ -168,7 +68,6 @@ metrics:
value_type: int
aggregation: cumulative
monotonic: false
-
system.network.conntrack.max:
enabled: false
description: The limit for entries in the conntrack table.
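The conntrack metrics stay disabled by default after this change; a short sketch of enabling them through the generated settings (field names taken from the MetricsSettings struct above, the surrounding calls illustrative):

```go
// Sketch (not part of this patch): enabling the optional conntrack metrics
// before constructing the network MetricsBuilder.
settings := metadata.DefaultMetricsSettings()
settings.SystemNetworkConntrackCount.Enabled = true
settings.SystemNetworkConntrackMax.Enabled = true
mb := metadata.NewMetricsBuilder(settings, component.NewDefaultBuildInfo())
```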
diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go
index 5106d26d35b2..7749802366fc 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go
@@ -22,13 +22,11 @@ import (
"github.com/shirou/gopsutil/v3/host"
"github.com/shirou/gopsutil/v3/net"
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/collector/featuregate"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver/scrapererror"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset"
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata"
)
@@ -47,25 +45,21 @@ type scraper struct {
excludeFS filterset.FilterSet
// for mocking
- bootTime func() (uint64, error)
- ioCounters func(bool) ([]net.IOCountersStat, error)
- connections func(string) ([]net.ConnectionStat, error)
- conntrack func() ([]net.FilterStat, error)
- emitMetricsWithDirectionAttribute bool
- emitMetricsWithoutDirectionAttribute bool
+ bootTime func() (uint64, error)
+ ioCounters func(bool) ([]net.IOCountersStat, error)
+ connections func(string) ([]net.ConnectionStat, error)
+ conntrack func() ([]net.FilterStat, error)
}
// newNetworkScraper creates a set of Network related metrics
func newNetworkScraper(_ context.Context, settings component.ReceiverCreateSettings, cfg *Config) (*scraper, error) {
scraper := &scraper{
- settings: settings,
- config: cfg,
- bootTime: host.BootTime,
- ioCounters: net.IOCounters,
- connections: net.Connections,
- conntrack: net.FilterCounters,
- emitMetricsWithDirectionAttribute: featuregate.GetRegistry().IsEnabled(internal.EmitMetricsWithDirectionAttributeFeatureGateID),
- emitMetricsWithoutDirectionAttribute: featuregate.GetRegistry().IsEnabled(internal.EmitMetricsWithoutDirectionAttributeFeatureGateID),
+ settings: settings,
+ config: cfg,
+ bootTime: host.BootTime,
+ ioCounters: net.IOCounters,
+ connections: net.Connections,
+ conntrack: net.FilterCounters,
}
var err error
@@ -143,53 +137,29 @@ func (s *scraper) recordNetworkCounterMetrics() error {
func (s *scraper) recordNetworkPacketsMetric(now pcommon.Timestamp, ioCountersSlice []net.IOCountersStat) {
for _, ioCounters := range ioCountersSlice {
- if s.emitMetricsWithoutDirectionAttribute {
- s.mb.RecordSystemNetworkPacketsTransmitDataPoint(now, int64(ioCounters.PacketsSent), ioCounters.Name)
- s.mb.RecordSystemNetworkPacketsReceiveDataPoint(now, int64(ioCounters.PacketsRecv), ioCounters.Name)
- }
- if s.emitMetricsWithDirectionAttribute {
- s.mb.RecordSystemNetworkPacketsDataPoint(now, int64(ioCounters.PacketsSent), ioCounters.Name, metadata.AttributeDirectionTransmit)
- s.mb.RecordSystemNetworkPacketsDataPoint(now, int64(ioCounters.PacketsRecv), ioCounters.Name, metadata.AttributeDirectionReceive)
- }
+ s.mb.RecordSystemNetworkPacketsDataPoint(now, int64(ioCounters.PacketsSent), ioCounters.Name, metadata.AttributeDirectionTransmit)
+ s.mb.RecordSystemNetworkPacketsDataPoint(now, int64(ioCounters.PacketsRecv), ioCounters.Name, metadata.AttributeDirectionReceive)
}
}
func (s *scraper) recordNetworkDroppedPacketsMetric(now pcommon.Timestamp, ioCountersSlice []net.IOCountersStat) {
for _, ioCounters := range ioCountersSlice {
- if s.emitMetricsWithoutDirectionAttribute {
- s.mb.RecordSystemNetworkDroppedTransmitDataPoint(now, int64(ioCounters.Dropout), ioCounters.Name)
- s.mb.RecordSystemNetworkDroppedReceiveDataPoint(now, int64(ioCounters.Dropin), ioCounters.Name)
- }
- if s.emitMetricsWithDirectionAttribute {
- s.mb.RecordSystemNetworkDroppedDataPoint(now, int64(ioCounters.Dropout), ioCounters.Name, metadata.AttributeDirectionTransmit)
- s.mb.RecordSystemNetworkDroppedDataPoint(now, int64(ioCounters.Dropin), ioCounters.Name, metadata.AttributeDirectionReceive)
- }
+ s.mb.RecordSystemNetworkDroppedDataPoint(now, int64(ioCounters.Dropout), ioCounters.Name, metadata.AttributeDirectionTransmit)
+ s.mb.RecordSystemNetworkDroppedDataPoint(now, int64(ioCounters.Dropin), ioCounters.Name, metadata.AttributeDirectionReceive)
}
}
func (s *scraper) recordNetworkErrorPacketsMetric(now pcommon.Timestamp, ioCountersSlice []net.IOCountersStat) {
for _, ioCounters := range ioCountersSlice {
- if s.emitMetricsWithoutDirectionAttribute {
- s.mb.RecordSystemNetworkErrorsTransmitDataPoint(now, int64(ioCounters.Errout), ioCounters.Name)
- s.mb.RecordSystemNetworkErrorsReceiveDataPoint(now, int64(ioCounters.Errin), ioCounters.Name)
- }
- if s.emitMetricsWithDirectionAttribute {
- s.mb.RecordSystemNetworkErrorsDataPoint(now, int64(ioCounters.Errout), ioCounters.Name, metadata.AttributeDirectionTransmit)
- s.mb.RecordSystemNetworkErrorsDataPoint(now, int64(ioCounters.Errin), ioCounters.Name, metadata.AttributeDirectionReceive)
- }
+ s.mb.RecordSystemNetworkErrorsDataPoint(now, int64(ioCounters.Errout), ioCounters.Name, metadata.AttributeDirectionTransmit)
+ s.mb.RecordSystemNetworkErrorsDataPoint(now, int64(ioCounters.Errin), ioCounters.Name, metadata.AttributeDirectionReceive)
}
}
func (s *scraper) recordNetworkIOMetric(now pcommon.Timestamp, ioCountersSlice []net.IOCountersStat) {
for _, ioCounters := range ioCountersSlice {
- if s.emitMetricsWithoutDirectionAttribute {
- s.mb.RecordSystemNetworkIoTransmitDataPoint(now, int64(ioCounters.BytesSent), ioCounters.Name)
- s.mb.RecordSystemNetworkIoReceiveDataPoint(now, int64(ioCounters.BytesRecv), ioCounters.Name)
- }
- if s.emitMetricsWithDirectionAttribute {
- s.mb.RecordSystemNetworkIoDataPoint(now, int64(ioCounters.BytesSent), ioCounters.Name, metadata.AttributeDirectionTransmit)
- s.mb.RecordSystemNetworkIoDataPoint(now, int64(ioCounters.BytesRecv), ioCounters.Name, metadata.AttributeDirectionReceive)
- }
+ s.mb.RecordSystemNetworkIoDataPoint(now, int64(ioCounters.BytesSent), ioCounters.Name, metadata.AttributeDirectionTransmit)
+ s.mb.RecordSystemNetworkIoDataPoint(now, int64(ioCounters.BytesRecv), ioCounters.Name, metadata.AttributeDirectionReceive)
}
}
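Consumers that relied on the removed per-direction metrics can recover the same series by filtering on the direction attribute. A minimal sketch against the pdata API follows; the metric and attribute names are the ones this scraper emits, everything else is illustrative.

```go
// Sketch (not part of this patch): summing transmitted bytes from the
// direction-attributed system.network.io metric.
func transmittedBytes(md pmetric.Metrics) int64 {
	var total int64
	for i := 0; i < md.ResourceMetrics().Len(); i++ {
		sms := md.ResourceMetrics().At(i).ScopeMetrics()
		for j := 0; j < sms.Len(); j++ {
			ms := sms.At(j).Metrics()
			for k := 0; k < ms.Len(); k++ {
				m := ms.At(k)
				if m.Name() != "system.network.io" {
					continue
				}
				dps := m.Sum().DataPoints()
				for l := 0; l < dps.Len(); l++ {
					dp := dps.At(l)
					if dir, ok := dp.Attributes().Get("direction"); ok && dir.Str() == "transmit" {
						total += dp.IntValue()
					}
				}
			}
		}
	}
	return total
}
```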
diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go
index e0be19213dde..f74b8b2d6951 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go
@@ -34,21 +34,19 @@ import (
func TestScrape(t *testing.T) {
type testCase struct {
- name string
- config Config
- bootTimeFunc func() (uint64, error)
- ioCountersFunc func(bool) ([]net.IOCountersStat, error)
- connectionsFunc func(string) ([]net.ConnectionStat, error)
- conntrackFunc func() ([]net.FilterStat, error)
- expectNetworkMetrics bool
- expectedStartTime pcommon.Timestamp
- newErrRegex string
- initializationErr string
- expectedErr string
- expectedErrCount int
- expectMetricsWithDirectionAttribute bool
- expectMetricsWithoutDirectionAttribute bool
- mutateScraper func(*scraper)
+ name string
+ config Config
+ bootTimeFunc func() (uint64, error)
+ ioCountersFunc func(bool) ([]net.IOCountersStat, error)
+ connectionsFunc func(string) ([]net.ConnectionStat, error)
+ conntrackFunc func() ([]net.FilterStat, error)
+ expectNetworkMetrics bool
+ expectedStartTime pcommon.Timestamp
+ newErrRegex string
+ initializationErr string
+ expectedErr string
+ expectedErrCount int
+ mutateScraper func(*scraper)
}
testCases := []testCase{
@@ -57,31 +55,23 @@ func TestScrape(t *testing.T) {
config: Config{
Metrics: metadata.DefaultMetricsSettings(),
},
- expectNetworkMetrics: true,
- expectMetricsWithDirectionAttribute: true,
+ expectNetworkMetrics: true,
},
{
name: "Standard with direction removed",
config: Config{
Metrics: metadata.DefaultMetricsSettings(),
},
- expectNetworkMetrics: true,
- expectMetricsWithDirectionAttribute: false,
- expectMetricsWithoutDirectionAttribute: true,
- mutateScraper: func(s *scraper) {
- s.emitMetricsWithDirectionAttribute = false
- s.emitMetricsWithoutDirectionAttribute = true
- },
+ expectNetworkMetrics: true,
},
{
name: "Validate Start Time",
config: Config{
Metrics: metadata.DefaultMetricsSettings(),
},
- bootTimeFunc: func() (uint64, error) { return 100, nil },
- expectNetworkMetrics: true,
- expectMetricsWithDirectionAttribute: true,
- expectedStartTime: 100 * 1e9,
+ bootTimeFunc: func() (uint64, error) { return 100, nil },
+ expectNetworkMetrics: true,
+ expectedStartTime: 100 * 1e9,
},
{
name: "Include Filter that matches nothing",
@@ -125,13 +115,12 @@ func TestScrape(t *testing.T) {
expectedErrCount: connectionsMetricsLen,
},
{
- name: "Conntrack error ignorred if metric disabled",
+ name: "Conntrack error ignored if metric disabled",
config: Config{
Metrics: metadata.DefaultMetricsSettings(), // conntrack metrics are disabled by default
},
- conntrackFunc: func() ([]net.FilterStat, error) { return nil, errors.New("conntrack failed") },
- expectNetworkMetrics: true,
- expectMetricsWithDirectionAttribute: true,
+ conntrackFunc: func() ([]net.FilterStat, error) { return nil, errors.New("conntrack failed") },
+ expectNetworkMetrics: true,
},
}
@@ -186,12 +175,7 @@ func TestScrape(t *testing.T) {
expectedMetricCount := 1
if test.expectNetworkMetrics {
- if test.expectMetricsWithoutDirectionAttribute {
- expectedMetricCount += 8
- }
- if test.expectMetricsWithDirectionAttribute {
- expectedMetricCount += 4
- }
+ expectedMetricCount += 4
}
assert.Equal(t, expectedMetricCount, md.MetricCount())
@@ -199,33 +183,12 @@ func TestScrape(t *testing.T) {
idx := 0
assertNetworkConnectionsMetricValid(t, metrics.At(idx))
if test.expectNetworkMetrics {
- if test.expectMetricsWithoutDirectionAttribute {
- assertNetworkIOMetricValid(t, metrics.At(idx+1), "system.network.dropped.receive",
- test.expectedStartTime, true)
- assertNetworkIOMetricValid(t, metrics.At(idx+2), "system.network.dropped.transmit",
- test.expectedStartTime, true)
- assertNetworkIOMetricValid(t, metrics.At(idx+3), "system.network.errors.receive",
- test.expectedStartTime, true)
- assertNetworkIOMetricValid(t, metrics.At(idx+4), "system.network.errors.transmit",
- test.expectedStartTime, true)
- assertNetworkIOMetricValid(t, metrics.At(idx+5), "system.network.io.receive",
- test.expectedStartTime, true)
- assertNetworkIOMetricValid(t, metrics.At(idx+6), "system.network.io.transmit",
- test.expectedStartTime, true)
- assertNetworkIOMetricValid(t, metrics.At(idx+7), "system.network.packets.receive",
- test.expectedStartTime, true)
- assertNetworkIOMetricValid(t, metrics.At(idx+8), "system.network.packets.transmit",
- test.expectedStartTime, true)
- }
- if test.expectMetricsWithDirectionAttribute {
- assertNetworkIOMetricValid(t, metrics.At(idx+1), "system.network.dropped",
- test.expectedStartTime, false)
- assertNetworkIOMetricValid(t, metrics.At(idx+2), "system.network.errors", test.expectedStartTime,
- false)
- assertNetworkIOMetricValid(t, metrics.At(idx+3), "system.network.io", test.expectedStartTime, false)
- assertNetworkIOMetricValid(t, metrics.At(idx+4), "system.network.packets",
- test.expectedStartTime, false)
- }
+ assertNetworkIOMetricValid(t, metrics.At(idx+1), "system.network.dropped",
+ test.expectedStartTime)
+ assertNetworkIOMetricValid(t, metrics.At(idx+2), "system.network.errors", test.expectedStartTime)
+ assertNetworkIOMetricValid(t, metrics.At(idx+3), "system.network.io", test.expectedStartTime)
+ assertNetworkIOMetricValid(t, metrics.At(idx+4), "system.network.packets",
+ test.expectedStartTime)
internal.AssertSameTimeStampForMetrics(t, metrics, 1, 5)
idx += 4
}
@@ -235,20 +198,14 @@ func TestScrape(t *testing.T) {
}
}
-func assertNetworkIOMetricValid(t *testing.T, metric pmetric.Metric, expectedName string, startTime pcommon.Timestamp, expectDirectionRemoved bool) {
+func assertNetworkIOMetricValid(t *testing.T, metric pmetric.Metric, expectedName string, startTime pcommon.Timestamp) {
assert.Equal(t, expectedName, metric.Name())
if startTime != 0 {
internal.AssertSumMetricStartTimeEquals(t, metric, startTime)
}
- if expectDirectionRemoved {
- assert.GreaterOrEqual(t, metric.Sum().DataPoints().Len(), 1)
- } else {
- assert.GreaterOrEqual(t, metric.Sum().DataPoints().Len(), 2)
- }
+ assert.GreaterOrEqual(t, metric.Sum().DataPoints().Len(), 2)
internal.AssertSumMetricHasAttribute(t, metric, 0, "device")
- if !expectDirectionRemoved {
- internal.AssertSumMetricHasAttribute(t, metric, 0, "direction")
- }
+ internal.AssertSumMetricHasAttribute(t, metric, 0, "direction")
}
func assertNetworkConnectionsMetricValid(t *testing.T, metric pmetric.Metric) {
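A hedged sketch of the kind of check the simplified helper above performs, written against pdata only so it does not depend on the internal test helpers; the function name is hypothetical.

```go
// Sketch (not part of this patch): asserting that the first data point of a
// direction-attributed sum carries the direction attribute.
func assertHasDirection(t *testing.T, metric pmetric.Metric) {
	dp := metric.Sum().DataPoints().At(0)
	_, ok := dp.Attributes().Get("direction")
	assert.True(t, ok, "metric %q is missing the direction attribute", metric.Name())
}
```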
diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/documentation.md
index f439f3b50d93..7cbcde3058e9 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/documentation.md
+++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/documentation.md
@@ -10,8 +10,6 @@ These are the metrics available for this scraper.
| ---- | ----------- | ---- | ---- | ---------- |
| **system.paging.faults** | The number of page faults. | {faults} | Sum(Int) | |
| **system.paging.operations** | The number of paging operations. | {operations} | Sum(Int) | |
-| **system.paging.operations.page_in** | The number of page_in operations. | {operations} | Sum(Int) | |
-| **system.paging.operations.page_out** | The number of page_out operations. | {operations} | Sum(Int) | |
| **system.paging.usage** | Swap (unix) or pagefile (windows) usage. | By | Sum(Int) | |
| system.paging.utilization | Swap (unix) or pagefile (windows) utilization. | 1 | Gauge(Double) | |
diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics.go
index 72f57b68e2d4..37a2fde9cb92 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics.go
@@ -18,12 +18,10 @@ type MetricSettings struct {
// MetricsSettings provides settings for hostmetricsreceiver/paging metrics.
type MetricsSettings struct {
- SystemPagingFaults MetricSettings `mapstructure:"system.paging.faults"`
- SystemPagingOperations MetricSettings `mapstructure:"system.paging.operations"`
- SystemPagingOperationsPageIn MetricSettings `mapstructure:"system.paging.operations.page_in"`
- SystemPagingOperationsPageOut MetricSettings `mapstructure:"system.paging.operations.page_out"`
- SystemPagingUsage MetricSettings `mapstructure:"system.paging.usage"`
- SystemPagingUtilization MetricSettings `mapstructure:"system.paging.utilization"`
+ SystemPagingFaults MetricSettings `mapstructure:"system.paging.faults"`
+ SystemPagingOperations MetricSettings `mapstructure:"system.paging.operations"`
+ SystemPagingUsage MetricSettings `mapstructure:"system.paging.usage"`
+ SystemPagingUtilization MetricSettings `mapstructure:"system.paging.utilization"`
}
func DefaultMetricsSettings() MetricsSettings {
@@ -34,12 +32,6 @@ func DefaultMetricsSettings() MetricsSettings {
SystemPagingOperations: MetricSettings{
Enabled: true,
},
- SystemPagingOperationsPageIn: MetricSettings{
- Enabled: true,
- },
- SystemPagingOperationsPageOut: MetricSettings{
- Enabled: true,
- },
SystemPagingUsage: MetricSettings{
Enabled: true,
},
@@ -238,112 +230,6 @@ func newMetricSystemPagingOperations(settings MetricSettings) metricSystemPaging
return m
}
-type metricSystemPagingOperationsPageIn struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.paging.operations.page_in metric with initial data.
-func (m *metricSystemPagingOperationsPageIn) init() {
- m.data.SetName("system.paging.operations.page_in")
- m.data.SetDescription("The number of page_in operations.")
- m.data.SetUnit("{operations}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemPagingOperationsPageIn) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, typeAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("type", typeAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemPagingOperationsPageIn) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemPagingOperationsPageIn) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemPagingOperationsPageIn(settings MetricSettings) metricSystemPagingOperationsPageIn {
- m := metricSystemPagingOperationsPageIn{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
-type metricSystemPagingOperationsPageOut struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills system.paging.operations.page_out metric with initial data.
-func (m *metricSystemPagingOperationsPageOut) init() {
- m.data.SetName("system.paging.operations.page_out")
- m.data.SetDescription("The number of page_out operations.")
- m.data.SetUnit("{operations}")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
- m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
-}
-
-func (m *metricSystemPagingOperationsPageOut) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, typeAttributeValue string) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
- dp.Attributes().PutStr("type", typeAttributeValue)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSystemPagingOperationsPageOut) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemPagingOperationsPageOut) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricSystemPagingOperationsPageOut(settings MetricSettings) metricSystemPagingOperationsPageOut {
- m := metricSystemPagingOperationsPageOut{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
type metricSystemPagingUsage struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -453,17 +339,15 @@ func newMetricSystemPagingUtilization(settings MetricSettings) metricSystemPagin
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce metric representation defined in metadata and user settings.
type MetricsBuilder struct {
- startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
- metricsCapacity int // maximum observed number of metrics per resource.
- resourceCapacity int // maximum observed number of resource attributes.
- metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
- buildInfo component.BuildInfo // contains version information
- metricSystemPagingFaults metricSystemPagingFaults
- metricSystemPagingOperations metricSystemPagingOperations
- metricSystemPagingOperationsPageIn metricSystemPagingOperationsPageIn
- metricSystemPagingOperationsPageOut metricSystemPagingOperationsPageOut
- metricSystemPagingUsage metricSystemPagingUsage
- metricSystemPagingUtilization metricSystemPagingUtilization
+ startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
+ metricsCapacity int // maximum observed number of metrics per resource.
+ resourceCapacity int // maximum observed number of resource attributes.
+ metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
+ buildInfo component.BuildInfo // contains version information
+ metricSystemPagingFaults metricSystemPagingFaults
+ metricSystemPagingOperations metricSystemPagingOperations
+ metricSystemPagingUsage metricSystemPagingUsage
+ metricSystemPagingUtilization metricSystemPagingUtilization
}
// metricBuilderOption applies changes to default metrics builder.
@@ -478,15 +362,13 @@ func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption {
func NewMetricsBuilder(settings MetricsSettings, buildInfo component.BuildInfo, options ...metricBuilderOption) *MetricsBuilder {
mb := &MetricsBuilder{
- startTime: pcommon.NewTimestampFromTime(time.Now()),
- metricsBuffer: pmetric.NewMetrics(),
- buildInfo: buildInfo,
- metricSystemPagingFaults: newMetricSystemPagingFaults(settings.SystemPagingFaults),
- metricSystemPagingOperations: newMetricSystemPagingOperations(settings.SystemPagingOperations),
- metricSystemPagingOperationsPageIn: newMetricSystemPagingOperationsPageIn(settings.SystemPagingOperationsPageIn),
- metricSystemPagingOperationsPageOut: newMetricSystemPagingOperationsPageOut(settings.SystemPagingOperationsPageOut),
- metricSystemPagingUsage: newMetricSystemPagingUsage(settings.SystemPagingUsage),
- metricSystemPagingUtilization: newMetricSystemPagingUtilization(settings.SystemPagingUtilization),
+ startTime: pcommon.NewTimestampFromTime(time.Now()),
+ metricsBuffer: pmetric.NewMetrics(),
+ buildInfo: buildInfo,
+ metricSystemPagingFaults: newMetricSystemPagingFaults(settings.SystemPagingFaults),
+ metricSystemPagingOperations: newMetricSystemPagingOperations(settings.SystemPagingOperations),
+ metricSystemPagingUsage: newMetricSystemPagingUsage(settings.SystemPagingUsage),
+ metricSystemPagingUtilization: newMetricSystemPagingUtilization(settings.SystemPagingUtilization),
}
for _, op := range options {
op(mb)
@@ -542,8 +424,6 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) {
ils.Metrics().EnsureCapacity(mb.metricsCapacity)
mb.metricSystemPagingFaults.emit(ils.Metrics())
mb.metricSystemPagingOperations.emit(ils.Metrics())
- mb.metricSystemPagingOperationsPageIn.emit(ils.Metrics())
- mb.metricSystemPagingOperationsPageOut.emit(ils.Metrics())
mb.metricSystemPagingUsage.emit(ils.Metrics())
mb.metricSystemPagingUtilization.emit(ils.Metrics())
for _, op := range rmo {
@@ -575,16 +455,6 @@ func (mb *MetricsBuilder) RecordSystemPagingOperationsDataPoint(ts pcommon.Times
mb.metricSystemPagingOperations.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String(), typeAttributeValue.String())
}
-// RecordSystemPagingOperationsPageInDataPoint adds a data point to system.paging.operations.page_in metric.
-func (mb *MetricsBuilder) RecordSystemPagingOperationsPageInDataPoint(ts pcommon.Timestamp, val int64, typeAttributeValue AttributeType) {
- mb.metricSystemPagingOperationsPageIn.recordDataPoint(mb.startTime, ts, val, typeAttributeValue.String())
-}
-
-// RecordSystemPagingOperationsPageOutDataPoint adds a data point to system.paging.operations.page_out metric.
-func (mb *MetricsBuilder) RecordSystemPagingOperationsPageOutDataPoint(ts pcommon.Timestamp, val int64, typeAttributeValue AttributeType) {
- mb.metricSystemPagingOperationsPageOut.recordDataPoint(mb.startTime, ts, val, typeAttributeValue.String())
-}
-
// RecordSystemPagingUsageDataPoint adds a data point to system.paging.usage metric.
func (mb *MetricsBuilder) RecordSystemPagingUsageDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, stateAttributeValue AttributeState) {
mb.metricSystemPagingUsage.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, stateAttributeValue.String())
diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/metadata.yaml b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/metadata.yaml
index b78d7b6624f7..e423c3e56dce 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/metadata.yaml
+++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/metadata.yaml
@@ -29,7 +29,6 @@ metrics:
monotonic: false
attributes: [device, state]
- # produced when receiver.hostmetricsreceiver.emitMetricsWithDirectionAttribute feature gate is enabled
system.paging.operations:
enabled: true
description: The number of paging operations.
@@ -40,28 +39,6 @@ metrics:
monotonic: true
attributes: [direction, type]
- # produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.paging.operations.page_in:
- enabled: true
- description: The number of page_in operations.
- unit: "{operations}"
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
- attributes: [type]
-
- # produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- system.paging.operations.page_out:
- enabled: true
- description: The number of page_out operations.
- unit: "{operations}"
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
- attributes: [type]
-
system.paging.faults:
enabled: true
description: The number of page faults.
diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go
index af2a92f52d6b..f2fc860e453b 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go
@@ -25,12 +25,10 @@ import (
"github.com/shirou/gopsutil/v3/host"
"github.com/shirou/gopsutil/v3/mem"
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/collector/featuregate"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver/scrapererror"
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata"
)
@@ -46,23 +44,19 @@ type scraper struct {
mb *metadata.MetricsBuilder
// for mocking
- bootTime func() (uint64, error)
- getPageFileStats func() ([]*pageFileStats, error)
- swapMemory func() (*mem.SwapMemoryStat, error)
- emitMetricsWithDirectionAttribute bool
- emitMetricsWithoutDirectionAttribute bool
+ bootTime func() (uint64, error)
+ getPageFileStats func() ([]*pageFileStats, error)
+ swapMemory func() (*mem.SwapMemoryStat, error)
}
// newPagingScraper creates a Paging Scraper
func newPagingScraper(_ context.Context, settings component.ReceiverCreateSettings, cfg *Config) *scraper {
return &scraper{
- settings: settings,
- config: cfg,
- bootTime: host.BootTime,
- getPageFileStats: getPageFileStats,
- swapMemory: mem.SwapMemory,
- emitMetricsWithDirectionAttribute: featuregate.GetRegistry().IsEnabled(internal.EmitMetricsWithDirectionAttributeFeatureGateID),
- emitMetricsWithoutDirectionAttribute: featuregate.GetRegistry().IsEnabled(internal.EmitMetricsWithoutDirectionAttributeFeatureGateID),
+ settings: settings,
+ config: cfg,
+ bootTime: host.BootTime,
+ getPageFileStats: getPageFileStats,
+ swapMemory: mem.SwapMemory,
}
}
@@ -137,19 +131,10 @@ func (s *scraper) scrapePagingMetrics() error {
}
func (s *scraper) recordPagingOperationsDataPoints(now pcommon.Timestamp, swap *mem.SwapMemoryStat) {
- if s.emitMetricsWithoutDirectionAttribute {
- s.mb.RecordSystemPagingOperationsPageInDataPoint(now, int64(swap.Sin), metadata.AttributeTypeMajor)
- s.mb.RecordSystemPagingOperationsPageOutDataPoint(now, int64(swap.Sout), metadata.AttributeTypeMajor)
- s.mb.RecordSystemPagingOperationsPageInDataPoint(now, int64(swap.PgIn), metadata.AttributeTypeMinor)
- s.mb.RecordSystemPagingOperationsPageOutDataPoint(now, int64(swap.PgOut), metadata.AttributeTypeMinor)
- }
-
- if s.emitMetricsWithDirectionAttribute {
- s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.Sin), metadata.AttributeDirectionPageIn, metadata.AttributeTypeMajor)
- s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.Sout), metadata.AttributeDirectionPageOut, metadata.AttributeTypeMajor)
- s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.PgIn), metadata.AttributeDirectionPageIn, metadata.AttributeTypeMinor)
- s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.PgOut), metadata.AttributeDirectionPageOut, metadata.AttributeTypeMinor)
- }
+ s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.Sin), metadata.AttributeDirectionPageIn, metadata.AttributeTypeMajor)
+ s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.Sout), metadata.AttributeDirectionPageOut, metadata.AttributeTypeMajor)
+ s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.PgIn), metadata.AttributeDirectionPageIn, metadata.AttributeTypeMinor)
+ s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.PgOut), metadata.AttributeDirectionPageOut, metadata.AttributeTypeMinor)
}
func (s *scraper) recordPageFaultsDataPoints(now pcommon.Timestamp, swap *mem.SwapMemoryStat) {
diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_test.go
index eca66c1d28a5..de91104aa2c2 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_test.go
@@ -32,13 +32,11 @@ import (
func TestScrape(t *testing.T) {
type testCase struct {
- name string
- config Config
- expectedStartTime pcommon.Timestamp
- initializationErr string
- expectMetricsWithDirectionAttribute bool
- expectMetricsWithoutDirectionAttribute bool
- mutateScraper func(*scraper)
+ name string
+ config Config
+ expectedStartTime pcommon.Timestamp
+ initializationErr string
+ mutateScraper func(*scraper)
}
config := metadata.DefaultMetricsSettings()
@@ -46,19 +44,12 @@ func TestScrape(t *testing.T) {
testCases := []testCase{
{
- name: "Standard",
- config: Config{Metrics: config},
- expectMetricsWithDirectionAttribute: true,
+ name: "Standard",
+ config: Config{Metrics: config},
},
{
- name: "Standard with direction removed",
- config: Config{Metrics: config},
- expectMetricsWithDirectionAttribute: false,
- expectMetricsWithoutDirectionAttribute: true,
- mutateScraper: func(s *scraper) {
- s.emitMetricsWithDirectionAttribute = false
- s.emitMetricsWithoutDirectionAttribute = true
- },
+ name: "Standard with direction removed",
+ config: Config{Metrics: config},
},
{
name: "Validate Start Time",
@@ -101,10 +92,6 @@ func TestScrape(t *testing.T) {
if runtime.GOOS == "windows" {
expectedMetrics = 3
}
- if test.expectMetricsWithoutDirectionAttribute {
- // in/out are separated into an additional metric
- expectedMetrics++
- }
assert.Equal(t, expectedMetrics, md.MetricCount())
@@ -114,15 +101,8 @@ func TestScrape(t *testing.T) {
startIndex++
}
- if test.expectMetricsWithoutDirectionAttribute {
- assertPagingOperationsMetricValid(t, []pmetric.Metric{metrics.At(startIndex),
- metrics.At(startIndex + 1)}, test.expectedStartTime, true)
- startIndex++
- }
- if test.expectMetricsWithDirectionAttribute {
- assertPagingOperationsMetricValid(t, []pmetric.Metric{metrics.At(startIndex)},
- test.expectedStartTime, false)
- }
+ assertPagingOperationsMetricValid(t, []pmetric.Metric{metrics.At(startIndex)},
+ test.expectedStartTime, false)
internal.AssertSameTimeStampForMetrics(t, metrics, 0, metrics.Len()-2)
startIndex++
@@ -219,29 +199,12 @@ func assertPagingOperationsMetricValid(t *testing.T, pagingMetric []pmetric.Metr
unit string
}
- var tests []test
-
- if removeAttribute {
- tests = []test{
- {
- name: "system.paging.operations.page_in",
- description: "The number of page_in operations.",
- unit: "{operations}",
- },
- {
- name: "system.paging.operations.page_out",
- description: "The number of page_out operations.",
- unit: "{operations}",
- },
- }
- } else {
- tests = []test{
- {
- name: "system.paging.operations",
- description: "The number of paging operations.",
- unit: "{operations}",
- },
- }
+ tests := []test{
+ {
+ name: "system.paging.operations",
+ description: "The number of paging operations.",
+ unit: "{operations}",
+ },
}
for idx, tt := range tests {
diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go
index 93731ed2f4ce..4e3ade65735a 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go
@@ -24,13 +24,11 @@ import (
"github.com/shirou/gopsutil/v3/host"
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/collector/featuregate"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver/scrapererror"
"go.uber.org/zap"
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/perfcounters"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata"
)
@@ -55,22 +53,18 @@ type scraper struct {
skipScrape bool
// for mocking
- bootTime func() (uint64, error)
- pageFileStats func() ([]*pageFileStats, error)
- emitMetricsWithDirectionAttribute bool
- emitMetricsWithoutDirectionAttribute bool
+ bootTime func() (uint64, error)
+ pageFileStats func() ([]*pageFileStats, error)
}
// newPagingScraper creates a Paging Scraper
func newPagingScraper(_ context.Context, settings component.ReceiverCreateSettings, cfg *Config) *scraper {
return &scraper{
- settings: settings,
- config: cfg,
- perfCounterScraper: &perfcounters.PerfLibScraper{},
- bootTime: host.BootTime,
- pageFileStats: getPageFileStats,
- emitMetricsWithDirectionAttribute: featuregate.GetRegistry().IsEnabled(internal.EmitMetricsWithDirectionAttributeFeatureGateID),
- emitMetricsWithoutDirectionAttribute: featuregate.GetRegistry().IsEnabled(internal.EmitMetricsWithoutDirectionAttributeFeatureGateID),
+ settings: settings,
+ config: cfg,
+ perfCounterScraper: &perfcounters.PerfLibScraper{},
+ bootTime: host.BootTime,
+ pageFileStats: getPageFileStats,
}
}
@@ -161,12 +155,6 @@ func (s *scraper) scrapePagingOperationsMetric() error {
}
func (s *scraper) recordPagingOperationsDataPoints(now pcommon.Timestamp, memoryCounterValues *perfcounters.CounterValues) {
- if s.emitMetricsWithoutDirectionAttribute {
- s.mb.RecordSystemPagingOperationsPageInDataPoint(now, memoryCounterValues.Values[pageReadsPerSec], metadata.AttributeTypeMajor)
- s.mb.RecordSystemPagingOperationsPageOutDataPoint(now, memoryCounterValues.Values[pageWritesPerSec], metadata.AttributeTypeMajor)
- }
- if s.emitMetricsWithDirectionAttribute {
- s.mb.RecordSystemPagingOperationsDataPoint(now, memoryCounterValues.Values[pageReadsPerSec], metadata.AttributeDirectionPageIn, metadata.AttributeTypeMajor)
- s.mb.RecordSystemPagingOperationsDataPoint(now, memoryCounterValues.Values[pageWritesPerSec], metadata.AttributeDirectionPageOut, metadata.AttributeTypeMajor)
- }
+ s.mb.RecordSystemPagingOperationsDataPoint(now, memoryCounterValues.Values[pageReadsPerSec], metadata.AttributeDirectionPageIn, metadata.AttributeTypeMajor)
+ s.mb.RecordSystemPagingOperationsDataPoint(now, memoryCounterValues.Values[pageWritesPerSec], metadata.AttributeDirectionPageOut, metadata.AttributeTypeMajor)
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/processscraper/documentation.md
index 72e229ec512a..1df9cd3ba12e 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/documentation.md
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/documentation.md
@@ -10,8 +10,6 @@ These are the metrics available for this scraper.
| ---- | ----------- | ---- | ---- | ---------- |
| **process.cpu.time** | Total CPU seconds broken down by different states. | s | Sum(Double) | |
| **process.disk.io** | Disk bytes transferred. | By | Sum(Int) | |
-| **process.disk.io.read** | Disk bytes read. | By | Sum(Int) | |
-| **process.disk.io.write** | Disk bytes written. | By | Sum(Int) | |
| **process.memory.physical_usage** | The amount of physical memory in use. | By | Sum(Int) | |
| **process.memory.virtual_usage** | Virtual memory size. | By | Sum(Int) | |
| process.threads | Process threads count. | {threads} | Sum(Int) | |
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics.go
index b05202e76235..cf3e666698aa 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics.go
@@ -20,8 +20,6 @@ type MetricSettings struct {
type MetricsSettings struct {
ProcessCPUTime MetricSettings `mapstructure:"process.cpu.time"`
ProcessDiskIo MetricSettings `mapstructure:"process.disk.io"`
- ProcessDiskIoRead MetricSettings `mapstructure:"process.disk.io.read"`
- ProcessDiskIoWrite MetricSettings `mapstructure:"process.disk.io.write"`
ProcessMemoryPhysicalUsage MetricSettings `mapstructure:"process.memory.physical_usage"`
ProcessMemoryVirtualUsage MetricSettings `mapstructure:"process.memory.virtual_usage"`
ProcessThreads MetricSettings `mapstructure:"process.threads"`
@@ -35,12 +33,6 @@ func DefaultMetricsSettings() MetricsSettings {
ProcessDiskIo: MetricSettings{
Enabled: true,
},
- ProcessDiskIoRead: MetricSettings{
- Enabled: true,
- },
- ProcessDiskIoWrite: MetricSettings{
- Enabled: true,
- },
ProcessMemoryPhysicalUsage: MetricSettings{
Enabled: true,
},
@@ -215,108 +207,6 @@ func newMetricProcessDiskIo(settings MetricSettings) metricProcessDiskIo {
return m
}
-type metricProcessDiskIoRead struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills process.disk.io.read metric with initial data.
-func (m *metricProcessDiskIoRead) init() {
- m.data.SetName("process.disk.io.read")
- m.data.SetDescription("Disk bytes read.")
- m.data.SetUnit("By")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
-}
-
-func (m *metricProcessDiskIoRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricProcessDiskIoRead) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricProcessDiskIoRead) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricProcessDiskIoRead(settings MetricSettings) metricProcessDiskIoRead {
- m := metricProcessDiskIoRead{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
-type metricProcessDiskIoWrite struct {
- data pmetric.Metric // data buffer for generated metric.
- settings MetricSettings // metric settings provided by user.
- capacity int // max observed number of data points added to the metric.
-}
-
-// init fills process.disk.io.write metric with initial data.
-func (m *metricProcessDiskIoWrite) init() {
- m.data.SetName("process.disk.io.write")
- m.data.SetDescription("Disk bytes written.")
- m.data.SetUnit("By")
- m.data.SetEmptySum()
- m.data.Sum().SetIsMonotonic(true)
- m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
-}
-
-func (m *metricProcessDiskIoWrite) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
- if !m.settings.Enabled {
- return
- }
- dp := m.data.Sum().DataPoints().AppendEmpty()
- dp.SetStartTimestamp(start)
- dp.SetTimestamp(ts)
- dp.SetIntValue(val)
-}
-
-// updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricProcessDiskIoWrite) updateCapacity() {
- if m.data.Sum().DataPoints().Len() > m.capacity {
- m.capacity = m.data.Sum().DataPoints().Len()
- }
-}
-
-// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricProcessDiskIoWrite) emit(metrics pmetric.MetricSlice) {
- if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
- m.updateCapacity()
- m.data.MoveTo(metrics.AppendEmpty())
- m.init()
- }
-}
-
-func newMetricProcessDiskIoWrite(settings MetricSettings) metricProcessDiskIoWrite {
- m := metricProcessDiskIoWrite{settings: settings}
- if settings.Enabled {
- m.data = pmetric.NewMetric()
- m.init()
- }
- return m
-}
-
type metricProcessMemoryPhysicalUsage struct {
data pmetric.Metric // data buffer for generated metric.
settings MetricSettings // metric settings provided by user.
@@ -480,8 +370,6 @@ type MetricsBuilder struct {
buildInfo component.BuildInfo // contains version information
metricProcessCPUTime metricProcessCPUTime
metricProcessDiskIo metricProcessDiskIo
- metricProcessDiskIoRead metricProcessDiskIoRead
- metricProcessDiskIoWrite metricProcessDiskIoWrite
metricProcessMemoryPhysicalUsage metricProcessMemoryPhysicalUsage
metricProcessMemoryVirtualUsage metricProcessMemoryVirtualUsage
metricProcessThreads metricProcessThreads
@@ -504,8 +392,6 @@ func NewMetricsBuilder(settings MetricsSettings, buildInfo component.BuildInfo,
buildInfo: buildInfo,
metricProcessCPUTime: newMetricProcessCPUTime(settings.ProcessCPUTime),
metricProcessDiskIo: newMetricProcessDiskIo(settings.ProcessDiskIo),
- metricProcessDiskIoRead: newMetricProcessDiskIoRead(settings.ProcessDiskIoRead),
- metricProcessDiskIoWrite: newMetricProcessDiskIoWrite(settings.ProcessDiskIoWrite),
metricProcessMemoryPhysicalUsage: newMetricProcessMemoryPhysicalUsage(settings.ProcessMemoryPhysicalUsage),
metricProcessMemoryVirtualUsage: newMetricProcessMemoryVirtualUsage(settings.ProcessMemoryVirtualUsage),
metricProcessThreads: newMetricProcessThreads(settings.ProcessThreads),
@@ -613,8 +499,6 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) {
ils.Metrics().EnsureCapacity(mb.metricsCapacity)
mb.metricProcessCPUTime.emit(ils.Metrics())
mb.metricProcessDiskIo.emit(ils.Metrics())
- mb.metricProcessDiskIoRead.emit(ils.Metrics())
- mb.metricProcessDiskIoWrite.emit(ils.Metrics())
mb.metricProcessMemoryPhysicalUsage.emit(ils.Metrics())
mb.metricProcessMemoryVirtualUsage.emit(ils.Metrics())
mb.metricProcessThreads.emit(ils.Metrics())
@@ -647,16 +531,6 @@ func (mb *MetricsBuilder) RecordProcessDiskIoDataPoint(ts pcommon.Timestamp, val
mb.metricProcessDiskIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue.String())
}
-// RecordProcessDiskIoReadDataPoint adds a data point to process.disk.io.read metric.
-func (mb *MetricsBuilder) RecordProcessDiskIoReadDataPoint(ts pcommon.Timestamp, val int64) {
- mb.metricProcessDiskIoRead.recordDataPoint(mb.startTime, ts, val)
-}
-
-// RecordProcessDiskIoWriteDataPoint adds a data point to process.disk.io.write metric.
-func (mb *MetricsBuilder) RecordProcessDiskIoWriteDataPoint(ts pcommon.Timestamp, val int64) {
- mb.metricProcessDiskIoWrite.recordDataPoint(mb.startTime, ts, val)
-}
-
// RecordProcessMemoryPhysicalUsageDataPoint adds a data point to process.memory.physical_usage metric.
func (mb *MetricsBuilder) RecordProcessMemoryPhysicalUsageDataPoint(ts pcommon.Timestamp, val int64) {
mb.metricProcessMemoryPhysicalUsage.recordDataPoint(mb.startTime, ts, val)
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/metadata.yaml b/receiver/hostmetricsreceiver/internal/scraper/processscraper/metadata.yaml
index 711830fd5452..88549997c1d5 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/metadata.yaml
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/metadata.yaml
@@ -76,7 +76,6 @@ metrics:
aggregation: cumulative
monotonic: false
- # produced when receiver.hostmetricsreceiver.emitMetricsWithDirectionAttribute feature gate is enabled
process.disk.io:
enabled: true
description: Disk bytes transferred.
@@ -87,26 +86,6 @@ metrics:
monotonic: true
attributes: [direction]
- # produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- process.disk.io.read:
- enabled: true
- description: Disk bytes read.
- unit: By
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
-
- # produced when receiver.hostmetricsreceiver.emitMetricsWithoutDirectionAttribute feature gate is enabled
- process.disk.io.write:
- enabled: true
- description: Disk bytes written.
- unit: By
- sum:
- value_type: int
- aggregation: cumulative
- monotonic: true
-
process.threads:
enabled: false
description: Process threads count.
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go
index 4e5c6ae0a313..ade3e1fb7396 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go
@@ -21,13 +21,11 @@ import (
"time"
"go.opentelemetry.io/collector/component"
- "go.opentelemetry.io/collector/featuregate"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/receiver/scrapererror"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset"
- "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata"
)
@@ -49,22 +47,18 @@ type scraper struct {
excludeFS filterset.FilterSet
scrapeProcessDelay time.Duration
// for mocking
- getProcessCreateTime func(p processHandle) (int64, error)
- getProcessHandles func() (processHandles, error)
- emitMetricsWithDirectionAttribute bool
- emitMetricsWithoutDirectionAttribute bool
+ getProcessCreateTime func(p processHandle) (int64, error)
+ getProcessHandles func() (processHandles, error)
}
// newProcessScraper creates a Process Scraper
func newProcessScraper(settings component.ReceiverCreateSettings, cfg *Config) (*scraper, error) {
scraper := &scraper{
- settings: settings,
- config: cfg,
- getProcessCreateTime: processHandle.CreateTime,
- getProcessHandles: getProcessHandlesInternal,
- emitMetricsWithDirectionAttribute: featuregate.GetRegistry().IsEnabled(internal.EmitMetricsWithDirectionAttributeFeatureGateID),
- emitMetricsWithoutDirectionAttribute: featuregate.GetRegistry().IsEnabled(internal.EmitMetricsWithoutDirectionAttributeFeatureGateID),
- scrapeProcessDelay: cfg.ScrapeProcessDelay,
+ settings: settings,
+ config: cfg,
+ getProcessCreateTime: processHandle.CreateTime,
+ getProcessHandles: getProcessHandlesInternal,
+ scrapeProcessDelay: cfg.ScrapeProcessDelay,
}
var err error
@@ -229,14 +223,8 @@ func (s *scraper) scrapeAndAppendDiskIOMetric(now pcommon.Timestamp, handle proc
return err
}
- if s.emitMetricsWithoutDirectionAttribute {
- s.mb.RecordProcessDiskIoReadDataPoint(now, int64(io.ReadBytes))
- s.mb.RecordProcessDiskIoWriteDataPoint(now, int64(io.WriteBytes))
- }
- if s.emitMetricsWithDirectionAttribute {
- s.mb.RecordProcessDiskIoDataPoint(now, int64(io.ReadBytes), metadata.AttributeDirectionRead)
- s.mb.RecordProcessDiskIoDataPoint(now, int64(io.WriteBytes), metadata.AttributeDirectionWrite)
- }
+ s.mb.RecordProcessDiskIoDataPoint(now, int64(io.ReadBytes), metadata.AttributeDirectionRead)
+ s.mb.RecordProcessDiskIoDataPoint(now, int64(io.WriteBytes), metadata.AttributeDirectionWrite)
return nil
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go
index 74ca56dfe71b..cd6f11789ff6 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go
@@ -47,35 +47,13 @@ func skipTestOnUnsupportedOS(t *testing.T) {
func TestScrape(t *testing.T) {
skipTestOnUnsupportedOS(t)
type testCase struct {
- name string
- expectMetricsWithDirectionAttribute bool
- expectMetricsWithoutDirectionAttribute bool
- expectThreadsCount bool
- mutateScraper func(*scraper)
+ name string
+ expectThreadsCount bool
+ mutateScraper func(*scraper)
}
testCases := []testCase{
{
- name: "Standard",
- expectMetricsWithDirectionAttribute: true,
- expectMetricsWithoutDirectionAttribute: false,
- },
- {
- name: "Standard with direction removed",
- expectMetricsWithDirectionAttribute: false,
- expectMetricsWithoutDirectionAttribute: true,
- mutateScraper: func(s *scraper) {
- s.emitMetricsWithDirectionAttribute = false
- s.emitMetricsWithoutDirectionAttribute = true
- },
- },
- {
- name: "Emit both old and new metrics",
- expectMetricsWithDirectionAttribute: true,
- expectMetricsWithoutDirectionAttribute: true,
- mutateScraper: func(s *scraper) {
- s.emitMetricsWithDirectionAttribute = true
- s.emitMetricsWithoutDirectionAttribute = true
- },
+ name: "Standard",
},
{
name: "With threads count",
@@ -122,12 +100,7 @@ func TestScrape(t *testing.T) {
assertProcessResourceAttributesExist(t, md.ResourceMetrics())
assertCPUTimeMetricValid(t, md.ResourceMetrics(), expectedStartTime)
assertMemoryUsageMetricValid(t, md.ResourceMetrics(), expectedStartTime)
- if test.expectMetricsWithDirectionAttribute {
- assertOldDiskIOMetricValid(t, md.ResourceMetrics(), expectedStartTime)
- }
- if test.expectMetricsWithoutDirectionAttribute {
- assertNewDiskIOMetricValid(t, md.ResourceMetrics(), expectedStartTime)
- }
+ assertOldDiskIOMetricValid(t, md.ResourceMetrics(), expectedStartTime)
if test.expectThreadsCount {
assertThreadsCountValid(t, md.ResourceMetrics(), expectedStartTime)
} else {
@@ -179,16 +152,6 @@ func assertMemoryUsageMetricValid(t *testing.T, resourceMetrics pmetric.Resource
}
}
-func assertNewDiskIOMetricValid(t *testing.T, resourceMetrics pmetric.ResourceMetricsSlice,
- startTime pcommon.Timestamp) {
- for _, metricName := range []string{"process.disk.io.read", "process.disk.io.write"} {
- diskIOMetric := getMetric(t, metricName, resourceMetrics)
- if startTime != 0 {
- internal.AssertSumMetricStartTimeEquals(t, diskIOMetric, startTime)
- }
- }
-}
-
func assertThreadsCountValid(t *testing.T, resourceMetrics pmetric.ResourceMetricsSlice, startTime pcommon.Timestamp) {
for _, metricName := range []string{"process.threads"} {
threadsMetric := getMetric(t, metricName, resourceMetrics)