diff --git a/receiver/memcachedreceiver/config.go b/receiver/memcachedreceiver/config.go
index d73d1cf722b3..210df325bb84 100644
--- a/receiver/memcachedreceiver/config.go
+++ b/receiver/memcachedreceiver/config.go
@@ -19,6 +19,8 @@ import (
"go.opentelemetry.io/collector/config/confignet"
"go.opentelemetry.io/collector/receiver/scraperhelper"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/memcachedreceiver/internal/metadata"
)
type Config struct {
@@ -27,4 +29,7 @@ type Config struct {
// Timeout for the memcache stats request
Timeout time.Duration `mapstructure:"timeout"`
+
+ // Metrics allows customizing scraped metrics representation.
+ Metrics metadata.MetricsSettings `mapstructure:"metrics"`
}
diff --git a/receiver/memcachedreceiver/doc.go b/receiver/memcachedreceiver/doc.go
index 9173ba0e5bef..cb19b090fa2d 100644
--- a/receiver/memcachedreceiver/doc.go
+++ b/receiver/memcachedreceiver/doc.go
@@ -15,6 +15,6 @@
//go:build !windows
// +build !windows
-//go:generate mdatagen metadata.yaml
+//go:generate mdatagen --experimental-gen metadata.yaml
package memcachedreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/memcachedreceiver"
diff --git a/receiver/memcachedreceiver/documentation.md b/receiver/memcachedreceiver/documentation.md
index 578d45cd009a..d6a59f1c6c9b 100644
--- a/receiver/memcachedreceiver/documentation.md
+++ b/receiver/memcachedreceiver/documentation.md
@@ -20,7 +20,14 @@ These are the metrics available for this scraper.
| **memcached.operations** | Operation counts. | {operations} | Sum(Int) | <ul> <li>type</li> <li>operation</li> </ul> |
| **memcached.threads** | Number of threads used by the memcached instance. | {threads} | Sum(Int) | |
-**Highlighted metrics** are emitted by default.
+**Highlighted metrics** are emitted by default. Other metrics are optional and not emitted by default.
+Any metric can be enabled or disabled with the following scraper configuration:
+
+```yaml
+metrics:
+ :
+ enabled:
+```
## Metric attributes
diff --git a/receiver/memcachedreceiver/factory.go b/receiver/memcachedreceiver/factory.go
index a90e2f701240..4d647e1b5da9 100644
--- a/receiver/memcachedreceiver/factory.go
+++ b/receiver/memcachedreceiver/factory.go
@@ -14,8 +14,6 @@
package memcachedreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/memcachedreceiver"
-//go:generate mdatagen metadata.yaml
-
import (
"context"
"time"
@@ -25,6 +23,8 @@ import (
"go.opentelemetry.io/collector/config/confignet"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/receiver/scraperhelper"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/memcachedreceiver/internal/metadata"
)
const (
@@ -49,6 +49,7 @@ func createDefaultConfig() config.Receiver {
NetAddr: confignet.NetAddr{
Endpoint: "localhost:11211",
},
+ Metrics: metadata.DefaultMetricsSettings(),
}
}
diff --git a/receiver/memcachedreceiver/internal/metadata/generated_metrics.go b/receiver/memcachedreceiver/internal/metadata/generated_metrics.go
deleted file mode 100644
index 241f5266bcfd..000000000000
--- a/receiver/memcachedreceiver/internal/metadata/generated_metrics.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Code generated by mdatagen. DO NOT EDIT.
-
-package metadata
-
-import (
- "go.opentelemetry.io/collector/config"
- "go.opentelemetry.io/collector/pdata/pmetric"
-)
-
-// Type is the component type name.
-const Type config.Type = "memcachedreceiver"
-
-// MetricIntf is an interface to generically interact with generated metric.
-type MetricIntf interface {
- Name() string
- New() pmetric.Metric
- Init(metric pmetric.Metric)
-}
-
-// Intentionally not exposing this so that it is opaque and can change freely.
-type metricImpl struct {
- name string
- initFunc func(pmetric.Metric)
-}
-
-// Name returns the metric name.
-func (m *metricImpl) Name() string {
- return m.name
-}
-
-// New creates a metric object preinitialized.
-func (m *metricImpl) New() pmetric.Metric {
- metric := pmetric.NewMetric()
- m.Init(metric)
- return metric
-}
-
-// Init initializes the provided metric object.
-func (m *metricImpl) Init(metric pmetric.Metric) {
- m.initFunc(metric)
-}
-
-type metricStruct struct {
- MemcachedBytes MetricIntf
- MemcachedCommands MetricIntf
- MemcachedConnectionsCurrent MetricIntf
- MemcachedConnectionsTotal MetricIntf
- MemcachedCPUUsage MetricIntf
- MemcachedCurrentItems MetricIntf
- MemcachedEvictions MetricIntf
- MemcachedNetwork MetricIntf
- MemcachedOperationHitRatio MetricIntf
- MemcachedOperations MetricIntf
- MemcachedThreads MetricIntf
-}
-
-// Names returns a list of all the metric name strings.
-func (m *metricStruct) Names() []string {
- return []string{
- "memcached.bytes",
- "memcached.commands",
- "memcached.connections.current",
- "memcached.connections.total",
- "memcached.cpu.usage",
- "memcached.current_items",
- "memcached.evictions",
- "memcached.network",
- "memcached.operation_hit_ratio",
- "memcached.operations",
- "memcached.threads",
- }
-}
-
-var metricsByName = map[string]MetricIntf{
- "memcached.bytes": Metrics.MemcachedBytes,
- "memcached.commands": Metrics.MemcachedCommands,
- "memcached.connections.current": Metrics.MemcachedConnectionsCurrent,
- "memcached.connections.total": Metrics.MemcachedConnectionsTotal,
- "memcached.cpu.usage": Metrics.MemcachedCPUUsage,
- "memcached.current_items": Metrics.MemcachedCurrentItems,
- "memcached.evictions": Metrics.MemcachedEvictions,
- "memcached.network": Metrics.MemcachedNetwork,
- "memcached.operation_hit_ratio": Metrics.MemcachedOperationHitRatio,
- "memcached.operations": Metrics.MemcachedOperations,
- "memcached.threads": Metrics.MemcachedThreads,
-}
-
-func (m *metricStruct) ByName(n string) MetricIntf {
- return metricsByName[n]
-}
-
-// Metrics contains a set of methods for each metric that help with
-// manipulating those metrics.
-var Metrics = &metricStruct{
- &metricImpl{
- "memcached.bytes",
- func(metric pmetric.Metric) {
- metric.SetName("memcached.bytes")
- metric.SetDescription("Current number of bytes used by this server to store items.")
- metric.SetUnit("By")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "memcached.commands",
- func(metric pmetric.Metric) {
- metric.SetName("memcached.commands")
- metric.SetDescription("Commands executed.")
- metric.SetUnit("{commands}")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "memcached.connections.current",
- func(metric pmetric.Metric) {
- metric.SetName("memcached.connections.current")
- metric.SetDescription("The current number of open connections.")
- metric.SetUnit("{connections}")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(false)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "memcached.connections.total",
- func(metric pmetric.Metric) {
- metric.SetName("memcached.connections.total")
- metric.SetDescription("Total number of connections opened since the server started running.")
- metric.SetUnit("{connections}")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "memcached.cpu.usage",
- func(metric pmetric.Metric) {
- metric.SetName("memcached.cpu.usage")
- metric.SetDescription("Accumulated user and system time.")
- metric.SetUnit("s")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "memcached.current_items",
- func(metric pmetric.Metric) {
- metric.SetName("memcached.current_items")
- metric.SetDescription("Number of items currently stored in the cache.")
- metric.SetUnit("{items}")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(false)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "memcached.evictions",
- func(metric pmetric.Metric) {
- metric.SetName("memcached.evictions")
- metric.SetDescription("Cache item evictions.")
- metric.SetUnit("{evictions}")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "memcached.network",
- func(metric pmetric.Metric) {
- metric.SetName("memcached.network")
- metric.SetDescription("Bytes transferred over the network.")
- metric.SetUnit("by")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "memcached.operation_hit_ratio",
- func(metric pmetric.Metric) {
- metric.SetName("memcached.operation_hit_ratio")
- metric.SetDescription("Hit ratio for operations, expressed as a percentage value between 0.0 and 100.0.")
- metric.SetUnit("%")
- metric.SetDataType(pmetric.MetricDataTypeGauge)
- },
- },
- &metricImpl{
- "memcached.operations",
- func(metric pmetric.Metric) {
- metric.SetName("memcached.operations")
- metric.SetDescription("Operation counts.")
- metric.SetUnit("{operations}")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(true)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
- &metricImpl{
- "memcached.threads",
- func(metric pmetric.Metric) {
- metric.SetName("memcached.threads")
- metric.SetDescription("Number of threads used by the memcached instance.")
- metric.SetUnit("{threads}")
- metric.SetDataType(pmetric.MetricDataTypeSum)
- metric.Sum().SetIsMonotonic(false)
- metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
- },
- },
-}
-
-// M contains a set of methods for each metric that help with
-// manipulating those metrics. M is an alias for Metrics
-var M = Metrics
-
-// Attributes contains the possible metric attributes that can be used.
-var Attributes = struct {
- // Command (The type of command.)
- Command string
- // Direction (Direction of data flow.)
- Direction string
- // Operation (The type of operation.)
- Operation string
- // State (The type of CPU usage.)
- State string
- // Type (Result of cache request.)
- Type string
-}{
- "command",
- "direction",
- "operation",
- "state",
- "type",
-}
-
-// A is an alias for Attributes.
-var A = Attributes
-
-// AttributeCommand are the possible values that the attribute "command" can have.
-var AttributeCommand = struct {
- Get string
- Set string
- Flush string
- Touch string
-}{
- "get",
- "set",
- "flush",
- "touch",
-}
-
-// AttributeDirection are the possible values that the attribute "direction" can have.
-var AttributeDirection = struct {
- Sent string
- Received string
-}{
- "sent",
- "received",
-}
-
-// AttributeOperation are the possible values that the attribute "operation" can have.
-var AttributeOperation = struct {
- Increment string
- Decrement string
- Get string
-}{
- "increment",
- "decrement",
- "get",
-}
-
-// AttributeState are the possible values that the attribute "state" can have.
-var AttributeState = struct {
- System string
- User string
-}{
- "system",
- "user",
-}
-
-// AttributeType are the possible values that the attribute "type" can have.
-var AttributeType = struct {
- Hit string
- Miss string
-}{
- "hit",
- "miss",
-}
diff --git a/receiver/memcachedreceiver/internal/metadata/generated_metrics_v2.go b/receiver/memcachedreceiver/internal/metadata/generated_metrics_v2.go
new file mode 100644
index 000000000000..529d8842fde2
--- /dev/null
+++ b/receiver/memcachedreceiver/internal/metadata/generated_metrics_v2.go
@@ -0,0 +1,879 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+ "time"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/pmetric"
+)
+
+// MetricSettings provides common settings for a particular metric.
+type MetricSettings struct {
+ Enabled bool `mapstructure:"enabled"`
+}
+
+// MetricsSettings provides settings for memcachedreceiver metrics.
+type MetricsSettings struct {
+ MemcachedBytes MetricSettings `mapstructure:"memcached.bytes"`
+ MemcachedCommands MetricSettings `mapstructure:"memcached.commands"`
+ MemcachedConnectionsCurrent MetricSettings `mapstructure:"memcached.connections.current"`
+ MemcachedConnectionsTotal MetricSettings `mapstructure:"memcached.connections.total"`
+ MemcachedCPUUsage MetricSettings `mapstructure:"memcached.cpu.usage"`
+ MemcachedCurrentItems MetricSettings `mapstructure:"memcached.current_items"`
+ MemcachedEvictions MetricSettings `mapstructure:"memcached.evictions"`
+ MemcachedNetwork MetricSettings `mapstructure:"memcached.network"`
+ MemcachedOperationHitRatio MetricSettings `mapstructure:"memcached.operation_hit_ratio"`
+ MemcachedOperations MetricSettings `mapstructure:"memcached.operations"`
+ MemcachedThreads MetricSettings `mapstructure:"memcached.threads"`
+}
+
+func DefaultMetricsSettings() MetricsSettings {
+ return MetricsSettings{
+ MemcachedBytes: MetricSettings{
+ Enabled: true,
+ },
+ MemcachedCommands: MetricSettings{
+ Enabled: true,
+ },
+ MemcachedConnectionsCurrent: MetricSettings{
+ Enabled: true,
+ },
+ MemcachedConnectionsTotal: MetricSettings{
+ Enabled: true,
+ },
+ MemcachedCPUUsage: MetricSettings{
+ Enabled: true,
+ },
+ MemcachedCurrentItems: MetricSettings{
+ Enabled: true,
+ },
+ MemcachedEvictions: MetricSettings{
+ Enabled: true,
+ },
+ MemcachedNetwork: MetricSettings{
+ Enabled: true,
+ },
+ MemcachedOperationHitRatio: MetricSettings{
+ Enabled: true,
+ },
+ MemcachedOperations: MetricSettings{
+ Enabled: true,
+ },
+ MemcachedThreads: MetricSettings{
+ Enabled: true,
+ },
+ }
+}
+
+type metricMemcachedBytes struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills memcached.bytes metric with initial data.
+func (m *metricMemcachedBytes) init() {
+ m.data.SetName("memcached.bytes")
+ m.data.SetDescription("Current number of bytes used by this server to store items.")
+ m.data.SetUnit("By")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+}
+
+func (m *metricMemcachedBytes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMemcachedBytes) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMemcachedBytes) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricMemcachedBytes(settings MetricSettings) metricMemcachedBytes {
+ m := metricMemcachedBytes{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricMemcachedCommands struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills memcached.commands metric with initial data.
+func (m *metricMemcachedCommands) init() {
+ m.data.SetName("memcached.commands")
+ m.data.SetDescription("Commands executed.")
+ m.data.SetUnit("{commands}")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMemcachedCommands) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, commandAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert(A.Command, pcommon.NewValueString(commandAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMemcachedCommands) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMemcachedCommands) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricMemcachedCommands(settings MetricSettings) metricMemcachedCommands {
+ m := metricMemcachedCommands{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricMemcachedConnectionsCurrent struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills memcached.connections.current metric with initial data.
+func (m *metricMemcachedConnectionsCurrent) init() {
+ m.data.SetName("memcached.connections.current")
+ m.data.SetDescription("The current number of open connections.")
+ m.data.SetUnit("{connections}")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(false)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricMemcachedConnectionsCurrent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMemcachedConnectionsCurrent) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMemcachedConnectionsCurrent) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricMemcachedConnectionsCurrent(settings MetricSettings) metricMemcachedConnectionsCurrent {
+ m := metricMemcachedConnectionsCurrent{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricMemcachedConnectionsTotal struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills memcached.connections.total metric with initial data.
+func (m *metricMemcachedConnectionsTotal) init() {
+ m.data.SetName("memcached.connections.total")
+ m.data.SetDescription("Total number of connections opened since the server started running.")
+ m.data.SetUnit("{connections}")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricMemcachedConnectionsTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMemcachedConnectionsTotal) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMemcachedConnectionsTotal) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricMemcachedConnectionsTotal(settings MetricSettings) metricMemcachedConnectionsTotal {
+ m := metricMemcachedConnectionsTotal{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricMemcachedCPUUsage struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills memcached.cpu.usage metric with initial data.
+func (m *metricMemcachedCPUUsage) init() {
+ m.data.SetName("memcached.cpu.usage")
+ m.data.SetDescription("Accumulated user and system time.")
+ m.data.SetUnit("s")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMemcachedCPUUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, stateAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleVal(val)
+ dp.Attributes().Insert(A.State, pcommon.NewValueString(stateAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMemcachedCPUUsage) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMemcachedCPUUsage) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricMemcachedCPUUsage(settings MetricSettings) metricMemcachedCPUUsage {
+ m := metricMemcachedCPUUsage{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricMemcachedCurrentItems struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills memcached.current_items metric with initial data.
+func (m *metricMemcachedCurrentItems) init() {
+ m.data.SetName("memcached.current_items")
+ m.data.SetDescription("Number of items currently stored in the cache.")
+ m.data.SetUnit("{items}")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(false)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricMemcachedCurrentItems) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMemcachedCurrentItems) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMemcachedCurrentItems) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricMemcachedCurrentItems(settings MetricSettings) metricMemcachedCurrentItems {
+ m := metricMemcachedCurrentItems{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricMemcachedEvictions struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills memcached.evictions metric with initial data.
+func (m *metricMemcachedEvictions) init() {
+ m.data.SetName("memcached.evictions")
+ m.data.SetDescription("Cache item evictions.")
+ m.data.SetUnit("{evictions}")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricMemcachedEvictions) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMemcachedEvictions) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMemcachedEvictions) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricMemcachedEvictions(settings MetricSettings) metricMemcachedEvictions {
+ m := metricMemcachedEvictions{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricMemcachedNetwork struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills memcached.network metric with initial data.
+func (m *metricMemcachedNetwork) init() {
+ m.data.SetName("memcached.network")
+ m.data.SetDescription("Bytes transferred over the network.")
+ m.data.SetUnit("by")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMemcachedNetwork) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, directionAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMemcachedNetwork) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMemcachedNetwork) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricMemcachedNetwork(settings MetricSettings) metricMemcachedNetwork {
+ m := metricMemcachedNetwork{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricMemcachedOperationHitRatio struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills memcached.operation_hit_ratio metric with initial data.
+func (m *metricMemcachedOperationHitRatio) init() {
+ m.data.SetName("memcached.operation_hit_ratio")
+ m.data.SetDescription("Hit ratio for operations, expressed as a percentage value between 0.0 and 100.0.")
+ m.data.SetUnit("%")
+ m.data.SetDataType(pmetric.MetricDataTypeGauge)
+ m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMemcachedOperationHitRatio) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, operationAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleVal(val)
+ dp.Attributes().Insert(A.Operation, pcommon.NewValueString(operationAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMemcachedOperationHitRatio) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMemcachedOperationHitRatio) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricMemcachedOperationHitRatio(settings MetricSettings) metricMemcachedOperationHitRatio {
+ m := metricMemcachedOperationHitRatio{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricMemcachedOperations struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills memcached.operations metric with initial data.
+func (m *metricMemcachedOperations) init() {
+ m.data.SetName("memcached.operations")
+ m.data.SetDescription("Operation counts.")
+ m.data.SetUnit("{operations}")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(true)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+ m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMemcachedOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, typeAttributeValue string, operationAttributeValue string) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+ dp.Attributes().Insert(A.Type, pcommon.NewValueString(typeAttributeValue))
+ dp.Attributes().Insert(A.Operation, pcommon.NewValueString(operationAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMemcachedOperations) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMemcachedOperations) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricMemcachedOperations(settings MetricSettings) metricMemcachedOperations {
+ m := metricMemcachedOperations{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricMemcachedThreads struct {
+ data pmetric.Metric // data buffer for generated metric.
+ settings MetricSettings // metric settings provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills memcached.threads metric with initial data.
+func (m *metricMemcachedThreads) init() {
+ m.data.SetName("memcached.threads")
+ m.data.SetDescription("Number of threads used by the memcached instance.")
+ m.data.SetUnit("{threads}")
+ m.data.SetDataType(pmetric.MetricDataTypeSum)
+ m.data.Sum().SetIsMonotonic(false)
+ m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
+}
+
+func (m *metricMemcachedThreads) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
+ if !m.settings.Enabled {
+ return
+ }
+ dp := m.data.Sum().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMemcachedThreads) updateCapacity() {
+ if m.data.Sum().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Sum().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMemcachedThreads) emit(metrics pmetric.MetricSlice) {
+ if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricMemcachedThreads(settings MetricSettings) metricMemcachedThreads {
+ m := metricMemcachedThreads{settings: settings}
+ if settings.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
+// required to produce metric representation defined in metadata and user settings.
+type MetricsBuilder struct {
+ startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
+ metricsCapacity int // maximum observed number of metrics per resource.
+ resourceCapacity int // maximum observed number of resource attributes.
+ metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
+ metricMemcachedBytes metricMemcachedBytes
+ metricMemcachedCommands metricMemcachedCommands
+ metricMemcachedConnectionsCurrent metricMemcachedConnectionsCurrent
+ metricMemcachedConnectionsTotal metricMemcachedConnectionsTotal
+ metricMemcachedCPUUsage metricMemcachedCPUUsage
+ metricMemcachedCurrentItems metricMemcachedCurrentItems
+ metricMemcachedEvictions metricMemcachedEvictions
+ metricMemcachedNetwork metricMemcachedNetwork
+ metricMemcachedOperationHitRatio metricMemcachedOperationHitRatio
+ metricMemcachedOperations metricMemcachedOperations
+ metricMemcachedThreads metricMemcachedThreads
+}
+
+// metricBuilderOption applies changes to default metrics builder.
+type metricBuilderOption func(*MetricsBuilder)
+
+// WithStartTime sets startTime on the metrics builder.
+func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption {
+ return func(mb *MetricsBuilder) {
+ mb.startTime = startTime
+ }
+}
+
+func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder {
+ mb := &MetricsBuilder{
+ startTime: pcommon.NewTimestampFromTime(time.Now()),
+ metricsBuffer: pmetric.NewMetrics(),
+ metricMemcachedBytes: newMetricMemcachedBytes(settings.MemcachedBytes),
+ metricMemcachedCommands: newMetricMemcachedCommands(settings.MemcachedCommands),
+ metricMemcachedConnectionsCurrent: newMetricMemcachedConnectionsCurrent(settings.MemcachedConnectionsCurrent),
+ metricMemcachedConnectionsTotal: newMetricMemcachedConnectionsTotal(settings.MemcachedConnectionsTotal),
+ metricMemcachedCPUUsage: newMetricMemcachedCPUUsage(settings.MemcachedCPUUsage),
+ metricMemcachedCurrentItems: newMetricMemcachedCurrentItems(settings.MemcachedCurrentItems),
+ metricMemcachedEvictions: newMetricMemcachedEvictions(settings.MemcachedEvictions),
+ metricMemcachedNetwork: newMetricMemcachedNetwork(settings.MemcachedNetwork),
+ metricMemcachedOperationHitRatio: newMetricMemcachedOperationHitRatio(settings.MemcachedOperationHitRatio),
+ metricMemcachedOperations: newMetricMemcachedOperations(settings.MemcachedOperations),
+ metricMemcachedThreads: newMetricMemcachedThreads(settings.MemcachedThreads),
+ }
+ for _, op := range options {
+ op(mb)
+ }
+ return mb
+}
+
+// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity.
+func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
+ if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
+ mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
+ }
+ if mb.resourceCapacity < rm.Resource().Attributes().Len() {
+ mb.resourceCapacity = rm.Resource().Attributes().Len()
+ }
+}
+
+// ResourceOption applies changes to provided resource.
+type ResourceOption func(pcommon.Resource)
+
+// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
+// recording another set of data points as part of another resource. This function can be helpful when one scraper
+// needs to emit metrics from several resources. Otherwise calling this function is not required,
+// just the `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments.
+func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) {
+ rm := pmetric.NewResourceMetrics()
+ rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity)
+ for _, op := range ro {
+ op(rm.Resource())
+ }
+ ils := rm.ScopeMetrics().AppendEmpty()
+ ils.Scope().SetName("otelcol/memcachedreceiver")
+ ils.Metrics().EnsureCapacity(mb.metricsCapacity)
+ mb.metricMemcachedBytes.emit(ils.Metrics())
+ mb.metricMemcachedCommands.emit(ils.Metrics())
+ mb.metricMemcachedConnectionsCurrent.emit(ils.Metrics())
+ mb.metricMemcachedConnectionsTotal.emit(ils.Metrics())
+ mb.metricMemcachedCPUUsage.emit(ils.Metrics())
+ mb.metricMemcachedCurrentItems.emit(ils.Metrics())
+ mb.metricMemcachedEvictions.emit(ils.Metrics())
+ mb.metricMemcachedNetwork.emit(ils.Metrics())
+ mb.metricMemcachedOperationHitRatio.emit(ils.Metrics())
+ mb.metricMemcachedOperations.emit(ils.Metrics())
+ mb.metricMemcachedThreads.emit(ils.Metrics())
+ if ils.Metrics().Len() > 0 {
+ mb.updateCapacity(rm)
+ rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
+ }
+}
+
+// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
+// recording another set of metrics. This function will be responsible for applying all the transformations required to
+// produce metric representation defined in metadata and user settings, e.g. delta or cumulative.
+func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
+ mb.EmitForResource(ro...)
+ metrics := pmetric.NewMetrics()
+ mb.metricsBuffer.MoveTo(metrics)
+ return metrics
+}
+
+// RecordMemcachedBytesDataPoint adds a data point to memcached.bytes metric.
+func (mb *MetricsBuilder) RecordMemcachedBytesDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricMemcachedBytes.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordMemcachedCommandsDataPoint adds a data point to memcached.commands metric.
+func (mb *MetricsBuilder) RecordMemcachedCommandsDataPoint(ts pcommon.Timestamp, val int64, commandAttributeValue string) {
+ mb.metricMemcachedCommands.recordDataPoint(mb.startTime, ts, val, commandAttributeValue)
+}
+
+// RecordMemcachedConnectionsCurrentDataPoint adds a data point to memcached.connections.current metric.
+func (mb *MetricsBuilder) RecordMemcachedConnectionsCurrentDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricMemcachedConnectionsCurrent.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordMemcachedConnectionsTotalDataPoint adds a data point to memcached.connections.total metric.
+func (mb *MetricsBuilder) RecordMemcachedConnectionsTotalDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricMemcachedConnectionsTotal.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordMemcachedCPUUsageDataPoint adds a data point to memcached.cpu.usage metric.
+func (mb *MetricsBuilder) RecordMemcachedCPUUsageDataPoint(ts pcommon.Timestamp, val float64, stateAttributeValue string) {
+ mb.metricMemcachedCPUUsage.recordDataPoint(mb.startTime, ts, val, stateAttributeValue)
+}
+
+// RecordMemcachedCurrentItemsDataPoint adds a data point to memcached.current_items metric.
+func (mb *MetricsBuilder) RecordMemcachedCurrentItemsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricMemcachedCurrentItems.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordMemcachedEvictionsDataPoint adds a data point to memcached.evictions metric.
+func (mb *MetricsBuilder) RecordMemcachedEvictionsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricMemcachedEvictions.recordDataPoint(mb.startTime, ts, val)
+}
+
+// RecordMemcachedNetworkDataPoint adds a data point to memcached.network metric.
+func (mb *MetricsBuilder) RecordMemcachedNetworkDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue string) {
+ mb.metricMemcachedNetwork.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+}
+
+// RecordMemcachedOperationHitRatioDataPoint adds a data point to memcached.operation_hit_ratio metric.
+func (mb *MetricsBuilder) RecordMemcachedOperationHitRatioDataPoint(ts pcommon.Timestamp, val float64, operationAttributeValue string) {
+ mb.metricMemcachedOperationHitRatio.recordDataPoint(mb.startTime, ts, val, operationAttributeValue)
+}
+
+// RecordMemcachedOperationsDataPoint adds a data point to memcached.operations metric.
+func (mb *MetricsBuilder) RecordMemcachedOperationsDataPoint(ts pcommon.Timestamp, val int64, typeAttributeValue string, operationAttributeValue string) {
+ mb.metricMemcachedOperations.recordDataPoint(mb.startTime, ts, val, typeAttributeValue, operationAttributeValue)
+}
+
+// RecordMemcachedThreadsDataPoint adds a data point to memcached.threads metric.
+func (mb *MetricsBuilder) RecordMemcachedThreadsDataPoint(ts pcommon.Timestamp, val int64) {
+ mb.metricMemcachedThreads.recordDataPoint(mb.startTime, ts, val)
+}
+
+// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
+// and metrics builder should update its startTime and reset its internal state accordingly.
+func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
+ mb.startTime = pcommon.NewTimestampFromTime(time.Now())
+ for _, op := range options {
+ op(mb)
+ }
+}
+
+// Attributes contains the possible metric attributes that can be used.
+var Attributes = struct {
+ // Command (The type of command.)
+ Command string
+ // Direction (Direction of data flow.)
+ Direction string
+ // Operation (The type of operation.)
+ Operation string
+ // State (The type of CPU usage.)
+ State string
+ // Type (Result of cache request.)
+ Type string
+}{
+ "command",
+ "direction",
+ "operation",
+ "state",
+ "type",
+}
+
+// A is an alias for Attributes.
+var A = Attributes
+
+// AttributeCommand are the possible values that the attribute "command" can have.
+var AttributeCommand = struct {
+ Get string
+ Set string
+ Flush string
+ Touch string
+}{
+ "get",
+ "set",
+ "flush",
+ "touch",
+}
+
+// AttributeDirection are the possible values that the attribute "direction" can have.
+var AttributeDirection = struct {
+ Sent string
+ Received string
+}{
+ "sent",
+ "received",
+}
+
+// AttributeOperation are the possible values that the attribute "operation" can have.
+var AttributeOperation = struct {
+ Increment string
+ Decrement string
+ Get string
+}{
+ "increment",
+ "decrement",
+ "get",
+}
+
+// AttributeState are the possible values that the attribute "state" can have.
+var AttributeState = struct {
+ System string
+ User string
+}{
+ "system",
+ "user",
+}
+
+// AttributeType are the possible values that the attribute "type" can have.
+var AttributeType = struct {
+ Hit string
+ Miss string
+}{
+ "hit",
+ "miss",
+}
diff --git a/receiver/memcachedreceiver/scraper.go b/receiver/memcachedreceiver/scraper.go
index 3673219bb670..d3a1f9a2bba8 100644
--- a/receiver/memcachedreceiver/scraper.go
+++ b/receiver/memcachedreceiver/scraper.go
@@ -29,6 +29,7 @@ import (
type memcachedScraper struct {
logger *zap.Logger
config *Config
+ mb *metadata.MetricsBuilder
newClient newMemcachedClientFunc
}
@@ -40,6 +41,7 @@ func newMemcachedScraper(
logger: logger,
config: config,
newClient: newMemcachedClient,
+ mb: metadata.NewMetricsBuilder(config.Metrics),
}
}
@@ -48,7 +50,7 @@ func (r *memcachedScraper) scrape(_ context.Context) (pmetric.Metrics, error) {
// constructor.
statsClient, err := r.newClient(r.config.Endpoint, r.config.Timeout)
if err != nil {
- r.logger.Error("Failed to estalbish client", zap.Error(err))
+ r.logger.Error("Failed to establish client", zap.Error(err))
return pmetric.Metrics{}, err
}
@@ -59,128 +61,92 @@ func (r *memcachedScraper) scrape(_ context.Context) (pmetric.Metrics, error) {
}
now := pcommon.NewTimestampFromTime(time.Now())
- md := pmetric.NewMetrics()
- ilm := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()
- ilm.Scope().SetName("otelcol/memcached")
-
- commandCount := initMetric(ilm.Metrics(), metadata.M.MemcachedCommands).Sum().DataPoints()
- rUsage := initMetric(ilm.Metrics(), metadata.M.MemcachedCPUUsage).Sum().DataPoints()
- network := initMetric(ilm.Metrics(), metadata.M.MemcachedNetwork).Sum().DataPoints()
- operationCount := initMetric(ilm.Metrics(), metadata.M.MemcachedOperations).Sum().DataPoints()
- hitRatio := initMetric(ilm.Metrics(), metadata.M.MemcachedOperationHitRatio).Gauge().DataPoints()
- bytes := initMetric(ilm.Metrics(), metadata.M.MemcachedBytes).Gauge().DataPoints()
- currConn := initMetric(ilm.Metrics(), metadata.M.MemcachedConnectionsCurrent).Sum().DataPoints()
- totalConn := initMetric(ilm.Metrics(), metadata.M.MemcachedConnectionsTotal).Sum().DataPoints()
- currItems := initMetric(ilm.Metrics(), metadata.M.MemcachedCurrentItems).Sum().DataPoints()
- threads := initMetric(ilm.Metrics(), metadata.M.MemcachedThreads).Sum().DataPoints()
- evictions := initMetric(ilm.Metrics(), metadata.M.MemcachedEvictions).Sum().DataPoints()
for _, stats := range allServerStats {
for k, v := range stats.Stats {
- attributes := pcommon.NewMap()
switch k {
case "bytes":
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(bytes, attributes, parsedV, now)
+ r.mb.RecordMemcachedBytesDataPoint(now, parsedV)
}
case "curr_connections":
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(currConn, attributes, parsedV, now)
+ r.mb.RecordMemcachedConnectionsCurrentDataPoint(now, parsedV)
}
case "total_connections":
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(totalConn, attributes, parsedV, now)
+ r.mb.RecordMemcachedConnectionsTotalDataPoint(now, parsedV)
}
case "cmd_get":
- attributes.Insert(metadata.A.Command, pcommon.NewValueString("get"))
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(commandCount, attributes, parsedV, now)
+ r.mb.RecordMemcachedCommandsDataPoint(now, parsedV, "get")
}
case "cmd_set":
- attributes.Insert(metadata.A.Command, pcommon.NewValueString("set"))
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(commandCount, attributes, parsedV, now)
+ r.mb.RecordMemcachedCommandsDataPoint(now, parsedV, "set")
}
case "cmd_flush":
- attributes.Insert(metadata.A.Command, pcommon.NewValueString("flush"))
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(commandCount, attributes, parsedV, now)
+ r.mb.RecordMemcachedCommandsDataPoint(now, parsedV, "flush")
}
case "cmd_touch":
- attributes.Insert(metadata.A.Command, pcommon.NewValueString("touch"))
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(commandCount, attributes, parsedV, now)
+ r.mb.RecordMemcachedCommandsDataPoint(now, parsedV, "touch")
}
case "curr_items":
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(currItems, attributes, parsedV, now)
+ r.mb.RecordMemcachedCurrentItemsDataPoint(now, parsedV)
}
case "threads":
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(threads, attributes, parsedV, now)
+ r.mb.RecordMemcachedThreadsDataPoint(now, parsedV)
}
case "evictions":
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(evictions, attributes, parsedV, now)
+ r.mb.RecordMemcachedEvictionsDataPoint(now, parsedV)
}
case "bytes_read":
- attributes.Insert(metadata.A.Direction, pcommon.NewValueString("received"))
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(network, attributes, parsedV, now)
+ r.mb.RecordMemcachedNetworkDataPoint(now, parsedV, "received")
}
case "bytes_written":
- attributes.Insert(metadata.A.Direction, pcommon.NewValueString("sent"))
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(network, attributes, parsedV, now)
+ r.mb.RecordMemcachedNetworkDataPoint(now, parsedV, "sent")
}
case "get_hits":
- attributes.Insert(metadata.A.Operation, pcommon.NewValueString("get"))
- attributes.Insert(metadata.A.Type, pcommon.NewValueString("hit"))
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(operationCount, attributes, parsedV, now)
+ r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, "hit", "get")
}
case "get_misses":
- attributes.Insert(metadata.A.Operation, pcommon.NewValueString("get"))
- attributes.Insert(metadata.A.Type, pcommon.NewValueString("miss"))
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(operationCount, attributes, parsedV, now)
+ r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, "miss", "get")
}
case "incr_hits":
- attributes.Insert(metadata.A.Operation, pcommon.NewValueString("increment"))
- attributes.Insert(metadata.A.Type, pcommon.NewValueString("hit"))
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(operationCount, attributes, parsedV, now)
+ r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, "hit", "increment")
}
case "incr_misses":
- attributes.Insert(metadata.A.Operation, pcommon.NewValueString("increment"))
- attributes.Insert(metadata.A.Type, pcommon.NewValueString("miss"))
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(operationCount, attributes, parsedV, now)
+ r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, "miss", "increment")
}
case "decr_hits":
- attributes.Insert(metadata.A.Operation, pcommon.NewValueString("decrement"))
- attributes.Insert(metadata.A.Type, pcommon.NewValueString("hit"))
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(operationCount, attributes, parsedV, now)
+ r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, "hit", "decrement")
}
case "decr_misses":
- attributes.Insert(metadata.A.Operation, pcommon.NewValueString("decrement"))
- attributes.Insert(metadata.A.Type, pcommon.NewValueString("miss"))
if parsedV, ok := r.parseInt(k, v); ok {
- r.addToIntMetric(operationCount, attributes, parsedV, now)
+ r.mb.RecordMemcachedOperationsDataPoint(now, parsedV, "miss", "decrement")
}
case "rusage_system":
- attributes.Insert(metadata.A.State, pcommon.NewValueString("system"))
if parsedV, ok := r.parseFloat(k, v); ok {
- r.addToDoubleMetric(rUsage, attributes, parsedV, now)
+ r.mb.RecordMemcachedCPUUsageDataPoint(now, parsedV, "system")
}
case "rusage_user":
- attributes.Insert(metadata.A.State, pcommon.NewValueString("user"))
if parsedV, ok := r.parseFloat(k, v); ok {
- r.addToDoubleMetric(rUsage, attributes, parsedV, now)
+ r.mb.RecordMemcachedCPUUsageDataPoint(now, parsedV, "user")
}
}
}
@@ -191,7 +157,7 @@ func (r *memcachedScraper) scrape(_ context.Context) (pmetric.Metrics, error) {
parsedHit, okHit := r.parseInt("incr_hits", stats.Stats["incr_hits"])
parsedMiss, okMiss := r.parseInt("incr_misses", stats.Stats["incr_misses"])
if okHit && okMiss {
- r.addToDoubleMetric(hitRatio, attributes, calculateHitRatio(parsedHit, parsedMiss), now)
+ r.mb.RecordMemcachedOperationHitRatioDataPoint(now, calculateHitRatio(parsedHit, parsedMiss), "increment")
}
attributes = pcommon.NewMap()
@@ -199,7 +165,7 @@ func (r *memcachedScraper) scrape(_ context.Context) (pmetric.Metrics, error) {
parsedHit, okHit = r.parseInt("decr_hits", stats.Stats["decr_hits"])
parsedMiss, okMiss = r.parseInt("decr_misses", stats.Stats["decr_misses"])
if okHit && okMiss {
- r.addToDoubleMetric(hitRatio, attributes, calculateHitRatio(parsedHit, parsedMiss), now)
+ r.mb.RecordMemcachedOperationHitRatioDataPoint(now, calculateHitRatio(parsedHit, parsedMiss), "decrement")
}
attributes = pcommon.NewMap()
@@ -207,16 +173,11 @@ func (r *memcachedScraper) scrape(_ context.Context) (pmetric.Metrics, error) {
parsedHit, okHit = r.parseInt("get_hits", stats.Stats["get_hits"])
parsedMiss, okMiss = r.parseInt("get_misses", stats.Stats["get_misses"])
if okHit && okMiss {
- r.addToDoubleMetric(hitRatio, attributes, calculateHitRatio(parsedHit, parsedMiss), now)
+ r.mb.RecordMemcachedOperationHitRatioDataPoint(now, calculateHitRatio(parsedHit, parsedMiss), "get")
}
}
- return md, nil
-}
-func initMetric(ms pmetric.MetricSlice, mi metadata.MetricIntf) pmetric.Metric {
- m := ms.AppendEmpty()
- mi.Init(m)
- return m
+ return r.mb.Emit(), nil
}
func calculateHitRatio(misses, hits int64) float64 {
@@ -225,7 +186,7 @@ func calculateHitRatio(misses, hits int64) float64 {
}
hitsFloat := float64(hits)
missesFloat := float64(misses)
- return (hitsFloat / (hitsFloat + missesFloat) * 100)
+ return hitsFloat / (hitsFloat + missesFloat) * 100
}
// parseInt converts string to int64.
@@ -256,21 +217,3 @@ func (r *memcachedScraper) logInvalid(expectedType, key, value string) {
zap.String("value", value),
)
}
-
-func (r *memcachedScraper) addToDoubleMetric(metric pmetric.NumberDataPointSlice, attributes pcommon.Map, value float64, now pcommon.Timestamp) {
- dataPoint := metric.AppendEmpty()
- dataPoint.SetTimestamp(now)
- dataPoint.SetDoubleVal(value)
- if attributes.Len() > 0 {
- attributes.CopyTo(dataPoint.Attributes())
- }
-}
-
-func (r *memcachedScraper) addToIntMetric(metric pmetric.NumberDataPointSlice, attributes pcommon.Map, value int64, now pcommon.Timestamp) {
- dataPoint := metric.AppendEmpty()
- dataPoint.SetTimestamp(now)
- dataPoint.SetIntVal(value)
- if attributes.Len() > 0 {
- attributes.CopyTo(dataPoint.Attributes())
- }
-}
diff --git a/receiver/memcachedreceiver/testdata/expected_metrics/test_scraper/expected.json b/receiver/memcachedreceiver/testdata/expected_metrics/test_scraper/expected.json
index e48693725cfc..1fb0a3c5df41 100644
--- a/receiver/memcachedreceiver/testdata/expected_metrics/test_scraper/expected.json
+++ b/receiver/memcachedreceiver/testdata/expected_metrics/test_scraper/expected.json
@@ -4,7 +4,7 @@
"scopeMetrics": [
{
"scope": {
- "name": "otelcol/memcached"
+ "name": "otelcol/memcachedreceiver"
},
"metrics": [
{