diff --git a/receiver/postgresqlreceiver/client.go b/receiver/postgresqlreceiver/client.go index 606dd9cb32de..dffafd17307b 100644 --- a/receiver/postgresqlreceiver/client.go +++ b/receiver/postgresqlreceiver/client.go @@ -57,6 +57,10 @@ type client interface { getIndexStats(ctx context.Context, database string) (map[indexIdentifer]indexStat, error) getActiveConnections(ctx context.Context) (int64, error) listDatabases(ctx context.Context) ([]string, error) + getRowStats(ctx context.Context) ([]RowStats, error) + getQueryStats(ctx context.Context) ([]queryStats, error) + getBufferHit(ctx context.Context) ([]BufferHit, error) + getVersionString(ctx context.Context) (string, error) } type postgreSQLClient struct { @@ -587,6 +591,177 @@ func (c *postgreSQLClient) getReplicationStats(ctx context.Context) ([]replicati return rs, errors } +type RowStats struct { + relationName string + rowsReturned int64 + rowsFetched int64 + rowsInserted int64 + rowsUpdated int64 + rowsDeleted int64 + rowsHotUpdated int64 + liveRows int64 + deadRows int64 +} + +func (c *postgreSQLClient) getRowStats(ctx context.Context) ([]RowStats, error) { + query := `SELECT + relname, + pg_stat_get_tuples_returned(relid) AS rows_returned, + pg_stat_get_tuples_fetched(relid) AS rows_fetched, + pg_stat_get_tuples_inserted(relid) AS rows_inserted, + pg_stat_get_tuples_updated(relid) AS rows_updated, + pg_stat_get_tuples_deleted(relid) AS rows_deleted, + pg_stat_get_tuples_hot_updated(relid) AS rows_hot_updated, + pg_stat_get_live_tuples(relid) AS live_rows, + pg_stat_get_dead_tuples(relid) AS dead_rows + FROM + pg_stat_all_tables; + ` + + rows, err := c.client.QueryContext(ctx, query) + if err != nil { + return nil, fmt.Errorf("unable to query pg_stat_all_tables: %w", err) + } + + defer rows.Close() + + var rs []RowStats + var errors error + + for rows.Next() { + var ( + relname sql.NullString + rowsReturned sql.NullInt64 + rowsFetched sql.NullInt64 + rowsInserted sql.NullInt64 + rowsUpdated sql.NullInt64 + rowsDeleted sql.NullInt64 + rowsHotUpdated sql.NullInt64 + liveRows sql.NullInt64 + deadRows sql.NullInt64 + ) + + err := rows.Scan( + &relname, + &rowsReturned, + &rowsFetched, + &rowsInserted, + &rowsUpdated, + &rowsDeleted, + &rowsHotUpdated, + &liveRows, + &deadRows, + ) + + if err != nil { + errors = multierr.Append(errors, err) + continue + } + + rs = append(rs, RowStats{ + relationName: relname.String, + rowsReturned: rowsReturned.Int64, + rowsFetched: rowsFetched.Int64, + rowsInserted: rowsInserted.Int64, + rowsUpdated: rowsUpdated.Int64, + rowsDeleted: rowsDeleted.Int64, + rowsHotUpdated: rowsHotUpdated.Int64, + liveRows: liveRows.Int64, + deadRows: deadRows.Int64, + }) + } + return rs, errors +} + +type queryStats struct { + queryId string + queryText string + queryCount int64 + queryExecTime int64 +} + +func (c *postgreSQLClient) getQueryStats(ctx context.Context) ([]queryStats, error) { + query := `SELECT + queryid, + query, + calls, + total_exec_time + FROM pg_stat_statements; + ` + + rows, err := c.client.QueryContext(ctx, query) + if err != nil { + return nil, fmt.Errorf("unable to query pg_stat_statements: %w", err) + } + defer rows.Close() + var qs []queryStats + var errors error + for rows.Next() { + var queryId, queryText string + var queryCount int64 + var queryExecTime float64 + err = rows.Scan(&queryId, &queryText, &queryCount, &queryExecTime) + if err != nil { + errors = multierr.Append(errors, err) + continue + } + queryExecTimeNS := int64(queryExecTime * 1000000) // total_exec_time is reported in milliseconds + qs = append(qs, queryStats{ + queryId: queryId, + queryText: queryText, + queryCount: queryCount, + queryExecTime: queryExecTimeNS, + }) + } + return qs, errors +} + +type BufferHit struct { + dbName string + hits int64 +} + +func (c *postgreSQLClient) getBufferHit(ctx context.Context) ([]BufferHit, error) { + query := `SELECT datname, blks_hit FROM pg_stat_database;` + + rows, err := c.client.QueryContext(ctx, query) + if err != nil { + return nil, fmt.Errorf("unable to query pg_stat_database: %w", err) + } + + defer rows.Close() + + var bh []BufferHit + var errors error + + for rows.Next() { + var dbname sql.NullString + var hits sql.NullInt64 + + err = rows.Scan(&dbname, &hits) + + if err != nil { + errors = multierr.Append(errors, err) + continue + } + bh = append(bh, BufferHit{ + dbName: dbname.String, + hits: hits.Int64, + }) + } + return bh, errors +} + +func (c *postgreSQLClient) getVersionString(ctx context.Context) (string, error) { + var version string + err := c.client.QueryRowContext(ctx, "SHOW server_version").Scan(&version) + if err != nil { + return "", fmt.Errorf("failed to get PostgreSQL version: %w", err) + } + + return version, nil +} + func (c *postgreSQLClient) getLatestWalAgeSeconds(ctx context.Context) (int64, error) { query := `SELECT coalesce(last_archived_time, CURRENT_TIMESTAMP) AS last_archived_wal, diff --git a/receiver/postgresqlreceiver/documentation.md b/receiver/postgresqlreceiver/documentation.md index f203a7b52f91..6a585577d72f 100644 --- a/receiver/postgresqlreceiver/documentation.md +++ b/receiver/postgresqlreceiver/documentation.md @@ -92,6 +92,20 @@ The number of blocks read. | ---- | ----------- | ------ | | source | The block read source type. | Str: ``heap_read``, ``heap_hit``, ``idx_read``, ``idx_hit``, ``toast_read``, ``toast_hit``, ``tidx_read``, ``tidx_hit`` | +### postgresql.buffer_hit + +The number of disk block hits in the buffer cache, thereby avoiding database reads, tagged with database name. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {hit}/s | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| dbname | Name of the database | Any Str | + ### postgresql.commits The number of commits. @@ -114,7 +128,7 @@ Configured maximum number of client connections allowed | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| {connections} | Gauge | Int | +| {connection} | Gauge | Int | ### postgresql.database.count @@ -148,6 +162,20 @@ The size of the index on disk. | ---- | ----------- | ---------- | | By | Gauge | Int | +### postgresql.live_rows + +The approximate number of live rows, tagged with relation name. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {row} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| relation_name | Name of the relation | Any Str | + ### postgresql.operations The number of db row operations. @@ -162,6 +190,36 @@ The number of db row operations. | ---- | ----------- | ------ | | operation | The database operation. | Str: ``ins``, ``upd``, ``del``, ``hot_upd`` | +### postgresql.query.count + +Number of times the statement was executed. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| 1 | Sum | Int | Cumulative | false | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| query_text | Text of a representative statement | Any Str | +| query_id | Hash code to identify identical normalized queries. | Any Str | + +### postgresql.query.total_exec_time + +Total execution time of the statement in nanoseconds. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| ns | Sum | Int | Cumulative | false | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| query_text | Text of a representative statement | Any Str | +| query_id | Hash code to identify identical normalized queries. | Any Str | + ### postgresql.replication.data_delay The amount of data delayed in replication. @@ -198,6 +256,62 @@ The number of rows in the database. | ---- | ----------- | ------ | | state | The tuple (row) state. | Str: ``dead``, ``live`` | +### postgresql.rows_deleted + +Rows deleted by queries in this db, tagged with relation name. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {row}/s | Gauge | Int | +
+#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| relation_name | Name of the relation | Any Str | + +### postgresql.rows_fetched + +Rows fetched by queries in this db, tagged with relation name. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {row}/s | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| relation_name | Name of the relation | Any Str | + +### postgresql.rows_inserted + +Rows inserted by queries in the db, tagged with relation name. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {row}/s | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| relation_name | Name of the relation | Any Str | + +### postgresql.rows_updated + +Rows updated by queries in the db, tagged with relation name. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {row}/s | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| relation_name | Name of the relation | Any Str | + ### postgresql.table.count Number of user tables in a database. @@ -324,6 +438,7 @@ This metric requires WAL to be enabled with at least one replica. | Name | Description | Values | Enabled | | ---- | ----------- | ------ | ------- | | postgresql.database.name | The name of the database. | Any Str | true | +| postgresql.db.version | The version of the PostgreSQL database. | Any Str | true | | postgresql.index.name | The name of the index on a table. | Any Str | true | | postgresql.schema.name | The schema name. | Any Str | true | | postgresql.table.name | The table name.
| Any Str | true | diff --git a/receiver/postgresqlreceiver/generated_package_test.go b/receiver/postgresqlreceiver/generated_package_test.go index 40a54575086a..392019650d88 100644 --- a/receiver/postgresqlreceiver/generated_package_test.go +++ b/receiver/postgresqlreceiver/generated_package_test.go @@ -3,9 +3,8 @@ package postgresqlreceiver import ( - "testing" - "go.uber.org/goleak" + "testing" ) func TestMain(m *testing.M) { diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_config.go b/receiver/postgresqlreceiver/internal/metadata/generated_config.go index afaafb83fe93..3644415156c3 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_config.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_config.go @@ -35,6 +35,7 @@ type MetricsConfig struct { PostgresqlBgwriterDuration MetricConfig `mapstructure:"postgresql.bgwriter.duration"` PostgresqlBgwriterMaxwritten MetricConfig `mapstructure:"postgresql.bgwriter.maxwritten"` PostgresqlBlocksRead MetricConfig `mapstructure:"postgresql.blocks_read"` + PostgresqlBufferHit MetricConfig `mapstructure:"postgresql.buffer_hit"` PostgresqlCommits MetricConfig `mapstructure:"postgresql.commits"` PostgresqlConnectionCount MetricConfig `mapstructure:"postgresql.connection.count"` PostgresqlConnectionMax MetricConfig `mapstructure:"postgresql.connection.max"` @@ -44,10 +45,17 @@ type MetricsConfig struct { PostgresqlDeadlocks MetricConfig `mapstructure:"postgresql.deadlocks"` PostgresqlIndexScans MetricConfig `mapstructure:"postgresql.index.scans"` PostgresqlIndexSize MetricConfig `mapstructure:"postgresql.index.size"` + PostgresqlLiveRows MetricConfig `mapstructure:"postgresql.live_rows"` PostgresqlOperations MetricConfig `mapstructure:"postgresql.operations"` + PostgresqlQueryCount MetricConfig `mapstructure:"postgresql.query.count"` + PostgresqlQueryTotalExecTime MetricConfig `mapstructure:"postgresql.query.total_exec_time"` PostgresqlReplicationDataDelay MetricConfig `mapstructure:"postgresql.replication.data_delay"` PostgresqlRollbacks MetricConfig `mapstructure:"postgresql.rollbacks"` PostgresqlRows MetricConfig `mapstructure:"postgresql.rows"` + PostgresqlRowsDeleted MetricConfig `mapstructure:"postgresql.rows_deleted"` + PostgresqlRowsFetched MetricConfig `mapstructure:"postgresql.rows_fetched"` + PostgresqlRowsInserted MetricConfig `mapstructure:"postgresql.rows_inserted"` + PostgresqlRowsUpdated MetricConfig `mapstructure:"postgresql.rows_updated"` PostgresqlSequentialScans MetricConfig `mapstructure:"postgresql.sequential_scans"` PostgresqlTableCount MetricConfig `mapstructure:"postgresql.table.count"` PostgresqlTableSize MetricConfig `mapstructure:"postgresql.table.size"` @@ -81,6 +89,9 @@ func DefaultMetricsConfig() MetricsConfig { PostgresqlBlocksRead: MetricConfig{ Enabled: true, }, + PostgresqlBufferHit: MetricConfig{ + Enabled: true, + }, PostgresqlCommits: MetricConfig{ Enabled: true, }, @@ -108,9 +119,18 @@ func DefaultMetricsConfig() MetricsConfig { PostgresqlIndexSize: MetricConfig{ Enabled: true, }, + PostgresqlLiveRows: MetricConfig{ + Enabled: true, + }, PostgresqlOperations: MetricConfig{ Enabled: true, }, + PostgresqlQueryCount: MetricConfig{ + Enabled: true, + }, + PostgresqlQueryTotalExecTime: MetricConfig{ + Enabled: true, + }, PostgresqlReplicationDataDelay: MetricConfig{ Enabled: true, }, @@ -120,6 +140,18 @@ func DefaultMetricsConfig() MetricsConfig { PostgresqlRows: MetricConfig{ Enabled: true, }, + PostgresqlRowsDeleted: MetricConfig{ + Enabled: true, + }, + 
PostgresqlRowsFetched: MetricConfig{ + Enabled: true, + }, + PostgresqlRowsInserted: MetricConfig{ + Enabled: true, + }, + PostgresqlRowsUpdated: MetricConfig{ + Enabled: true, + }, PostgresqlSequentialScans: MetricConfig{ Enabled: false, }, @@ -176,6 +208,7 @@ func (rac *ResourceAttributeConfig) Unmarshal(parser *confmap.Conf) error { // ResourceAttributesConfig provides config for postgresql resource attributes. type ResourceAttributesConfig struct { PostgresqlDatabaseName ResourceAttributeConfig `mapstructure:"postgresql.database.name"` + PostgresqlDbVersion ResourceAttributeConfig `mapstructure:"postgresql.db.version"` PostgresqlIndexName ResourceAttributeConfig `mapstructure:"postgresql.index.name"` PostgresqlSchemaName ResourceAttributeConfig `mapstructure:"postgresql.schema.name"` PostgresqlTableName ResourceAttributeConfig `mapstructure:"postgresql.table.name"` @@ -186,6 +219,9 @@ func DefaultResourceAttributesConfig() ResourceAttributesConfig { PostgresqlDatabaseName: ResourceAttributeConfig{ Enabled: true, }, + PostgresqlDbVersion: ResourceAttributeConfig{ + Enabled: true, + }, PostgresqlIndexName: ResourceAttributeConfig{ Enabled: true, }, diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go b/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go index 9fc56002195c..f13faf0b636e 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go @@ -32,6 +32,7 @@ func TestMetricsBuilderConfig(t *testing.T) { PostgresqlBgwriterDuration: MetricConfig{Enabled: true}, PostgresqlBgwriterMaxwritten: MetricConfig{Enabled: true}, PostgresqlBlocksRead: MetricConfig{Enabled: true}, + PostgresqlBufferHit: MetricConfig{Enabled: true}, PostgresqlCommits: MetricConfig{Enabled: true}, PostgresqlConnectionCount: MetricConfig{Enabled: true}, PostgresqlConnectionMax: MetricConfig{Enabled: true}, @@ -41,10 +42,17 @@ func TestMetricsBuilderConfig(t *testing.T) { PostgresqlDeadlocks: MetricConfig{Enabled: true}, PostgresqlIndexScans: MetricConfig{Enabled: true}, PostgresqlIndexSize: MetricConfig{Enabled: true}, + PostgresqlLiveRows: MetricConfig{Enabled: true}, PostgresqlOperations: MetricConfig{Enabled: true}, + PostgresqlQueryCount: MetricConfig{Enabled: true}, + PostgresqlQueryTotalExecTime: MetricConfig{Enabled: true}, PostgresqlReplicationDataDelay: MetricConfig{Enabled: true}, PostgresqlRollbacks: MetricConfig{Enabled: true}, PostgresqlRows: MetricConfig{Enabled: true}, + PostgresqlRowsDeleted: MetricConfig{Enabled: true}, + PostgresqlRowsFetched: MetricConfig{Enabled: true}, + PostgresqlRowsInserted: MetricConfig{Enabled: true}, + PostgresqlRowsUpdated: MetricConfig{Enabled: true}, PostgresqlSequentialScans: MetricConfig{Enabled: true}, PostgresqlTableCount: MetricConfig{Enabled: true}, PostgresqlTableSize: MetricConfig{Enabled: true}, @@ -56,6 +64,7 @@ func TestMetricsBuilderConfig(t *testing.T) { }, ResourceAttributes: ResourceAttributesConfig{ PostgresqlDatabaseName: ResourceAttributeConfig{Enabled: true}, + PostgresqlDbVersion: ResourceAttributeConfig{Enabled: true}, PostgresqlIndexName: ResourceAttributeConfig{Enabled: true}, PostgresqlSchemaName: ResourceAttributeConfig{Enabled: true}, PostgresqlTableName: ResourceAttributeConfig{Enabled: true}, @@ -73,6 +82,7 @@ func TestMetricsBuilderConfig(t *testing.T) { PostgresqlBgwriterDuration: MetricConfig{Enabled: false}, PostgresqlBgwriterMaxwritten: MetricConfig{Enabled: false}, PostgresqlBlocksRead: 
MetricConfig{Enabled: false}, + PostgresqlBufferHit: MetricConfig{Enabled: false}, PostgresqlCommits: MetricConfig{Enabled: false}, PostgresqlConnectionCount: MetricConfig{Enabled: false}, PostgresqlConnectionMax: MetricConfig{Enabled: false}, @@ -82,10 +92,17 @@ func TestMetricsBuilderConfig(t *testing.T) { PostgresqlDeadlocks: MetricConfig{Enabled: false}, PostgresqlIndexScans: MetricConfig{Enabled: false}, PostgresqlIndexSize: MetricConfig{Enabled: false}, + PostgresqlLiveRows: MetricConfig{Enabled: false}, PostgresqlOperations: MetricConfig{Enabled: false}, + PostgresqlQueryCount: MetricConfig{Enabled: false}, + PostgresqlQueryTotalExecTime: MetricConfig{Enabled: false}, PostgresqlReplicationDataDelay: MetricConfig{Enabled: false}, PostgresqlRollbacks: MetricConfig{Enabled: false}, PostgresqlRows: MetricConfig{Enabled: false}, + PostgresqlRowsDeleted: MetricConfig{Enabled: false}, + PostgresqlRowsFetched: MetricConfig{Enabled: false}, + PostgresqlRowsInserted: MetricConfig{Enabled: false}, + PostgresqlRowsUpdated: MetricConfig{Enabled: false}, PostgresqlSequentialScans: MetricConfig{Enabled: false}, PostgresqlTableCount: MetricConfig{Enabled: false}, PostgresqlTableSize: MetricConfig{Enabled: false}, @@ -97,6 +114,7 @@ func TestMetricsBuilderConfig(t *testing.T) { }, ResourceAttributes: ResourceAttributesConfig{ PostgresqlDatabaseName: ResourceAttributeConfig{Enabled: false}, + PostgresqlDbVersion: ResourceAttributeConfig{Enabled: false}, PostgresqlIndexName: ResourceAttributeConfig{Enabled: false}, PostgresqlSchemaName: ResourceAttributeConfig{Enabled: false}, PostgresqlTableName: ResourceAttributeConfig{Enabled: false}, @@ -137,6 +155,7 @@ func TestResourceAttributesConfig(t *testing.T) { name: "all_set", want: ResourceAttributesConfig{ PostgresqlDatabaseName: ResourceAttributeConfig{Enabled: true}, + PostgresqlDbVersion: ResourceAttributeConfig{Enabled: true}, PostgresqlIndexName: ResourceAttributeConfig{Enabled: true}, PostgresqlSchemaName: ResourceAttributeConfig{Enabled: true}, PostgresqlTableName: ResourceAttributeConfig{Enabled: true}, @@ -146,6 +165,7 @@ func TestResourceAttributesConfig(t *testing.T) { name: "none_set", want: ResourceAttributesConfig{ PostgresqlDatabaseName: ResourceAttributeConfig{Enabled: false}, + PostgresqlDbVersion: ResourceAttributeConfig{Enabled: false}, PostgresqlIndexName: ResourceAttributeConfig{Enabled: false}, PostgresqlSchemaName: ResourceAttributeConfig{Enabled: false}, PostgresqlTableName: ResourceAttributeConfig{Enabled: false}, diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go b/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go index c596b4691ac6..f0eca7866fa3 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go @@ -603,6 +603,57 @@ func newMetricPostgresqlBlocksRead(cfg MetricConfig) metricPostgresqlBlocksRead return m } +type metricPostgresqlBufferHit struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.buffer_hit metric with initial data. 
+func (m *metricPostgresqlBufferHit) init() { + m.data.SetName("postgresql.buffer_hit") + m.data.SetDescription("The number of disk block hits in the buffer cache, thereby avoiding database reads, tagged with database name.") + m.data.SetUnit("{hit}/s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricPostgresqlBufferHit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, dbnameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("dbname", dbnameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlBufferHit) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlBufferHit) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlBufferHit(cfg MetricConfig) metricPostgresqlBufferHit { + m := metricPostgresqlBufferHit{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricPostgresqlCommits struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -713,7 +764,7 @@ type metricPostgresqlConnectionMax struct { func (m *metricPostgresqlConnectionMax) init() { m.data.SetName("postgresql.connection.max") m.data.SetDescription("Configured maximum number of client connections allowed") - m.data.SetUnit("{connections}") + m.data.SetUnit("{connection}") m.data.SetEmptyGauge() } @@ -1058,6 +1109,57 @@ func newMetricPostgresqlIndexSize(cfg MetricConfig) metricPostgresqlIndexSize { return m } +type metricPostgresqlLiveRows struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.live_rows metric with initial data. +func (m *metricPostgresqlLiveRows) init() { + m.data.SetName("postgresql.live_rows") + m.data.SetDescription("The approximate number of live rows, tagged with relation name.") + m.data.SetUnit("{row}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricPostgresqlLiveRows) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("relation_name", relationNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlLiveRows) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricPostgresqlLiveRows) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlLiveRows(cfg MetricConfig) metricPostgresqlLiveRows { + m := metricPostgresqlLiveRows{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricPostgresqlOperations struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -1111,6 +1213,114 @@ func newMetricPostgresqlOperations(cfg MetricConfig) metricPostgresqlOperations return m } +type metricPostgresqlQueryCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.query.count metric with initial data. +func (m *metricPostgresqlQueryCount) init() { + m.data.SetName("postgresql.query.count") + m.data.SetDescription("Number of times the statement was executed.") + m.data.SetUnit("1") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricPostgresqlQueryCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryTextAttributeValue string, queryIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("query_text", queryTextAttributeValue) + dp.Attributes().PutStr("query_id", queryIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlQueryCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlQueryCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlQueryCount(cfg MetricConfig) metricPostgresqlQueryCount { + m := metricPostgresqlQueryCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricPostgresqlQueryTotalExecTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.query.total_exec_time metric with initial data. 
+func (m *metricPostgresqlQueryTotalExecTime) init() { + m.data.SetName("postgresql.query.total_exec_time") + m.data.SetDescription("Total execution time of the statement in nanoseconds.") + m.data.SetUnit("ns") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricPostgresqlQueryTotalExecTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryTextAttributeValue string, queryIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("query_text", queryTextAttributeValue) + dp.Attributes().PutStr("query_id", queryIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlQueryTotalExecTime) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlQueryTotalExecTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlQueryTotalExecTime(cfg MetricConfig) metricPostgresqlQueryTotalExecTime { + m := metricPostgresqlQueryTotalExecTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricPostgresqlReplicationDataDelay struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -1266,6 +1476,210 @@ func newMetricPostgresqlRows(cfg MetricConfig) metricPostgresqlRows { return m } +type metricPostgresqlRowsDeleted struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.rows_deleted metric with initial data. +func (m *metricPostgresqlRowsDeleted) init() { + m.data.SetName("postgresql.rows_deleted") + m.data.SetDescription("Rows deleted by queries in this db, tagged with relation name.") + m.data.SetUnit("{row}/s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricPostgresqlRowsDeleted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("relation_name", relationNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlRowsDeleted) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricPostgresqlRowsDeleted) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlRowsDeleted(cfg MetricConfig) metricPostgresqlRowsDeleted { + m := metricPostgresqlRowsDeleted{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricPostgresqlRowsFetched struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.rows_fetched metric with initial data. +func (m *metricPostgresqlRowsFetched) init() { + m.data.SetName("postgresql.rows_fetched") + m.data.SetDescription("Rows fetched by queries in this db, tagged with relation name.") + m.data.SetUnit("{row}/s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricPostgresqlRowsFetched) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("relation_name", relationNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlRowsFetched) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlRowsFetched) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlRowsFetched(cfg MetricConfig) metricPostgresqlRowsFetched { + m := metricPostgresqlRowsFetched{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricPostgresqlRowsInserted struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.rows_inserted metric with initial data. +func (m *metricPostgresqlRowsInserted) init() { + m.data.SetName("postgresql.rows_inserted") + m.data.SetDescription("Rows inserted by queries in the db, tagged with relation name.") + m.data.SetUnit("{row}/s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricPostgresqlRowsInserted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("relation_name", relationNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricPostgresqlRowsInserted) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlRowsInserted) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlRowsInserted(cfg MetricConfig) metricPostgresqlRowsInserted { + m := metricPostgresqlRowsInserted{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricPostgresqlRowsUpdated struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.rows_updated metric with initial data. +func (m *metricPostgresqlRowsUpdated) init() { + m.data.SetName("postgresql.rows_updated") + m.data.SetDescription("Rows updated by queries in the db, tagged with relation name.") + m.data.SetUnit("{row}/s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricPostgresqlRowsUpdated) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("relation_name", relationNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlRowsUpdated) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlRowsUpdated) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlRowsUpdated(cfg MetricConfig) metricPostgresqlRowsUpdated { + m := metricPostgresqlRowsUpdated{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricPostgresqlSequentialScans struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
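All of the generated metric types added in these hunks share one lifecycle: `init` seeds an empty `pmetric.Metric`, `recordDataPoint` appends attributed data points, and `emit` moves the accumulated points into the output slice before re-initializing, carrying the largest observed point count forward as the next pre-allocated capacity. The following self-contained sketch reproduces that lifecycle against the pdata API directly; `gaugeRecorder` is an illustrative stand-in for the mdatagen output, not part of this PR.

```go
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// gaugeRecorder mirrors the generated types: a metric buffer plus the max
// observed number of data points, reused as capacity on the next scrape.
type gaugeRecorder struct {
	name     string
	unit     string
	data     pmetric.Metric
	capacity int
}

func (g *gaugeRecorder) init() {
	g.data = pmetric.NewMetric()
	g.data.SetName(g.name)
	g.data.SetUnit(g.unit)
	g.data.SetEmptyGauge()
	g.data.Gauge().DataPoints().EnsureCapacity(g.capacity)
}

func (g *gaugeRecorder) record(ts pcommon.Timestamp, val int64, relation string) {
	dp := g.data.Gauge().DataPoints().AppendEmpty()
	dp.SetTimestamp(ts)
	dp.SetIntValue(val)
	dp.Attributes().PutStr("relation_name", relation)
}

func (g *gaugeRecorder) emit(out pmetric.MetricSlice) {
	if n := g.data.Gauge().DataPoints().Len(); n > 0 {
		if n > g.capacity {
			g.capacity = n // remember the high-water mark, like updateCapacity()
		}
		g.data.MoveTo(out.AppendEmpty()) // hand the buffer to the output slice
		g.init()                         // reset for the next scrape, like emit()
	}
}

func main() {
	g := &gaugeRecorder{name: "postgresql.live_rows", unit: "{row}"}
	g.init()
	ts := pcommon.NewTimestampFromTime(time.Now())
	g.record(ts, 42, "public.users")
	g.record(ts, 7, "public.orders")

	out := pmetric.NewMetricSlice()
	g.emit(out)
	fmt.Println(out.At(0).Name(), out.At(0).Gauge().DataPoints().Len()) // postgresql.live_rows 2
}
```

The capacity carry-forward is why `updateCapacity` runs before `MoveTo`: the next scrape pre-allocates the data-point slice to the largest size seen so far, avoiding repeated growth allocations.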
@@ -1691,6 +2105,7 @@ type MetricsBuilder struct { metricPostgresqlBgwriterDuration metricPostgresqlBgwriterDuration metricPostgresqlBgwriterMaxwritten metricPostgresqlBgwriterMaxwritten metricPostgresqlBlocksRead metricPostgresqlBlocksRead + metricPostgresqlBufferHit metricPostgresqlBufferHit metricPostgresqlCommits metricPostgresqlCommits metricPostgresqlConnectionCount metricPostgresqlConnectionCount metricPostgresqlConnectionMax metricPostgresqlConnectionMax @@ -1700,10 +2115,17 @@ type MetricsBuilder struct { metricPostgresqlDeadlocks metricPostgresqlDeadlocks metricPostgresqlIndexScans metricPostgresqlIndexScans metricPostgresqlIndexSize metricPostgresqlIndexSize + metricPostgresqlLiveRows metricPostgresqlLiveRows metricPostgresqlOperations metricPostgresqlOperations + metricPostgresqlQueryCount metricPostgresqlQueryCount + metricPostgresqlQueryTotalExecTime metricPostgresqlQueryTotalExecTime metricPostgresqlReplicationDataDelay metricPostgresqlReplicationDataDelay metricPostgresqlRollbacks metricPostgresqlRollbacks metricPostgresqlRows metricPostgresqlRows + metricPostgresqlRowsDeleted metricPostgresqlRowsDeleted + metricPostgresqlRowsFetched metricPostgresqlRowsFetched + metricPostgresqlRowsInserted metricPostgresqlRowsInserted + metricPostgresqlRowsUpdated metricPostgresqlRowsUpdated metricPostgresqlSequentialScans metricPostgresqlSequentialScans metricPostgresqlTableCount metricPostgresqlTableCount metricPostgresqlTableSize metricPostgresqlTableSize @@ -1737,6 +2159,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricPostgresqlBgwriterDuration: newMetricPostgresqlBgwriterDuration(mbc.Metrics.PostgresqlBgwriterDuration), metricPostgresqlBgwriterMaxwritten: newMetricPostgresqlBgwriterMaxwritten(mbc.Metrics.PostgresqlBgwriterMaxwritten), metricPostgresqlBlocksRead: newMetricPostgresqlBlocksRead(mbc.Metrics.PostgresqlBlocksRead), + metricPostgresqlBufferHit: newMetricPostgresqlBufferHit(mbc.Metrics.PostgresqlBufferHit), metricPostgresqlCommits: newMetricPostgresqlCommits(mbc.Metrics.PostgresqlCommits), metricPostgresqlConnectionCount: newMetricPostgresqlConnectionCount(mbc.Metrics.PostgresqlConnectionCount), metricPostgresqlConnectionMax: newMetricPostgresqlConnectionMax(mbc.Metrics.PostgresqlConnectionMax), @@ -1746,10 +2169,17 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricPostgresqlDeadlocks: newMetricPostgresqlDeadlocks(mbc.Metrics.PostgresqlDeadlocks), metricPostgresqlIndexScans: newMetricPostgresqlIndexScans(mbc.Metrics.PostgresqlIndexScans), metricPostgresqlIndexSize: newMetricPostgresqlIndexSize(mbc.Metrics.PostgresqlIndexSize), + metricPostgresqlLiveRows: newMetricPostgresqlLiveRows(mbc.Metrics.PostgresqlLiveRows), metricPostgresqlOperations: newMetricPostgresqlOperations(mbc.Metrics.PostgresqlOperations), + metricPostgresqlQueryCount: newMetricPostgresqlQueryCount(mbc.Metrics.PostgresqlQueryCount), + metricPostgresqlQueryTotalExecTime: newMetricPostgresqlQueryTotalExecTime(mbc.Metrics.PostgresqlQueryTotalExecTime), metricPostgresqlReplicationDataDelay: newMetricPostgresqlReplicationDataDelay(mbc.Metrics.PostgresqlReplicationDataDelay), metricPostgresqlRollbacks: newMetricPostgresqlRollbacks(mbc.Metrics.PostgresqlRollbacks), metricPostgresqlRows: newMetricPostgresqlRows(mbc.Metrics.PostgresqlRows), + metricPostgresqlRowsDeleted: newMetricPostgresqlRowsDeleted(mbc.Metrics.PostgresqlRowsDeleted), + metricPostgresqlRowsFetched: newMetricPostgresqlRowsFetched(mbc.Metrics.PostgresqlRowsFetched), + 
metricPostgresqlRowsInserted: newMetricPostgresqlRowsInserted(mbc.Metrics.PostgresqlRowsInserted), + metricPostgresqlRowsUpdated: newMetricPostgresqlRowsUpdated(mbc.Metrics.PostgresqlRowsUpdated), metricPostgresqlSequentialScans: newMetricPostgresqlSequentialScans(mbc.Metrics.PostgresqlSequentialScans), metricPostgresqlTableCount: newMetricPostgresqlTableCount(mbc.Metrics.PostgresqlTableCount), metricPostgresqlTableSize: newMetricPostgresqlTableSize(mbc.Metrics.PostgresqlTableSize), @@ -1767,6 +2197,12 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt if mbc.ResourceAttributes.PostgresqlDatabaseName.MetricsExclude != nil { mb.resourceAttributeExcludeFilter["postgresql.database.name"] = filter.CreateFilter(mbc.ResourceAttributes.PostgresqlDatabaseName.MetricsExclude) } + if mbc.ResourceAttributes.PostgresqlDbVersion.MetricsInclude != nil { + mb.resourceAttributeIncludeFilter["postgresql.db.version"] = filter.CreateFilter(mbc.ResourceAttributes.PostgresqlDbVersion.MetricsInclude) + } + if mbc.ResourceAttributes.PostgresqlDbVersion.MetricsExclude != nil { + mb.resourceAttributeExcludeFilter["postgresql.db.version"] = filter.CreateFilter(mbc.ResourceAttributes.PostgresqlDbVersion.MetricsExclude) + } if mbc.ResourceAttributes.PostgresqlIndexName.MetricsInclude != nil { mb.resourceAttributeIncludeFilter["postgresql.index.name"] = filter.CreateFilter(mbc.ResourceAttributes.PostgresqlIndexName.MetricsInclude) } @@ -1853,6 +2289,7 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricPostgresqlBgwriterDuration.emit(ils.Metrics()) mb.metricPostgresqlBgwriterMaxwritten.emit(ils.Metrics()) mb.metricPostgresqlBlocksRead.emit(ils.Metrics()) + mb.metricPostgresqlBufferHit.emit(ils.Metrics()) mb.metricPostgresqlCommits.emit(ils.Metrics()) mb.metricPostgresqlConnectionCount.emit(ils.Metrics()) mb.metricPostgresqlConnectionMax.emit(ils.Metrics()) @@ -1862,10 +2299,17 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricPostgresqlDeadlocks.emit(ils.Metrics()) mb.metricPostgresqlIndexScans.emit(ils.Metrics()) mb.metricPostgresqlIndexSize.emit(ils.Metrics()) + mb.metricPostgresqlLiveRows.emit(ils.Metrics()) mb.metricPostgresqlOperations.emit(ils.Metrics()) + mb.metricPostgresqlQueryCount.emit(ils.Metrics()) + mb.metricPostgresqlQueryTotalExecTime.emit(ils.Metrics()) mb.metricPostgresqlReplicationDataDelay.emit(ils.Metrics()) mb.metricPostgresqlRollbacks.emit(ils.Metrics()) mb.metricPostgresqlRows.emit(ils.Metrics()) + mb.metricPostgresqlRowsDeleted.emit(ils.Metrics()) + mb.metricPostgresqlRowsFetched.emit(ils.Metrics()) + mb.metricPostgresqlRowsInserted.emit(ils.Metrics()) + mb.metricPostgresqlRowsUpdated.emit(ils.Metrics()) mb.metricPostgresqlSequentialScans.emit(ils.Metrics()) mb.metricPostgresqlTableCount.emit(ils.Metrics()) mb.metricPostgresqlTableSize.emit(ils.Metrics()) @@ -1940,6 +2384,11 @@ func (mb *MetricsBuilder) RecordPostgresqlBlocksReadDataPoint(ts pcommon.Timesta mb.metricPostgresqlBlocksRead.recordDataPoint(mb.startTime, ts, val, sourceAttributeValue.String()) } +// RecordPostgresqlBufferHitDataPoint adds a data point to postgresql.buffer_hit metric. +func (mb *MetricsBuilder) RecordPostgresqlBufferHitDataPoint(ts pcommon.Timestamp, val int64, dbnameAttributeValue string) { + mb.metricPostgresqlBufferHit.recordDataPoint(mb.startTime, ts, val, dbnameAttributeValue) +} + // RecordPostgresqlCommitsDataPoint adds a data point to postgresql.commits metric. 
func (mb *MetricsBuilder) RecordPostgresqlCommitsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricPostgresqlCommits.recordDataPoint(mb.startTime, ts, val) @@ -1985,11 +2434,26 @@ func (mb *MetricsBuilder) RecordPostgresqlIndexSizeDataPoint(ts pcommon.Timestam mb.metricPostgresqlIndexSize.recordDataPoint(mb.startTime, ts, val) } +// RecordPostgresqlLiveRowsDataPoint adds a data point to postgresql.live_rows metric. +func (mb *MetricsBuilder) RecordPostgresqlLiveRowsDataPoint(ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + mb.metricPostgresqlLiveRows.recordDataPoint(mb.startTime, ts, val, relationNameAttributeValue) +} + // RecordPostgresqlOperationsDataPoint adds a data point to postgresql.operations metric. func (mb *MetricsBuilder) RecordPostgresqlOperationsDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) { mb.metricPostgresqlOperations.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String()) } +// RecordPostgresqlQueryCountDataPoint adds a data point to postgresql.query.count metric. +func (mb *MetricsBuilder) RecordPostgresqlQueryCountDataPoint(ts pcommon.Timestamp, val int64, queryTextAttributeValue string, queryIDAttributeValue string) { + mb.metricPostgresqlQueryCount.recordDataPoint(mb.startTime, ts, val, queryTextAttributeValue, queryIDAttributeValue) +} + +// RecordPostgresqlQueryTotalExecTimeDataPoint adds a data point to postgresql.query.total_exec_time metric. +func (mb *MetricsBuilder) RecordPostgresqlQueryTotalExecTimeDataPoint(ts pcommon.Timestamp, val int64, queryTextAttributeValue string, queryIDAttributeValue string) { + mb.metricPostgresqlQueryTotalExecTime.recordDataPoint(mb.startTime, ts, val, queryTextAttributeValue, queryIDAttributeValue) +} + // RecordPostgresqlReplicationDataDelayDataPoint adds a data point to postgresql.replication.data_delay metric. func (mb *MetricsBuilder) RecordPostgresqlReplicationDataDelayDataPoint(ts pcommon.Timestamp, val int64, replicationClientAttributeValue string) { mb.metricPostgresqlReplicationDataDelay.recordDataPoint(mb.startTime, ts, val, replicationClientAttributeValue) @@ -2005,6 +2469,26 @@ func (mb *MetricsBuilder) RecordPostgresqlRowsDataPoint(ts pcommon.Timestamp, va mb.metricPostgresqlRows.recordDataPoint(mb.startTime, ts, val, stateAttributeValue.String()) } +// RecordPostgresqlRowsDeletedDataPoint adds a data point to postgresql.rows_deleted metric. +func (mb *MetricsBuilder) RecordPostgresqlRowsDeletedDataPoint(ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + mb.metricPostgresqlRowsDeleted.recordDataPoint(mb.startTime, ts, val, relationNameAttributeValue) +} + +// RecordPostgresqlRowsFetchedDataPoint adds a data point to postgresql.rows_fetched metric. +func (mb *MetricsBuilder) RecordPostgresqlRowsFetchedDataPoint(ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + mb.metricPostgresqlRowsFetched.recordDataPoint(mb.startTime, ts, val, relationNameAttributeValue) +} + +// RecordPostgresqlRowsInsertedDataPoint adds a data point to postgresql.rows_inserted metric. +func (mb *MetricsBuilder) RecordPostgresqlRowsInsertedDataPoint(ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + mb.metricPostgresqlRowsInserted.recordDataPoint(mb.startTime, ts, val, relationNameAttributeValue) +} + +// RecordPostgresqlRowsUpdatedDataPoint adds a data point to postgresql.rows_updated metric. 
+func (mb *MetricsBuilder) RecordPostgresqlRowsUpdatedDataPoint(ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + mb.metricPostgresqlRowsUpdated.recordDataPoint(mb.startTime, ts, val, relationNameAttributeValue) +} + // RecordPostgresqlSequentialScansDataPoint adds a data point to postgresql.sequential_scans metric. func (mb *MetricsBuilder) RecordPostgresqlSequentialScansDataPoint(ts pcommon.Timestamp, val int64) { mb.metricPostgresqlSequentialScans.recordDataPoint(mb.startTime, ts, val) diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go index a7571ddd1fe4..1356af938ffd 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go @@ -96,6 +96,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordPostgresqlBlocksReadDataPoint(ts, 1, AttributeSourceHeapRead) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlBufferHitDataPoint(ts, 1, "dbname-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordPostgresqlCommitsDataPoint(ts, 1) @@ -130,10 +134,22 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordPostgresqlIndexSizeDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlLiveRowsDataPoint(ts, 1, "relation_name-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordPostgresqlOperationsDataPoint(ts, 1, AttributeOperationIns) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlQueryCountDataPoint(ts, 1, "query_text-val", "query_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlQueryTotalExecTimeDataPoint(ts, 1, "query_text-val", "query_id-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordPostgresqlReplicationDataDelayDataPoint(ts, 1, "replication_client-val") @@ -146,6 +162,22 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordPostgresqlRowsDataPoint(ts, 1, AttributeStateDead) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlRowsDeletedDataPoint(ts, 1, "relation_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlRowsFetchedDataPoint(ts, 1, "relation_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlRowsInsertedDataPoint(ts, 1, "relation_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlRowsUpdatedDataPoint(ts, 1, "relation_name-val") + allMetricsCount++ mb.RecordPostgresqlSequentialScansDataPoint(ts, 1) @@ -177,6 +209,7 @@ func TestMetricsBuilder(t *testing.T) { rb := mb.NewResourceBuilder() rb.SetPostgresqlDatabaseName("postgresql.database.name-val") + rb.SetPostgresqlDbVersion("postgresql.db.version-val") rb.SetPostgresqlIndexName("postgresql.index.name-val") rb.SetPostgresqlSchemaName("postgresql.schema.name-val") rb.SetPostgresqlTableName("postgresql.table.name-val") @@ -312,6 +345,21 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("source") assert.True(t, ok) assert.EqualValues(t, "heap_read", attrVal.Str()) + case "postgresql.buffer_hit": + assert.False(t, validatedMetrics["postgresql.buffer_hit"], "Found a duplicate in the metrics slice: postgresql.buffer_hit") + validatedMetrics["postgresql.buffer_hit"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of disk block hits in the buffer 
cache, thereby avoiding database reads, tagged with database name.", ms.At(i).Description()) + assert.Equal(t, "{hit}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("dbname") + assert.True(t, ok) + assert.EqualValues(t, "dbname-val", attrVal.Str()) case "postgresql.commits": assert.False(t, validatedMetrics["postgresql.commits"], "Found a duplicate in the metrics slice: postgresql.commits") validatedMetrics["postgresql.commits"] = true @@ -344,7 +392,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Configured maximum number of client connections allowed", ms.At(i).Description()) - assert.Equal(t, "{connections}", ms.At(i).Unit()) + assert.Equal(t, "{connection}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -439,6 +487,21 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "postgresql.live_rows": + assert.False(t, validatedMetrics["postgresql.live_rows"], "Found a duplicate in the metrics slice: postgresql.live_rows") + validatedMetrics["postgresql.live_rows"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The approximate number of live rows, tagged with relation name.", ms.At(i).Description()) + assert.Equal(t, "{row}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("relation_name") + assert.True(t, ok) + assert.EqualValues(t, "relation_name-val", attrVal.Str()) case "postgresql.operations": assert.False(t, validatedMetrics["postgresql.operations"], "Found a duplicate in the metrics slice: postgresql.operations") validatedMetrics["postgresql.operations"] = true @@ -456,6 +519,46 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("operation") assert.True(t, ok) assert.EqualValues(t, "ins", attrVal.Str()) + case "postgresql.query.count": + assert.False(t, validatedMetrics["postgresql.query.count"], "Found a duplicate in the metrics slice: postgresql.query.count") + validatedMetrics["postgresql.query.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of times the statement was executed.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("query_text") + assert.True(t, ok) + 
assert.EqualValues(t, "query_text-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_id") + assert.True(t, ok) + assert.EqualValues(t, "query_id-val", attrVal.Str()) + case "postgresql.query.total_exec_time": + assert.False(t, validatedMetrics["postgresql.query.total_exec_time"], "Found a duplicate in the metrics slice: postgresql.query.total_exec_time") + validatedMetrics["postgresql.query.total_exec_time"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Total wait time of the normalised timed events in nanaoseconds.", ms.At(i).Description()) + assert.Equal(t, "ns", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("query_text") + assert.True(t, ok) + assert.EqualValues(t, "query_text-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_id") + assert.True(t, ok) + assert.EqualValues(t, "query_id-val", attrVal.Str()) case "postgresql.replication.data_delay": assert.False(t, validatedMetrics["postgresql.replication.data_delay"], "Found a duplicate in the metrics slice: postgresql.replication.data_delay") validatedMetrics["postgresql.replication.data_delay"] = true @@ -502,6 +605,66 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("state") assert.True(t, ok) assert.EqualValues(t, "dead", attrVal.Str()) + case "postgresql.rows_deleted": + assert.False(t, validatedMetrics["postgresql.rows_deleted"], "Found a duplicate in the metrics slice: postgresql.rows_deleted") + validatedMetrics["postgresql.rows_deleted"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Rows deleted by queries in this db, tagged with relation name.", ms.At(i).Description()) + assert.Equal(t, "{row}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("relation_name") + assert.True(t, ok) + assert.EqualValues(t, "relation_name-val", attrVal.Str()) + case "postgresql.rows_fetched": + assert.False(t, validatedMetrics["postgresql.rows_fetched"], "Found a duplicate in the metrics slice: postgresql.rows_fetched") + validatedMetrics["postgresql.rows_fetched"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Rows fetched by queries in this db, tagged with relation name.", ms.At(i).Description()) + assert.Equal(t, "{row}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("relation_name") + assert.True(t, ok) + assert.EqualValues(t, "relation_name-val", attrVal.Str()) + case "postgresql.rows_inserted": + assert.False(t, 
validatedMetrics["postgresql.rows_inserted"], "Found a duplicate in the metrics slice: postgresql.rows_inserted") + validatedMetrics["postgresql.rows_inserted"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Rows inserted by queries in the db, tagged with relation name.", ms.At(i).Description()) + assert.Equal(t, "{row}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("relation_name") + assert.True(t, ok) + assert.EqualValues(t, "relation_name-val", attrVal.Str()) + case "postgresql.rows_updated": + assert.False(t, validatedMetrics["postgresql.rows_updated"], "Found a duplicate in the metrics slice: postgresql.rows_updated") + validatedMetrics["postgresql.rows_updated"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Rows updated by queries in the db, tagged with relation name.", ms.At(i).Description()) + assert.Equal(t, "{row}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("relation_name") + assert.True(t, ok) + assert.EqualValues(t, "relation_name-val", attrVal.Str()) case "postgresql.sequential_scans": assert.False(t, validatedMetrics["postgresql.sequential_scans"], "Found a duplicate in the metrics slice: postgresql.sequential_scans") validatedMetrics["postgresql.sequential_scans"] = true diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_resource.go b/receiver/postgresqlreceiver/internal/metadata/generated_resource.go index 094f68d726bd..26970bb2ed84 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_resource.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_resource.go @@ -28,6 +28,13 @@ func (rb *ResourceBuilder) SetPostgresqlDatabaseName(val string) { } } +// SetPostgresqlDbVersion sets provided value as "postgresql.db.version" attribute. +func (rb *ResourceBuilder) SetPostgresqlDbVersion(val string) { + if rb.config.PostgresqlDbVersion.Enabled { + rb.res.Attributes().PutStr("postgresql.db.version", val) + } +} + // SetPostgresqlIndexName sets provided value as "postgresql.index.name" attribute. 
func (rb *ResourceBuilder) SetPostgresqlIndexName(val string) { if rb.config.PostgresqlIndexName.Enabled { diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_resource_test.go b/receiver/postgresqlreceiver/internal/metadata/generated_resource_test.go index c23116db14d0..b959c1dd4e1f 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_resource_test.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_resource_test.go @@ -14,6 +14,7 @@ func TestResourceBuilder(t *testing.T) { cfg := loadResourceAttributesConfig(t, test) rb := NewResourceBuilder(cfg) rb.SetPostgresqlDatabaseName("postgresql.database.name-val") + rb.SetPostgresqlDbVersion("postgresql.db.version-val") rb.SetPostgresqlIndexName("postgresql.index.name-val") rb.SetPostgresqlSchemaName("postgresql.schema.name-val") rb.SetPostgresqlTableName("postgresql.table.name-val") @@ -23,9 +24,9 @@ func TestResourceBuilder(t *testing.T) { switch test { case "default": - assert.Equal(t, 4, res.Attributes().Len()) + assert.Equal(t, 5, res.Attributes().Len()) case "all_set": - assert.Equal(t, 4, res.Attributes().Len()) + assert.Equal(t, 5, res.Attributes().Len()) case "none_set": assert.Equal(t, 0, res.Attributes().Len()) return @@ -38,6 +39,11 @@ func TestResourceBuilder(t *testing.T) { if ok { assert.EqualValues(t, "postgresql.database.name-val", val.Str()) } + val, ok = res.Attributes().Get("postgresql.db.version") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "postgresql.db.version-val", val.Str()) + } val, ok = res.Attributes().Get("postgresql.index.name") assert.True(t, ok) if ok { diff --git a/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml b/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml index 0b27c1df6325..94212ba64f84 100644 --- a/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml @@ -15,6 +15,8 @@ all_set: enabled: true postgresql.blocks_read: enabled: true + postgresql.buffer_hit: + enabled: true postgresql.commits: enabled: true postgresql.connection.count: @@ -33,14 +35,28 @@ all_set: enabled: true postgresql.index.size: enabled: true + postgresql.live_rows: + enabled: true postgresql.operations: enabled: true + postgresql.query.count: + enabled: true + postgresql.query.total_exec_time: + enabled: true postgresql.replication.data_delay: enabled: true postgresql.rollbacks: enabled: true postgresql.rows: enabled: true + postgresql.rows_deleted: + enabled: true + postgresql.rows_fetched: + enabled: true + postgresql.rows_inserted: + enabled: true + postgresql.rows_updated: + enabled: true postgresql.sequential_scans: enabled: true postgresql.table.count: @@ -60,6 +76,8 @@ all_set: resource_attributes: postgresql.database.name: enabled: true + postgresql.db.version: + enabled: true postgresql.index.name: enabled: true postgresql.schema.name: @@ -82,6 +100,8 @@ none_set: enabled: false postgresql.blocks_read: enabled: false + postgresql.buffer_hit: + enabled: false postgresql.commits: enabled: false postgresql.connection.count: @@ -100,14 +120,28 @@ none_set: enabled: false postgresql.index.size: enabled: false + postgresql.live_rows: + enabled: false postgresql.operations: enabled: false + postgresql.query.count: + enabled: false + postgresql.query.total_exec_time: + enabled: false postgresql.replication.data_delay: enabled: false postgresql.rollbacks: enabled: false postgresql.rows: enabled: false + postgresql.rows_deleted: + enabled: false + 
postgresql.rows_fetched: + enabled: false + postgresql.rows_inserted: + enabled: false + postgresql.rows_updated: + enabled: false postgresql.sequential_scans: enabled: false postgresql.table.count: @@ -127,6 +161,8 @@ none_set: resource_attributes: postgresql.database.name: enabled: false + postgresql.db.version: + enabled: false postgresql.index.name: enabled: false postgresql.schema.name: @@ -139,6 +175,10 @@ filter_set_include: enabled: true metrics_include: - regexp: ".*" + postgresql.db.version: + enabled: true + metrics_include: + - regexp: ".*" postgresql.index.name: enabled: true metrics_include: @@ -157,6 +197,10 @@ filter_set_exclude: enabled: true metrics_exclude: - strict: "postgresql.database.name-val" + postgresql.db.version: + enabled: true + metrics_exclude: + - strict: "postgresql.db.version-val" postgresql.index.name: enabled: true metrics_exclude: diff --git a/receiver/postgresqlreceiver/metadata.yaml b/receiver/postgresqlreceiver/metadata.yaml index 912ffa5f2690..5ae9e5415ad4 100644 --- a/receiver/postgresqlreceiver/metadata.yaml +++ b/receiver/postgresqlreceiver/metadata.yaml @@ -26,6 +26,10 @@ resource_attributes: description: The name of the index on a table. enabled: true type: string + postgresql.db.version: + description: The version of the PostgreSQL database + enabled: true + type: string attributes: bg_buffer_source: @@ -88,6 +92,18 @@ attributes: description: The operation which is responsible for the lag. type: string enum: [flush, replay, write] + relation_name: + description: name of the relation + type: string + dbname: + description: name of the database + type: string + query_text: + description: Text of a representative statement + type: string + query_id: + description: Hash code to identify identical normalized queries. + type: string metrics: postgresql.bgwriter.buffers.allocated: @@ -187,7 +203,7 @@ metrics: postgresql.connection.max: enabled: true description: Configured maximum number of client connections allowed - unit: "{connections}" + unit: "{connection}" gauge: value_type: int postgresql.rows: @@ -311,7 +327,6 @@ metrics: value_type: double extended_documentation: | This metric requires WAL to be enabled with at least one replica. - postgresql.connection.count: enabled: true description: The number of active connections to this database. If DBM is enabled, unit: '{connection}' gauge: value_type: int - + + # DBM metrics: + postgresql.query.total_exec_time: + enabled: true + description: Total wait time of the normalised timed events in nanoseconds. + unit: ns + sum: + value_type: int + monotonic: false + aggregation_temporality: cumulative + attributes: [query_text, query_id] + + postgresql.query.count: + enabled: true + description: Number of times the statement was executed. + unit: 1 + sum: + value_type: int + monotonic: false + aggregation_temporality: cumulative + attributes: [query_text, query_id] + + postgresql.rows_deleted: + attributes: + - relation_name + enabled: true + description: Rows deleted by queries in this db, tagged with relation name. + unit: '{row}/s' + gauge: + value_type: int + + postgresql.rows_fetched: + attributes: + - relation_name + enabled: true + description: Rows fetched by queries in this db, tagged with relation name. + unit: '{row}/s' + gauge: + value_type: int + + postgresql.rows_updated: + attributes: + - relation_name + enabled: true + description: Rows updated by queries in the db, tagged with relation name.
+ unit: '{row}/s' + gauge: + value_type: int + + postgresql.rows_inserted: + attributes: + - relation_name + enabled: true + description: Rows inserted by queries in the db, tagged with relation name. + unit: '{row}/s' + gauge: + value_type: int + + postgresql.live_rows: + attributes: + - relation_name + enabled: true + description: The approximate number of live rows, tagged with relation name. + unit: '{row}' + gauge: + value_type: int + + postgresql.buffer_hit: + attributes: + - dbname + enabled: true + description: The number of disk block hits in the buffer cache, thereby avoiding database reads, tagged with database name. + unit: '{hit}/s' + gauge: + value_type: int + tests: config: \ No newline at end of file diff --git a/receiver/postgresqlreceiver/scraper.go b/receiver/postgresqlreceiver/scraper.go index 39aa19d9cf35..fd86849d3923 100644 --- a/receiver/postgresqlreceiver/scraper.go +++ b/receiver/postgresqlreceiver/scraper.go @@ -155,19 +155,28 @@ func (p *postgreSQLScraper) scrape(ctx context.Context) (pmetric.Metrics, error) p.collectIndexes(ctx, now, dbClient, database, &errs) } + rb := p.mb.NewResourceBuilder() + rb.SetPostgresqlDatabaseName("N/A") + p.mb.RecordPostgresqlDatabaseCountDataPoint(now, int64(len(databases))) p.collectBGWriterStats(ctx, now, listClient, &errs) p.collectWalAge(ctx, now, listClient, &errs) p.collectReplicationStats(ctx, now, listClient, &errs) p.collectMaxConnections(ctx, now, listClient, &errs) p.collectDatabaseLocks(ctx, now, listClient, &errs) + p.collectRowStats(ctx, now, listClient, &errs) + p.collectQueryPerfStats(ctx, now, listClient, &errs) + p.collectBufferHits(ctx, now, listClient, &errs) p.collectActiveConnections(ctx, now, listClient, &errs) - rb := p.mb.NewResourceBuilder() - rb.SetPostgresqlDatabaseName("N/A") - p.mb.EmitForResource(metadata.WithResource(rb.Emit())) + version, err := listClient.getVersionString(ctx) + if err != nil { + errs.add(err) + } + rb.SetPostgresqlDbVersion(version) + p.mb.EmitForResource(metadata.WithResource(rb.Emit())) return p.mb.Emit(), errs.combine() } @@ -420,6 +429,68 @@ func (p *postgreSQLScraper) collectWalAge( p.mb.RecordPostgresqlWalAgeDataPoint(now, walAge) } +func (p *postgreSQLScraper) collectRowStats( + ctx context.Context, + now pcommon.Timestamp, + client client, + errs *errsMux, +) { + rs, err := client.getRowStats(ctx) + + if err != nil { + errs.addPartial(err) + return + } + // Emit one data point per relation; stats without a corresponding metric in metadata.yaml stay commented out below. + for _, s := range rs { + // p.mb.RecordPostgresqlRowsReturnedDataPoint(now, s.rowsReturned, s.relationName) + p.mb.RecordPostgresqlRowsFetchedDataPoint(now, s.rowsFetched, s.relationName) + p.mb.RecordPostgresqlRowsInsertedDataPoint(now, s.rowsInserted, s.relationName) + p.mb.RecordPostgresqlRowsUpdatedDataPoint(now, s.rowsUpdated, s.relationName) + p.mb.RecordPostgresqlRowsDeletedDataPoint(now, s.rowsDeleted, s.relationName) + // p.mb.RecordPostgresqlRowsHotUpdatedDataPoint(now, s.rowsHotUpdated, s.relationName) + p.mb.RecordPostgresqlLiveRowsDataPoint(now, s.liveRows, s.relationName) + // p.mb.RecordPostgresqlDeadRowsDataPoint(now, s.deadRows, s.relationName) + } + +} + +func (p *postgreSQLScraper) collectQueryPerfStats( + ctx context.Context, + now pcommon.Timestamp, + client client, + errs *errsMux, +) { + queryStats, err := client.getQueryStats(ctx) + if err != nil { + errs.addPartial(err) + return + } + + for _, s := range queryStats { + p.mb.RecordPostgresqlQueryCountDataPoint(now, s.queryCount, s.queryText, s.queryId) + p.mb.RecordPostgresqlQueryTotalExecTimeDataPoint(now,
s.queryExecTime, s.queryText, s.queryId) + } +} + +func (p *postgreSQLScraper) collectBufferHits( + ctx context.Context, + now pcommon.Timestamp, + client client, + errs *errsMux, +) { + bhs, err := client.getBufferHit(ctx) + + if err != nil { + errs.addPartial(err) + return + } + + for _, s := range bhs { + p.mb.RecordPostgresqlBufferHitDataPoint(now, s.hits, s.dbName) + } +} + func (p *postgreSQLScraper) retrieveDatabaseStats( ctx context.Context, wg *sync.WaitGroup, diff --git a/receiver/postgresqlreceiver/scraper_test.go b/receiver/postgresqlreceiver/scraper_test.go index e660e892d869..daf9c36d952c 100644 --- a/receiver/postgresqlreceiver/scraper_test.go +++ b/receiver/postgresqlreceiver/scraper_test.go @@ -56,8 +56,14 @@ func TestScraper(t *testing.T) { expectedMetrics, err := golden.ReadMetrics(expectedFile) require.NoError(t, err) - require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, pmetrictest.IgnoreResourceMetricsOrder(), - pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp())) + require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, + pmetrictest.IgnoreMetricsOrder(), + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreMetricDataPointsOrder(), + pmetrictest.IgnoreStartTimestamp(), + pmetrictest.IgnoreTimestamp()), + ) + + } runTest(true, "expected_schemaattr.yaml") @@ -93,8 +99,13 @@ func TestScraperNoDatabaseSingle(t *testing.T) { expectedMetrics, err := golden.ReadMetrics(expectedFile) require.NoError(t, err) - require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, pmetrictest.IgnoreResourceMetricsOrder(), - pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp())) + require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, + pmetrictest.IgnoreMetricsOrder(), + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreMetricDataPointsOrder(), + pmetrictest.IgnoreStartTimestamp(), + pmetrictest.IgnoreTimestamp(), + )) cfg.Metrics.PostgresqlWalDelay.Enabled = false cfg.Metrics.PostgresqlDeadlocks.Enabled = false @@ -110,8 +121,13 @@ func TestScraperNoDatabaseSingle(t *testing.T) { expectedMetrics, err = golden.ReadMetrics(expectedFile) require.NoError(t, err) - require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, pmetrictest.IgnoreResourceMetricsOrder(), - pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp())) + require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, + pmetrictest.IgnoreMetricsOrder(), + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreMetricDataPointsOrder(), + pmetrictest.IgnoreStartTimestamp(), + pmetrictest.IgnoreTimestamp(), + )) } runTest(true, "expected_schemaattr.yaml", "expected_default_metrics_schemaattr.yaml") @@ -147,8 +163,13 @@ func TestScraperNoDatabaseMultipleWithoutPreciseLag(t *testing.T) { expectedMetrics, err := golden.ReadMetrics(expectedFile) require.NoError(t, err) - require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, pmetrictest.IgnoreResourceMetricsOrder(), - pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp())) + require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, + pmetrictest.IgnoreMetricsOrder(), + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreMetricDataPointsOrder(), +
pmetrictest.IgnoreStartTimestamp(), + pmetrictest.IgnoreTimestamp(), + )) } runTest(true, "expected_imprecise_lag_schemaattr.yaml") @@ -184,8 +205,13 @@ func TestScraperNoDatabaseMultiple(t *testing.T) { expectedMetrics, err := golden.ReadMetrics(expectedFile) require.NoError(t, err) fmt.Println(actualMetrics.ResourceMetrics()) - require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, pmetrictest.IgnoreResourceMetricsOrder(), - pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp())) + require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, + pmetrictest.IgnoreMetricsOrder(), + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreMetricDataPointsOrder(), + pmetrictest.IgnoreStartTimestamp(), + pmetrictest.IgnoreTimestamp(), + )) } runTest(true, "expected_schemaattr.yaml") @@ -222,8 +248,13 @@ func TestScraperWithResourceAttributeFeatureGate(t *testing.T) { expectedMetrics, err := golden.ReadMetrics(expectedFile) require.NoError(t, err) - require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, pmetrictest.IgnoreResourceMetricsOrder(), - pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp())) + require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, + pmetrictest.IgnoreMetricsOrder(), + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreMetricDataPointsOrder(), + pmetrictest.IgnoreStartTimestamp(), + pmetrictest.IgnoreTimestamp(), + )) } runTest(true, "expected_schemaattr.yaml") @@ -259,8 +290,13 @@ func TestScraperWithResourceAttributeFeatureGateSingle(t *testing.T) { expectedMetrics, err := golden.ReadMetrics(expectedFile) require.NoError(t, err) - require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, pmetrictest.IgnoreResourceMetricsOrder(), - pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp())) + require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, + pmetrictest.IgnoreMetricsOrder(), + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreMetricDataPointsOrder(), + pmetrictest.IgnoreStartTimestamp(), + pmetrictest.IgnoreTimestamp(), + )) } runTest(true, "expected_schemaattr.yaml") @@ -287,8 +323,13 @@ func TestScraperExcludeDatabase(t *testing.T) { expectedMetrics, err := golden.ReadMetrics(expectedFile) require.NoError(t, err) - require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, pmetrictest.IgnoreResourceMetricsOrder(), - pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp())) + require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, + pmetrictest.IgnoreMetricsOrder(), + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreMetricDataPointsOrder(), + pmetrictest.IgnoreStartTimestamp(), + pmetrictest.IgnoreTimestamp(), + )) } runTest(true, "exclude_schemaattr.yaml") @@ -340,6 +381,26 @@ func (m *mockClient) getIndexStats(ctx context.Context, database string) (map[in return args.Get(0).(map[indexIdentifer]indexStat), args.Error(1) } +func (m *mockClient) getQueryStats(ctx context.Context) ([]queryStats, error) { + args := m.Called(ctx) + return args.Get(0).([]queryStats), args.Error(1) +} + +func (m *mockClient) getBufferHit(ctx context.Context) ([]BufferHit, error) { + args := m.Called(ctx) + return args.Get(0).([]BufferHit), args.Error(1) +} + +func 
(m *mockClient) getRowStats(ctx context.Context) ([]RowStats, error) { + args := m.Called(ctx) + return args.Get(0).([]RowStats), args.Error(1) +} + +func (m *mockClient) getVersionString(ctx context.Context) (string, error) { + args := m.Called(ctx) + return args.Get(0).(string), args.Error(1) +} + func (m *mockClient) getBGWriterStats(ctx context.Context) (*bgStat, error) { args := m.Called(ctx) return args.Get(0).(*bgStat), args.Error(1) @@ -350,6 +411,11 @@ func (m *mockClient) getMaxConnections(ctx context.Context) (int64, error) { return args.Get(0).(int64), args.Error(1) } +func (m *mockClient) getActiveConnections(ctx context.Context) (int64, error) { + args := m.Called(ctx) + return args.Get(0).(int64), args.Error(1) +} + func (m *mockClient) getLatestWalAgeSeconds(ctx context.Context) (int64, error) { args := m.Called(ctx) return args.Get(0).(int64), args.Error(1) @@ -425,6 +491,8 @@ func (m *mockClient) initMocks(database string, schema string, databases []strin maxWritten: 11, }, nil) m.On("getMaxConnections", mock.Anything).Return(int64(100), nil) + m.On("getActiveConnections", mock.Anything).Return(int64(1), nil) + m.On("getVersionString", mock.Anything).Return("16.3 (Ubuntu 16.3-1.pgdg22.04+1)", nil) m.On("getLatestWalAgeSeconds", mock.Anything).Return(int64(3600), nil) m.On("getDatabaseLocks", mock.Anything).Return([]databaseLocks{ { @@ -476,6 +544,59 @@ func (m *mockClient) initMocks(database string, schema string, databases []strin writeLag: -1, }, }, nil) + m.On("getQueryStats", mock.Anything).Return([]queryStats{ + { + queryId: "6366587321661213570", + queryText: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department", + queryCount: 1, + queryExecTime: 16401, + }, + { + queryId: "7034792503091443675", + queryText: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname", + queryCount: 5, + queryExecTime: 416529, + }, + { + queryId: "-5872536860935463852", + queryText: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)", + queryCount: 1, + queryExecTime: 25141, + }, + }, nil) + + m.On("getBufferHit", mock.Anything).Return([]BufferHit{ + { + dbName: "", + hits: 2148, + }, + { + dbName: "postgres", + hits: 9053, + }, + { + dbName: "template1", + hits: 8527, + }, + { + dbName: "template0", + hits: 0, + }, + }, nil) + m.On("getRowStats", mock.Anything).Return([]RowStats{ + { + relationName: "public.table1", + rowsReturned: 41923, + rowsFetched: 0, + rowsInserted: 165, + rowsUpdated: 2, + rowsDeleted: 88, + rowsHotUpdated: 2, + liveRows: 77, + deadRows: 90, + }, + // A second relation could be added here to exercise multi-relation output. + }, nil) } else { table1 := "table1" table2 := "table2" diff --git a/receiver/postgresqlreceiver/testdata/scraper/multiple/exclude.yaml b/receiver/postgresqlreceiver/testdata/scraper/multiple/exclude.yaml index e8b3f6071a03..0877519332ed 100644 --- a/receiver/postgresqlreceiver/testdata/scraper/multiple/exclude.yaml +++ b/receiver/postgresqlreceiver/testdata/scraper/multiple/exclude.yaml @@ -1,7 +1,169 @@ resourceMetrics: - - resource: {} + - resource: + attributes: + - key: postgresql.database.name + value: + stringValue: "N/A" + - key: postgresql.db.version + value: + stringValue:
"16.3 (Ubuntu 16.3-1.pgdg22.04+1)" scopeMetrics: - metrics: + - description: The number of active connections to this database. If DBM is enabled, this metric is tagged with state, app, db and user + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: postgresql.connection.count + unit: '{connection}' + - description: Total wait time of the normalised timed events in nanaoseconds. + name: postgresql.query.total_exec_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "16401" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "416529" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "25141" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: ns + - description: Number of times the statement was executed. + name: postgresql.query.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "5" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: "1" + - description: The number of disk block hits in the buffer cache, thereby avoiding database reads, tagged with database name. + gauge: + dataPoints: + - asInt: "2148" + attributes: + - key: dbname + value: + stringValue: "" + - asInt: "9053" + attributes: + - key: dbname + value: + stringValue: "postgres" + - asInt: "8527" + attributes: + - key: dbname + value: + stringValue: "template1" + - asInt: "0" + attributes: + - key: dbname + value: + stringValue: "template0" + name: postgresql.buffer_hit + unit: '{hit}/s' + - description: Rows deleted by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "88" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_deleted + unit: '{row}/s' + - description: Rows fetched by queries in this db, tagged with relation name. 
+ gauge: + dataPoints: + - asInt: "0" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_fetched + unit: '{row}/s' + - description: Rows inserted by queries in the db, tagged with relation name. + gauge: + dataPoints: + - asInt: "165" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_inserted + unit: '{row}/s' + - description: Rows updated by queries in the db, tagged with relation name. + gauge: + dataPoints: + - asInt: "2" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_updated + unit: '{row}/s' + - description: The approximate number of live rows, tagged with relation name. + gauge: + dataPoints: + - asInt: "77" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.live_rows + unit: '{row}' - description: Number of buffers allocated. name: postgresql.bgwriter.buffers.allocated sum: @@ -106,7 +268,7 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: postgresql.connection.max - unit: '{connections}' + unit: '{connection}' - description: Number of user databases. name: postgresql.database.count sum: diff --git a/receiver/postgresqlreceiver/testdata/scraper/multiple/exclude_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/scraper/multiple/exclude_schemaattr.yaml index 79fe672e03d4..051043327677 100644 --- a/receiver/postgresqlreceiver/testdata/scraper/multiple/exclude_schemaattr.yaml +++ b/receiver/postgresqlreceiver/testdata/scraper/multiple/exclude_schemaattr.yaml @@ -1,7 +1,169 @@ resourceMetrics: - - resource: {} + - resource: + attributes: + - key: postgresql.database.name + value: + stringValue: "N/A" + - key: postgresql.db.version + value: + stringValue: "16.3 (Ubuntu 16.3-1.pgdg22.04+1)" scopeMetrics: - metrics: + - description: The number of active connections to this database. If DBM is enabled, this metric is tagged with state, app, db and user + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: postgresql.connection.count + unit: '{connection}' + - description: Total wait time of the normalised timed events in nanoseconds. + name: postgresql.query.total_exec_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "16401" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "416529" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "25141" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: ns + - description: Number of times the statement was executed.
+ name: postgresql.query.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "5" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: "1" + - description: The number of disk block hits in the buffer cache, thereby avoiding database reads, tagged with database name. + gauge: + dataPoints: + - asInt: "2148" + attributes: + - key: dbname + value: + stringValue: "" + - asInt: "9053" + attributes: + - key: dbname + value: + stringValue: "postgres" + - asInt: "8527" + attributes: + - key: dbname + value: + stringValue: "template1" + - asInt: "0" + attributes: + - key: dbname + value: + stringValue: "template0" + name: postgresql.buffer_hit + unit: '{hit}/s' + - description: Rows deleted by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "88" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_deleted + unit: '{row}/s' + - description: Rows fetched by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "0" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_fetched + unit: '{row}/s' + - description: Rows inserted by queries in the db, tagged with relation name. + gauge: + dataPoints: + - asInt: "165" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_inserted + unit: '{row}/s' + - description: Rows updated by queries in the db, tagged with relation name. + gauge: + dataPoints: + - asInt: "2" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_updated + unit: '{row}/s' + - description: The approximate number of live rows, tagged with relation name. + gauge: + dataPoints: + - asInt: "77" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.live_rows + unit: '{row}' - description: Number of buffers allocated. name: postgresql.bgwriter.buffers.allocated sum: @@ -106,7 +268,7 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: postgresql.connection.max - unit: '{connections}' + unit: '{connection}' - description: Number of user databases. 
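The golden entries above are simply the YAML serialization of pdata structures built by the generated metrics builder. As a rough sketch of that mapping (assumed shape for illustration only, not code from this change), the `postgresql.buffer_hit` data point for the `postgres` database corresponds to:

```go
// Sketch of the pdata equivalent of the buffer_hit golden entry above
// (illustration only; the receiver builds this via its generated builder).
m := pmetric.NewMetrics()
sm := m.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()
metric := sm.Metrics().AppendEmpty()
metric.SetName("postgresql.buffer_hit")
metric.SetUnit("{hit}/s")
dp := metric.SetEmptyGauge().DataPoints().AppendEmpty()
dp.SetIntValue(9053) // serialized as asInt: "9053"
dp.Attributes().PutStr("dbname", "postgres")
```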
name: postgresql.database.count sum: diff --git a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected.yaml b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected.yaml index 1f030adf956a..70dd87714e0f 100644 --- a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected.yaml +++ b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected.yaml @@ -1,7 +1,169 @@ resourceMetrics: - - resource: {} + - resource: + attributes: + - key: postgresql.database.name + value: + stringValue: "N/A" + - key: postgresql.db.version + value: + stringValue: "16.3 (Ubuntu 16.3-1.pgdg22.04+1)" scopeMetrics: - metrics: + - description: The number of active connections to this database. If DBM is enabled, this metric is tagged with state, app, db and user + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: postgresql.connection.count + unit: '{connection}' + - description: Total wait time of the normalised timed events in nanoseconds. + name: postgresql.query.total_exec_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "16401" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "416529" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "25141" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: ns + - description: Number of times the statement was executed. + name: postgresql.query.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "5" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: "1" + - description: The number of disk block hits in the buffer cache, thereby avoiding database reads, tagged with database name.
+ gauge: + dataPoints: + - asInt: "2148" + attributes: + - key: dbname + value: + stringValue: "" + - asInt: "9053" + attributes: + - key: dbname + value: + stringValue: "postgres" + - asInt: "8527" + attributes: + - key: dbname + value: + stringValue: "template1" + - asInt: "0" + attributes: + - key: dbname + value: + stringValue: "template0" + name: postgresql.buffer_hit + unit: '{hit}/s' + - description: Rows deleted by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "88" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_deleted + unit: '{row}/s' + - description: Rows fetched by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "0" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_fetched + unit: '{row}/s' + - description: Rows inserted by queries in the db, tagged with relation name. + gauge: + dataPoints: + - asInt: "165" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_inserted + unit: '{row}/s' + - description: Rows updated by queries in the db, tagged with relation name. + gauge: + dataPoints: + - asInt: "2" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_updated + unit: '{row}/s' + - description: The approximate number of live rows, tagged with relation name. + gauge: + dataPoints: + - asInt: "77" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.live_rows + unit: '{row}' - description: Number of buffers allocated. name: postgresql.bgwriter.buffers.allocated sum: @@ -106,7 +268,7 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: postgresql.connection.max - unit: '{connections}' + unit: '{connection}' - description: Number of user databases. name: postgresql.database.count sum: diff --git a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag.yaml b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag.yaml index aa2f5eebe41d..1815ce75da4e 100644 --- a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag.yaml +++ b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag.yaml @@ -1,7 +1,169 @@ resourceMetrics: - - resource: {} + - resource: + attributes: + - key: postgresql.database.name + value: + stringValue: "N/A" + - key: postgresql.db.version + value: + stringValue: "16.3 (Ubuntu 16.3-1.pgdg22.04+1)" scopeMetrics: - metrics: + - description: The number of active connections to this database. If DBM is enabled, this metric is tagged with state, app, db and user + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: postgresql.connection.count + unit: '{connection}' + - description: Total wait time of the normalised timed events in nanoseconds.
+ name: postgresql.query.total_exec_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "16401" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "416529" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "25141" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: ns + - description: Number of times the statement was executed. + name: postgresql.query.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "5" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: "1" + - description: The number of disk block hits in the buffer cache, thereby avoiding database reads, tagged with database name. + gauge: + dataPoints: + - asInt: "2148" + attributes: + - key: dbname + value: + stringValue: "" + - asInt: "9053" + attributes: + - key: dbname + value: + stringValue: "postgres" + - asInt: "8527" + attributes: + - key: dbname + value: + stringValue: "template1" + - asInt: "0" + attributes: + - key: dbname + value: + stringValue: "template0" + name: postgresql.buffer_hit + unit: '{hit}/s' + - description: Rows deleted by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "88" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_deleted + unit: '{row}/s' + - description: Rows fetched by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "0" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_fetched + unit: '{row}/s' + - description: Rows inserted by queries in the db, tagged with relation name. + gauge: + dataPoints: + - asInt: "165" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_inserted + unit: '{row}/s' + - description: Rows updated by queries in the db, tagged with relation name. 
+ gauge: + dataPoints: + - asInt: "2" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_updated + unit: '{row}/s' + - description: The approximate number of live rows, tagged with relation name. + gauge: + dataPoints: + - asInt: "77" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.live_rows + unit: '{row}' - description: Number of buffers allocated. name: postgresql.bgwriter.buffers.allocated sum: @@ -106,7 +268,7 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: postgresql.connection.max - unit: '{connections}' + unit: '{connection}' - description: Number of user databases. name: postgresql.database.count sum: diff --git a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag_schemaattr.yaml index 2b0d4fc8ace5..c7b85dc79d02 100644 --- a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag_schemaattr.yaml +++ b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag_schemaattr.yaml @@ -1,7 +1,169 @@ resourceMetrics: - - resource: {} + - resource: + attributes: + - key: postgresql.database.name + value: + stringValue: "N/A" + - key: postgresql.db.version + value: + stringValue: "16.3 (Ubuntu 16.3-1.pgdg22.04+1)" scopeMetrics: - metrics: + - description: The number of active connections to this database. If DBM is enabled, this metric is tagged with state, app, db and user + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: postgresql.connection.count + unit: '{connection}' + - description: Total wait time of the normalised timed events in nanoseconds. + name: postgresql.query.total_exec_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "16401" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "416529" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "25141" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: ns + - description: Number of times the statement was executed.
+ name: postgresql.query.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "5" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: "1" + - description: The number of disk block hits in the buffer cache, thereby avoiding database reads, tagged with database name. + gauge: + dataPoints: + - asInt: "2148" + attributes: + - key: dbname + value: + stringValue: "" + - asInt: "9053" + attributes: + - key: dbname + value: + stringValue: "postgres" + - asInt: "8527" + attributes: + - key: dbname + value: + stringValue: "template1" + - asInt: "0" + attributes: + - key: dbname + value: + stringValue: "template0" + name: postgresql.buffer_hit + unit: '{hit}/s' + - description: Rows deleted by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "88" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_deleted + unit: '{row}/s' + - description: Rows fetched by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "0" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_fetched + unit: '{row}/s' + - description: Rows inserted by queries in the db, tagged with relation name. + gauge: + dataPoints: + - asInt: "165" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_inserted + unit: '{row}/s' + - description: Rows updated by queries in the db, tagged with relation name. + gauge: + dataPoints: + - asInt: "2" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_updated + unit: '{row}/s' + - description: The approximate number of live rows, tagged with relation name. + gauge: + dataPoints: + - asInt: "77" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.live_rows + unit: '{row}' - description: Number of buffers allocated. name: postgresql.bgwriter.buffers.allocated sum: @@ -106,7 +268,7 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: postgresql.connection.max - unit: '{connections}' + unit: '{connection}' - description: Number of user databases. 
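In these fixtures, `aggregationTemporality: 2` is the serialized value of `pmetric.AggregationTemporalityCumulative`, matching the non-monotonic cumulative sums declared for the query metrics in metadata.yaml. A minimal sketch of the equivalent pdata construction (illustration only, not code from this change; `sm` is a `pmetric.ScopeMetrics` as in the earlier sketch):

```go
// Sketch: the pdata shape behind a postgresql.query.count golden entry.
metric := sm.Metrics().AppendEmpty()
metric.SetName("postgresql.query.count")
metric.SetUnit("1")
s := metric.SetEmptySum()
s.SetIsMonotonic(false)
s.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) // == 2
dp := s.DataPoints().AppendEmpty()
dp.SetIntValue(5)
dp.Attributes().PutStr("query_id", "7034792503091443675")
dp.Attributes().PutStr("query_text", "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname")
```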
name: postgresql.database.count sum: diff --git a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_schemaattr.yaml index 84839f8820e2..e6ab56ef2b14 100644 --- a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_schemaattr.yaml +++ b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_schemaattr.yaml @@ -1,7 +1,169 @@ resourceMetrics: - - resource: {} + - resource: + attributes: + - key: postgresql.database.name + value: + stringValue: "N/A" + - key: postgresql.db.version + value: + stringValue: "16.3 (Ubuntu 16.3-1.pgdg22.04+1)" scopeMetrics: - metrics: + - description: The number of active connections to this database. If DBM is enabled, this metric is tagged with state, app, db and user + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: postgresql.connection.count + unit: '{connection}' + - description: Total wait time of the normalised timed events in nanoseconds. + name: postgresql.query.total_exec_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "16401" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "416529" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "25141" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: ns + - description: Number of times the statement was executed. + name: postgresql.query.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "5" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: "1" + - description: The number of disk block hits in the buffer cache, thereby avoiding database reads, tagged with database name.
+ gauge: + dataPoints: + - asInt: "2148" + attributes: + - key: dbname + value: + stringValue: "" + - asInt: "9053" + attributes: + - key: dbname + value: + stringValue: "postgres" + - asInt: "8527" + attributes: + - key: dbname + value: + stringValue: "template1" + - asInt: "0" + attributes: + - key: dbname + value: + stringValue: "template0" + name: postgresql.buffer_hit + unit: '{hit}/s' + - description: Rows deleted by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "88" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_deleted + unit: '{row}/s' + - description: Rows fetched by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "0" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_fetched + unit: '{row}/s' + - description: Rows inserted by queries in the db, tagged with relation name. + gauge: + dataPoints: + - asInt: "165" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_inserted + unit: '{row}/s' + - description: Rows updated by queries in the db, tagged with relation name. + gauge: + dataPoints: + - asInt: "2" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_updated + unit: '{row}/s' + - description: The approximate number of live rows, tagged with relation name. + gauge: + dataPoints: + - asInt: "77" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.live_rows + unit: '{row}' - description: Number of buffers allocated. name: postgresql.bgwriter.buffers.allocated sum: @@ -106,7 +268,7 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: postgresql.connection.max - unit: '{connections}' + unit: '{connection}' - description: Number of user databases. name: postgresql.database.count sum: diff --git a/receiver/postgresqlreceiver/testdata/scraper/otel/expected.yaml b/receiver/postgresqlreceiver/testdata/scraper/otel/expected.yaml index b25ddb787449..595c3ab92b5e 100644 --- a/receiver/postgresqlreceiver/testdata/scraper/otel/expected.yaml +++ b/receiver/postgresqlreceiver/testdata/scraper/otel/expected.yaml @@ -1,7 +1,169 @@ resourceMetrics: - - resource: {} + - resource: + attributes: + - key: postgresql.database.name + value: + stringValue: "N/A" + - key: postgresql.db.version + value: + stringValue: "16.3 (Ubuntu 16.3-1.pgdg22.04+1)" scopeMetrics: - metrics: + - description: The number of active connections to this database. If DBM is enabled, this metric is tagged with state, app, db and user + gauge: + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: postgresql.connection.count + unit: '{connection}' + - description: Total wait time of the normalised timed events in nanoseconds.
+ name: postgresql.query.total_exec_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "16401" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "416529" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "25141" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: ns + - description: Number of times the statement was executed. + name: postgresql.query.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "5" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: "1" + - description: The number of disk block hits in the buffer cache, thereby avoiding database reads, tagged with database name. + gauge: + dataPoints: + - asInt: "2148" + attributes: + - key: dbname + value: + stringValue: "" + - asInt: "9053" + attributes: + - key: dbname + value: + stringValue: "postgres" + - asInt: "8527" + attributes: + - key: dbname + value: + stringValue: "template1" + - asInt: "0" + attributes: + - key: dbname + value: + stringValue: "template0" + name: postgresql.buffer_hit + unit: '{hit}/s' + - description: Rows deleted by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "88" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_deleted + unit: '{row}/s' + - description: Rows fetched by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "0" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_fetched + unit: '{row}/s' + - description: Rows inserted by queries in the db, tagged with relation name. + gauge: + dataPoints: + - asInt: "165" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_inserted + unit: '{row}/s' + - description: Rows updated by queries in the db, tagged with relation name. 
+            gauge:
+              dataPoints:
+                - asInt: "2"
+                  attributes:
+                    - key: relation_name
+                      value:
+                        stringValue: public.table1
+            name: postgresql.rows_updated
+            unit: '{row}/s'
+          - description: The approximate number of live rows, tagged with relation name.
+            gauge:
+              dataPoints:
+                - asInt: "77"
+                  attributes:
+                    - key: relation_name
+                      value:
+                        stringValue: public.table1
+            name: postgresql.live_rows
+            unit: '{row}'
           - description: Number of buffers allocated.
             name: postgresql.bgwriter.buffers.allocated
             sum:
@@ -106,7 +268,7 @@ resourceMetrics:
                   startTimeUnixNano: "1000000"
                   timeUnixNano: "2000000"
             name: postgresql.connection.max
-            unit: '{connections}'
+            unit: '{connection}'
           - description: Number of user databases.
             name: postgresql.database.count
             sum:
diff --git a/receiver/postgresqlreceiver/testdata/scraper/otel/expected_default_metrics.yaml b/receiver/postgresqlreceiver/testdata/scraper/otel/expected_default_metrics.yaml
index b2cd42fd84d3..4e3dad931af5 100644
--- a/receiver/postgresqlreceiver/testdata/scraper/otel/expected_default_metrics.yaml
+++ b/receiver/postgresqlreceiver/testdata/scraper/otel/expected_default_metrics.yaml
@@ -1,7 +1,169 @@
 resourceMetrics:
-  - resource: {}
+  - resource:
+      attributes:
+        - key: postgresql.database.name
+          value:
+            stringValue: "N/A"
+        - key: postgresql.db.version
+          value:
+            stringValue: "16.3 (Ubuntu 16.3-1.pgdg22.04+1)"
     scopeMetrics:
       - metrics:
+          - description: The number of active connections to this database. If DBM is enabled, this metric is tagged with state, app, db and user.
+            gauge:
+              dataPoints:
+                - asInt: "1"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: postgresql.connection.count
+            unit: '{connection}'
+          - description: Total wait time of the normalized timed events in nanoseconds.
+            name: postgresql.query.total_exec_time
+            sum:
+              aggregationTemporality: 2
+              dataPoints:
+                - asInt: "16401"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+                  attributes:
+                    - key: query_id
+                      value:
+                        stringValue: "6366587321661213570"
+                    - key: query_text
+                      value:
+                        stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department"
+                - asInt: "416529"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+                  attributes:
+                    - key: query_id
+                      value:
+                        stringValue: "7034792503091443675"
+                    - key: query_text
+                      value:
+                        stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname"
+                - asInt: "25141"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+                  attributes:
+                    - key: query_id
+                      value:
+                        stringValue: "-5872536860935463852"
+                    - key: query_text
+                      value:
+                        stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)"
+            unit: ns
+          - description: Number of times the statement was executed.
+ name: postgresql.query.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "5" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: "1" + - description: The number of disk block hits in the buffer cache, thereby avoiding database reads, tagged with database name. + gauge: + dataPoints: + - asInt: "2148" + attributes: + - key: dbname + value: + stringValue: "" + - asInt: "9053" + attributes: + - key: dbname + value: + stringValue: "postgres" + - asInt: "8527" + attributes: + - key: dbname + value: + stringValue: "template1" + - asInt: "0" + attributes: + - key: dbname + value: + stringValue: "template0" + name: postgresql.buffer_hit + unit: '{hit}/s' + - description: Rows deleted by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "88" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_deleted + unit: '{row}/s' + - description: Rows fetched by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "0" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_fetched + unit: '{row}/s' + - description: Rows inserted by queries in the db, tagged with relation name. + gauge: + dataPoints: + - asInt: "165" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_inserted + unit: '{row}/s' + - description: Rows updated by queries in the db, tagged with relation name. + gauge: + dataPoints: + - asInt: "2" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_updated + unit: '{row}/s' + - description: The approximate number of live rows, tagged with relation name. + gauge: + dataPoints: + - asInt: "77" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.live_rows + unit: '{row}' - description: Number of buffers allocated. name: postgresql.bgwriter.buffers.allocated sum: @@ -106,7 +268,7 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: postgresql.connection.max - unit: '{connections}' + unit: '{connection}' - description: Number of user databases. 
             name: postgresql.database.count
             sum:
diff --git a/receiver/postgresqlreceiver/testdata/scraper/otel/expected_default_metrics_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/scraper/otel/expected_default_metrics_schemaattr.yaml
index 7ae29a56aeff..28680ae728b0 100644
--- a/receiver/postgresqlreceiver/testdata/scraper/otel/expected_default_metrics_schemaattr.yaml
+++ b/receiver/postgresqlreceiver/testdata/scraper/otel/expected_default_metrics_schemaattr.yaml
@@ -1,7 +1,169 @@
 resourceMetrics:
-  - resource: {}
+  - resource:
+      attributes:
+        - key: postgresql.database.name
+          value:
+            stringValue: "N/A"
+        - key: postgresql.db.version
+          value:
+            stringValue: "16.3 (Ubuntu 16.3-1.pgdg22.04+1)"
     scopeMetrics:
       - metrics:
+          - description: The number of active connections to this database. If DBM is enabled, this metric is tagged with state, app, db and user.
+            gauge:
+              dataPoints:
+                - asInt: "1"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: postgresql.connection.count
+            unit: '{connection}'
+          - description: Total wait time of the normalized timed events in nanoseconds.
+            name: postgresql.query.total_exec_time
+            sum:
+              aggregationTemporality: 2
+              dataPoints:
+                - asInt: "16401"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+                  attributes:
+                    - key: query_id
+                      value:
+                        stringValue: "6366587321661213570"
+                    - key: query_text
+                      value:
+                        stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department"
+                - asInt: "416529"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+                  attributes:
+                    - key: query_id
+                      value:
+                        stringValue: "7034792503091443675"
+                    - key: query_text
+                      value:
+                        stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname"
+                - asInt: "25141"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+                  attributes:
+                    - key: query_id
+                      value:
+                        stringValue: "-5872536860935463852"
+                    - key: query_text
+                      value:
+                        stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)"
+            unit: ns
+          - description: Number of times the statement was executed.
+            name: postgresql.query.count
+            sum:
+              aggregationTemporality: 2
+              dataPoints:
+                - asInt: "1"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+                  attributes:
+                    - key: query_id
+                      value:
+                        stringValue: "6366587321661213570"
+                    - key: query_text
+                      value:
+                        stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department"
+                - asInt: "5"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+                  attributes:
+                    - key: query_id
+                      value:
+                        stringValue: "7034792503091443675"
+                    - key: query_text
+                      value:
+                        stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname"
+                - asInt: "1"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+                  attributes:
+                    - key: query_id
+                      value:
+                        stringValue: "-5872536860935463852"
+                    - key: query_text
+                      value:
+                        stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)"
+            unit: "1"
+          - description: The number of disk block hits in the buffer cache, thereby avoiding database reads, tagged with database name.
+            gauge:
+              dataPoints:
+                - asInt: "2148"
+                  attributes:
+                    - key: dbname
+                      value:
+                        stringValue: ""
+                - asInt: "9053"
+                  attributes:
+                    - key: dbname
+                      value:
+                        stringValue: "postgres"
+                - asInt: "8527"
+                  attributes:
+                    - key: dbname
+                      value:
+                        stringValue: "template1"
+                - asInt: "0"
+                  attributes:
+                    - key: dbname
+                      value:
+                        stringValue: "template0"
+            name: postgresql.buffer_hit
+            unit: '{hit}/s'
+          - description: Rows deleted by queries in this db, tagged with relation name.
+            gauge:
+              dataPoints:
+                - asInt: "88"
+                  attributes:
+                    - key: relation_name
+                      value:
+                        stringValue: public.table1
+            name: postgresql.rows_deleted
+            unit: '{row}/s'
+          - description: Rows fetched by queries in this db, tagged with relation name.
+            gauge:
+              dataPoints:
+                - asInt: "0"
+                  attributes:
+                    - key: relation_name
+                      value:
+                        stringValue: public.table1
+            name: postgresql.rows_fetched
+            unit: '{row}/s'
+          - description: Rows inserted by queries in the db, tagged with relation name.
+            gauge:
+              dataPoints:
+                - asInt: "165"
+                  attributes:
+                    - key: relation_name
+                      value:
+                        stringValue: public.table1
+            name: postgresql.rows_inserted
+            unit: '{row}/s'
+          - description: Rows updated by queries in the db, tagged with relation name.
+            gauge:
+              dataPoints:
+                - asInt: "2"
+                  attributes:
+                    - key: relation_name
+                      value:
+                        stringValue: public.table1
+            name: postgresql.rows_updated
+            unit: '{row}/s'
+          - description: The approximate number of live rows, tagged with relation name.
+            gauge:
+              dataPoints:
+                - asInt: "77"
+                  attributes:
+                    - key: relation_name
+                      value:
+                        stringValue: public.table1
+            name: postgresql.live_rows
+            unit: '{row}'
           - description: Number of buffers allocated.
             name: postgresql.bgwriter.buffers.allocated
             sum:
@@ -106,7 +268,7 @@ resourceMetrics:
                   startTimeUnixNano: "1000000"
                   timeUnixNano: "2000000"
             name: postgresql.connection.max
-            unit: '{connections}'
+            unit: '{connection}'
           - description: Number of user databases.
             name: postgresql.database.count
             sum:
diff --git a/receiver/postgresqlreceiver/testdata/scraper/otel/expected_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/scraper/otel/expected_schemaattr.yaml
index 7713307cbaf7..efd1e7b3d130 100644
--- a/receiver/postgresqlreceiver/testdata/scraper/otel/expected_schemaattr.yaml
+++ b/receiver/postgresqlreceiver/testdata/scraper/otel/expected_schemaattr.yaml
@@ -1,7 +1,169 @@
 resourceMetrics:
-  - resource: {}
+  - resource:
+      attributes:
+        - key: postgresql.database.name
+          value:
+            stringValue: "N/A"
+        - key: postgresql.db.version
+          value:
+            stringValue: "16.3 (Ubuntu 16.3-1.pgdg22.04+1)"
     scopeMetrics:
       - metrics:
+          - description: The number of active connections to this database. If DBM is enabled, this metric is tagged with state, app, db and user.
+            gauge:
+              dataPoints:
+                - asInt: "1"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: postgresql.connection.count
+            unit: '{connection}'
+          - description: Total wait time of the normalized timed events in nanoseconds.
+ name: postgresql.query.total_exec_time + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "16401" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "416529" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "25141" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: ns + - description: Number of times the statement was executed. + name: postgresql.query.count + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "6366587321661213570" + - key: query_text + value: + stringValue: "SELECT department, COUNT(*) AS num_employees FROM employees GROUP BY department" + - asInt: "5" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "7034792503091443675" + - key: query_text + value: + stringValue: "SELECT datname, count(*) as count from pg_stat_activity WHERE datname IN ($1) GROUP BY datname" + - asInt: "1" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + attributes: + - key: query_id + value: + stringValue: "-5872536860935463852" + - key: query_text + value: + stringValue: "SELECT MIN(salary) AS lowest_salary_in_highest_paying_dept FROM employees WHERE department = (SELECT department FROM employees GROUP BY department ORDER BY AVG(salary) DESC LIMIT $1)" + unit: "1" + - description: The number of disk block hits in the buffer cache, thereby avoiding database reads, tagged with database name. + gauge: + dataPoints: + - asInt: "2148" + attributes: + - key: dbname + value: + stringValue: "" + - asInt: "9053" + attributes: + - key: dbname + value: + stringValue: "postgres" + - asInt: "8527" + attributes: + - key: dbname + value: + stringValue: "template1" + - asInt: "0" + attributes: + - key: dbname + value: + stringValue: "template0" + name: postgresql.buffer_hit + unit: '{hit}/s' + - description: Rows deleted by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "88" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_deleted + unit: '{row}/s' + - description: Rows fetched by queries in this db, tagged with relation name. + gauge: + dataPoints: + - asInt: "0" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_fetched + unit: '{row}/s' + - description: Rows inserted by queries in the db, tagged with relation name. + gauge: + dataPoints: + - asInt: "165" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_inserted + unit: '{row}/s' + - description: Rows updated by queries in the db, tagged with relation name. 
+ gauge: + dataPoints: + - asInt: "2" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.rows_updated + unit: '{row}/s' + - description: The approximate number of live rows, tagged with relation name. + gauge: + dataPoints: + - asInt: "77" + attributes: + - key: relation_name + value: + stringValue: public.table1 + name: postgresql.live_rows + unit: '{row}' - description: Number of buffers allocated. name: postgresql.bgwriter.buffers.allocated sum: @@ -106,7 +268,7 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: postgresql.connection.max - unit: '{connections}' + unit: '{connection}' - description: Number of user databases. name: postgresql.database.count sum: