diff --git a/receiver/postgresqlreceiver/client.go b/receiver/postgresqlreceiver/client.go
index 7b0daa527789..0a95c917c423 100644
--- a/receiver/postgresqlreceiver/client.go
+++ b/receiver/postgresqlreceiver/client.go
@@ -46,6 +46,9 @@ type client interface {
 	getMaxConnections(ctx context.Context) (int64, error)
 	getIndexStats(ctx context.Context, database string) (map[indexIdentifer]indexStat, error)
 	listDatabases(ctx context.Context) ([]string, error)
+	getRowStats(ctx context.Context) ([]RowStats, error)
+	getQueryStats(ctx context.Context) ([]queryStats, error)
+	getBufferHit(ctx context.Context) ([]BufferHit, error)
 }
 
 type postgreSQLClient struct {
@@ -486,6 +489,165 @@ func (c *postgreSQLClient) getReplicationStats(ctx context.Context) ([]replicati
 	return rs, errors
 }
 
+type RowStats struct {
+	relationName   string
+	rowsReturned   int64
+	rowsFetched    int64
+	rowsInserted   int64
+	rowsUpdated    int64
+	rowsDeleted    int64
+	rowsHotUpdated int64
+	liveRows       int64
+	deadRows       int64
+}
+
+func (c *postgreSQLClient) getRowStats(ctx context.Context) ([]RowStats, error) {
+	query := `SELECT
+		relname,
+		pg_stat_get_tuples_returned(relid) AS rows_returned,
+		pg_stat_get_tuples_fetched(relid) AS rows_fetched,
+		pg_stat_get_tuples_inserted(relid) AS rows_inserted,
+		pg_stat_get_tuples_updated(relid) AS rows_updated,
+		pg_stat_get_tuples_deleted(relid) AS rows_deleted,
+		pg_stat_get_tuples_hot_updated(relid) AS rows_hot_updated,
+		pg_stat_get_live_tuples(relid) AS live_rows,
+		pg_stat_get_dead_tuples(relid) AS dead_rows
+	FROM
+		pg_stat_all_tables;
+	`
+
+	rows, err := c.client.QueryContext(ctx, query)
+	if err != nil {
+		return nil, fmt.Errorf("unable to query pg_stat_all_tables: %w", err)
+	}
+	defer rows.Close()
+
+	var rs []RowStats
+	var errors error
+
+	for rows.Next() {
+		var (
+			relname        sql.NullString
+			rowsReturned   sql.NullInt64
+			rowsFetched    sql.NullInt64
+			rowsInserted   sql.NullInt64
+			rowsUpdated    sql.NullInt64
+			rowsDeleted    sql.NullInt64
+			rowsHotUpdated sql.NullInt64
+			liveRows       sql.NullInt64
+			deadRows       sql.NullInt64
+		)
+
+		err := rows.Scan(
+			&relname,
+			&rowsReturned,
+			&rowsFetched,
+			&rowsInserted,
+			&rowsUpdated,
+			&rowsDeleted,
+			&rowsHotUpdated,
+			&liveRows,
+			&deadRows,
+		)
+		if err != nil {
+			errors = multierr.Append(errors, err)
+			continue
+		}
+
+		rs = append(rs, RowStats{
+			relname.String,
+			rowsReturned.Int64,
+			rowsFetched.Int64,
+			rowsInserted.Int64,
+			rowsUpdated.Int64,
+			rowsDeleted.Int64,
+			rowsHotUpdated.Int64,
+			liveRows.Int64,
+			deadRows.Int64,
+		})
+	}
+	return rs, errors
+}
+
+type queryStats struct {
+	queryID       string
+	queryText     string
+	queryCount    int64
+	queryExecTime int64
+}
+
+func (c *postgreSQLClient) getQueryStats(ctx context.Context) ([]queryStats, error) {
+	query := `SELECT
+		queryid,
+		query,
+		calls,
+		total_exec_time
+	FROM pg_stat_statements;
+	`
+
+	rows, err := c.client.QueryContext(ctx, query)
+	if err != nil {
+		return nil, fmt.Errorf("unable to query pg_stat_statements: %w", err)
+	}
+	defer rows.Close()
+
+	var qs []queryStats
+	var errors error
+	for rows.Next() {
+		var queryID, queryText string
+		var queryCount int64
+		var queryExecTime float64
+		err = rows.Scan(&queryID, &queryText, &queryCount, &queryExecTime)
+		if err != nil {
+			errors = multierr.Append(errors, err)
+			continue
+		}
+		// pg_stat_statements reports total_exec_time in milliseconds; convert to nanoseconds.
+		queryExecTimeNS := int64(queryExecTime * 1_000_000)
+		qs = append(qs, queryStats{
+			queryID:       queryID,
+			queryText:     queryText,
+			queryCount:    queryCount,
+			queryExecTime: queryExecTimeNS,
+		})
+	}
+	return qs, errors
+}
+
+type BufferHit struct {
+	dbName string
+	hits   int64
+}
+
+func (c *postgreSQLClient) getBufferHit(ctx context.Context) ([]BufferHit, error) {
+	query := `SELECT datname, blks_hit FROM pg_stat_database;`
+
+	rows, err := c.client.QueryContext(ctx, query)
+	if err != nil {
+		return nil, fmt.Errorf("unable to query pg_stat_database: %w", err)
+	}
+	defer rows.Close()
+
+	var bh []BufferHit
+	var errors error
+
+	for rows.Next() {
+		var dbname sql.NullString
+		var hits sql.NullInt64
+
+		err = rows.Scan(&dbname, &hits)
+		if err != nil {
+			errors = multierr.Append(errors, err)
+			continue
+		}
+		bh = append(bh, BufferHit{
+			dbName: dbname.String,
+			hits:   hits.Int64,
+		})
+	}
+	return bh, errors
+}
+
 func (c *postgreSQLClient) getLatestWalAgeSeconds(ctx context.Context) (int64, error) {
 	query := `SELECT
 	coalesce(last_archived_time, CURRENT_TIMESTAMP) AS last_archived_wal,
diff --git a/receiver/postgresqlreceiver/documentation.md b/receiver/postgresqlreceiver/documentation.md
index 24d8c2fef1fd..ceba4b88ab94 100644
--- a/receiver/postgresqlreceiver/documentation.md
+++ b/receiver/postgresqlreceiver/documentation.md
@@ -92,6 +92,20 @@ The number of blocks read.
 | ---- | ----------- | ------ |
 | source | The block read source type. | Str: ``heap_read``, ``heap_hit``, ``idx_read``, ``idx_hit``, ``toast_read``, ``toast_hit``, ``tidx_read``, ``tidx_hit`` |
 
+### postgresql.buffer_hit
+
+The number of times disk blocks were found in the buffer cache, preventing the need to read from the database. This metric is tagged with db.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {hit}/s | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| dbname | Name of the database | Any Str |
+
 ### postgresql.commits
 
 The number of commits.
@@ -140,6 +154,20 @@ The size of the index on disk.
 | ---- | ----------- | ---------- |
 | By | Gauge | Int |
 
+### postgresql.live_rows
+
+Enabled with `relations`. The estimated number of live rows. This metric is tagged with db, schema, table.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {row} | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| relation_name | Name of the relation | Any Str |
+
 ### postgresql.operations
 
 The number of db row operations.
@@ -154,6 +182,36 @@ The number of db row operations.
 | ---- | ----------- | ------ |
 | operation | The database operation. | Str: ``ins``, ``upd``, ``del``, ``hot_upd`` |
 
+### postgresql.query.count
+
+Number of times the statement was executed
+
+| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
+| ---- | ----------- | ---------- | ----------------------- | --------- |
+| 1 | Sum | Int | Cumulative | false |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| query_text | Text of a representative statement | Any Str |
+| query_id | Hash code to identify identical normalized queries. | Any Str |
+
+### postgresql.query.total_exec_time
+
+The total time spent executing the statement, in nanoseconds.
+
+| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic |
+| ---- | ----------- | ---------- | ----------------------- | --------- |
+| ns | Sum | Int | Cumulative | false |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| query_text | Text of a representative statement | Any Str |
+| query_id | Hash code to identify identical normalized queries. | Any Str |
+
 ### postgresql.replication.data_delay
 
 The amount of data delayed in replication.
@@ -190,6 +248,62 @@ The number of rows in the database.
 | ---- | ----------- | ------ |
 | state | The tuple (row) state. | Str: ``dead``, ``live`` |
 
+### postgresql.rows_deleted
+
+Enabled with `relations`. The number of rows deleted by queries in this database. This metric is tagged with db.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {row}/s | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| relation_name | Name of the relation | Any Str |
+
+### postgresql.rows_fetched
+
+The number of rows fetched by queries in this database. This metric is tagged with db.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {row}/s | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| relation_name | Name of the relation | Any Str |
+
+### postgresql.rows_inserted
+
+Enabled with `relations`. The number of rows inserted by queries in this database. This metric is tagged with db.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {row}/s | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| relation_name | Name of the relation | Any Str |
+
+### postgresql.rows_updated
+
+Enabled with `relations`. The number of rows updated by queries in this database. This metric is tagged with db.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {row}/s | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| relation_name | Name of the relation | Any Str |
+
 ### postgresql.table.count
 
 Number of user tables in a database.
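The two `postgresql.query.*` metrics documented above are read from the `pg_stat_statements` view, which is only available when the extension is preloaded (`shared_preload_libraries = 'pg_stat_statements'`) and created in the scraped database; otherwise `getQueryStats` will return an error on every scrape. A minimal standalone sketch (not part of this change, with a hypothetical connection string) that checks the prerequisite and mirrors the millisecond-to-nanosecond conversion the receiver applies:

```go
// Sketch only: assumes a local PostgreSQL instance and hypothetical
// credentials. The receiver itself connects through the lib/pq driver.
package main

import (
	"context"
	"database/sql"
	"fmt"
	"time"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "postgres://otelu:otelp@localhost:5432/otel?sslmode=disable")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// pg_stat_statements must be in shared_preload_libraries and created with
	// CREATE EXTENSION pg_stat_statements in the target database.
	var installed bool
	if err := db.QueryRowContext(ctx,
		`SELECT EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'pg_stat_statements')`,
	).Scan(&installed); err != nil {
		panic(err)
	}
	fmt.Println("pg_stat_statements installed:", installed)

	// total_exec_time (PostgreSQL 13+) is reported in milliseconds; the
	// receiver records postgresql.query.total_exec_time in nanoseconds.
	const msToNs = 1_000_000
	totalExecMs := 12.5
	fmt.Println("total_exec_time in ns:", int64(totalExecMs*msToNs))
}
```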
diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_config.go b/receiver/postgresqlreceiver/internal/metadata/generated_config.go
index 797b7d7f6eae..a00870293efb 100644
--- a/receiver/postgresqlreceiver/internal/metadata/generated_config.go
+++ b/receiver/postgresqlreceiver/internal/metadata/generated_config.go
@@ -32,6 +32,7 @@ type
MetricsConfig struct { PostgresqlBgwriterDuration MetricConfig `mapstructure:"postgresql.bgwriter.duration"` PostgresqlBgwriterMaxwritten MetricConfig `mapstructure:"postgresql.bgwriter.maxwritten"` PostgresqlBlocksRead MetricConfig `mapstructure:"postgresql.blocks_read"` + PostgresqlBufferHit MetricConfig `mapstructure:"postgresql.buffer_hit"` PostgresqlCommits MetricConfig `mapstructure:"postgresql.commits"` PostgresqlConnectionMax MetricConfig `mapstructure:"postgresql.connection.max"` PostgresqlDatabaseCount MetricConfig `mapstructure:"postgresql.database.count"` @@ -39,10 +40,17 @@ type MetricsConfig struct { PostgresqlDeadlocks MetricConfig `mapstructure:"postgresql.deadlocks"` PostgresqlIndexScans MetricConfig `mapstructure:"postgresql.index.scans"` PostgresqlIndexSize MetricConfig `mapstructure:"postgresql.index.size"` + PostgresqlLiveRows MetricConfig `mapstructure:"postgresql.live_rows"` PostgresqlOperations MetricConfig `mapstructure:"postgresql.operations"` + PostgresqlQueryCount MetricConfig `mapstructure:"postgresql.query.count"` + PostgresqlQueryTotalExecTime MetricConfig `mapstructure:"postgresql.query.total_exec_time"` PostgresqlReplicationDataDelay MetricConfig `mapstructure:"postgresql.replication.data_delay"` PostgresqlRollbacks MetricConfig `mapstructure:"postgresql.rollbacks"` PostgresqlRows MetricConfig `mapstructure:"postgresql.rows"` + PostgresqlRowsDeleted MetricConfig `mapstructure:"postgresql.rows_deleted"` + PostgresqlRowsFetched MetricConfig `mapstructure:"postgresql.rows_fetched"` + PostgresqlRowsInserted MetricConfig `mapstructure:"postgresql.rows_inserted"` + PostgresqlRowsUpdated MetricConfig `mapstructure:"postgresql.rows_updated"` PostgresqlSequentialScans MetricConfig `mapstructure:"postgresql.sequential_scans"` PostgresqlTableCount MetricConfig `mapstructure:"postgresql.table.count"` PostgresqlTableSize MetricConfig `mapstructure:"postgresql.table.size"` @@ -75,6 +83,9 @@ func DefaultMetricsConfig() MetricsConfig { PostgresqlBlocksRead: MetricConfig{ Enabled: true, }, + PostgresqlBufferHit: MetricConfig{ + Enabled: true, + }, PostgresqlCommits: MetricConfig{ Enabled: true, }, @@ -96,9 +107,18 @@ func DefaultMetricsConfig() MetricsConfig { PostgresqlIndexSize: MetricConfig{ Enabled: true, }, + PostgresqlLiveRows: MetricConfig{ + Enabled: true, + }, PostgresqlOperations: MetricConfig{ Enabled: true, }, + PostgresqlQueryCount: MetricConfig{ + Enabled: true, + }, + PostgresqlQueryTotalExecTime: MetricConfig{ + Enabled: true, + }, PostgresqlReplicationDataDelay: MetricConfig{ Enabled: true, }, @@ -108,6 +128,18 @@ func DefaultMetricsConfig() MetricsConfig { PostgresqlRows: MetricConfig{ Enabled: true, }, + PostgresqlRowsDeleted: MetricConfig{ + Enabled: true, + }, + PostgresqlRowsFetched: MetricConfig{ + Enabled: true, + }, + PostgresqlRowsInserted: MetricConfig{ + Enabled: true, + }, + PostgresqlRowsUpdated: MetricConfig{ + Enabled: true, + }, PostgresqlSequentialScans: MetricConfig{ Enabled: false, }, diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go b/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go index 670931ddf2ec..6d2bc572ab31 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go @@ -33,6 +33,7 @@ func TestMetricsBuilderConfig(t *testing.T) { PostgresqlBgwriterDuration: MetricConfig{Enabled: true}, PostgresqlBgwriterMaxwritten: MetricConfig{Enabled: true}, PostgresqlBlocksRead: 
MetricConfig{Enabled: true}, + PostgresqlBufferHit: MetricConfig{Enabled: true}, PostgresqlCommits: MetricConfig{Enabled: true}, PostgresqlConnectionMax: MetricConfig{Enabled: true}, PostgresqlDatabaseCount: MetricConfig{Enabled: true}, @@ -40,10 +41,17 @@ func TestMetricsBuilderConfig(t *testing.T) { PostgresqlDeadlocks: MetricConfig{Enabled: true}, PostgresqlIndexScans: MetricConfig{Enabled: true}, PostgresqlIndexSize: MetricConfig{Enabled: true}, + PostgresqlLiveRows: MetricConfig{Enabled: true}, PostgresqlOperations: MetricConfig{Enabled: true}, + PostgresqlQueryCount: MetricConfig{Enabled: true}, + PostgresqlQueryTotalExecTime: MetricConfig{Enabled: true}, PostgresqlReplicationDataDelay: MetricConfig{Enabled: true}, PostgresqlRollbacks: MetricConfig{Enabled: true}, PostgresqlRows: MetricConfig{Enabled: true}, + PostgresqlRowsDeleted: MetricConfig{Enabled: true}, + PostgresqlRowsFetched: MetricConfig{Enabled: true}, + PostgresqlRowsInserted: MetricConfig{Enabled: true}, + PostgresqlRowsUpdated: MetricConfig{Enabled: true}, PostgresqlSequentialScans: MetricConfig{Enabled: true}, PostgresqlTableCount: MetricConfig{Enabled: true}, PostgresqlTableSize: MetricConfig{Enabled: true}, @@ -70,6 +78,7 @@ func TestMetricsBuilderConfig(t *testing.T) { PostgresqlBgwriterDuration: MetricConfig{Enabled: false}, PostgresqlBgwriterMaxwritten: MetricConfig{Enabled: false}, PostgresqlBlocksRead: MetricConfig{Enabled: false}, + PostgresqlBufferHit: MetricConfig{Enabled: false}, PostgresqlCommits: MetricConfig{Enabled: false}, PostgresqlConnectionMax: MetricConfig{Enabled: false}, PostgresqlDatabaseCount: MetricConfig{Enabled: false}, @@ -77,10 +86,17 @@ func TestMetricsBuilderConfig(t *testing.T) { PostgresqlDeadlocks: MetricConfig{Enabled: false}, PostgresqlIndexScans: MetricConfig{Enabled: false}, PostgresqlIndexSize: MetricConfig{Enabled: false}, + PostgresqlLiveRows: MetricConfig{Enabled: false}, PostgresqlOperations: MetricConfig{Enabled: false}, + PostgresqlQueryCount: MetricConfig{Enabled: false}, + PostgresqlQueryTotalExecTime: MetricConfig{Enabled: false}, PostgresqlReplicationDataDelay: MetricConfig{Enabled: false}, PostgresqlRollbacks: MetricConfig{Enabled: false}, PostgresqlRows: MetricConfig{Enabled: false}, + PostgresqlRowsDeleted: MetricConfig{Enabled: false}, + PostgresqlRowsFetched: MetricConfig{Enabled: false}, + PostgresqlRowsInserted: MetricConfig{Enabled: false}, + PostgresqlRowsUpdated: MetricConfig{Enabled: false}, PostgresqlSequentialScans: MetricConfig{Enabled: false}, PostgresqlTableCount: MetricConfig{Enabled: false}, PostgresqlTableSize: MetricConfig{Enabled: false}, diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go b/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go index d2e265522c58..cadbdcdaed13 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go @@ -602,6 +602,57 @@ func newMetricPostgresqlBlocksRead(cfg MetricConfig) metricPostgresqlBlocksRead return m } +type metricPostgresqlBufferHit struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.buffer_hit metric with initial data. 
+func (m *metricPostgresqlBufferHit) init() { + m.data.SetName("postgresql.buffer_hit") + m.data.SetDescription("The number of times disk blocks were found in the buffer cache, preventing the need to read from the database. This metric is tagged with db.") + m.data.SetUnit("{hit}/s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricPostgresqlBufferHit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, dbnameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("dbname", dbnameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlBufferHit) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlBufferHit) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlBufferHit(cfg MetricConfig) metricPostgresqlBufferHit { + m := metricPostgresqlBufferHit{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricPostgresqlCommits struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -955,6 +1006,57 @@ func newMetricPostgresqlIndexSize(cfg MetricConfig) metricPostgresqlIndexSize { return m } +type metricPostgresqlLiveRows struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.live_rows metric with initial data. +func (m *metricPostgresqlLiveRows) init() { + m.data.SetName("postgresql.live_rows") + m.data.SetDescription("Enabled with `relations`. The estimated number of live rows. This metric is tagged with db, schema, table.") + m.data.SetUnit("{row}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricPostgresqlLiveRows) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("relation_name", relationNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlLiveRows) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricPostgresqlLiveRows) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlLiveRows(cfg MetricConfig) metricPostgresqlLiveRows { + m := metricPostgresqlLiveRows{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricPostgresqlOperations struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -1008,6 +1110,114 @@ func newMetricPostgresqlOperations(cfg MetricConfig) metricPostgresqlOperations return m } +type metricPostgresqlQueryCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.query.count metric with initial data. +func (m *metricPostgresqlQueryCount) init() { + m.data.SetName("postgresql.query.count") + m.data.SetDescription("Number of times the statement was executed") + m.data.SetUnit("1") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricPostgresqlQueryCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryTextAttributeValue string, queryIDAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("query_text", queryTextAttributeValue) + dp.Attributes().PutStr("query_id", queryIDAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlQueryCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlQueryCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlQueryCount(cfg MetricConfig) metricPostgresqlQueryCount { + m := metricPostgresqlQueryCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricPostgresqlQueryTotalExecTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.query.total_exec_time metric with initial data. 
+func (m *metricPostgresqlQueryTotalExecTime) init() {
+	m.data.SetName("postgresql.query.total_exec_time")
+	m.data.SetDescription("The total time spent executing the statement, in nanoseconds.")
+	m.data.SetUnit("ns")
+	m.data.SetEmptySum()
+	m.data.Sum().SetIsMonotonic(false)
+	m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricPostgresqlQueryTotalExecTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, queryTextAttributeValue string, queryIDAttributeValue string) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Sum().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+	dp.Attributes().PutStr("query_text", queryTextAttributeValue)
+	dp.Attributes().PutStr("query_id", queryIDAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricPostgresqlQueryTotalExecTime) updateCapacity() {
+	if m.data.Sum().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Sum().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricPostgresqlQueryTotalExecTime) emit(metrics pmetric.MetricSlice) {
+	if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricPostgresqlQueryTotalExecTime(cfg MetricConfig) metricPostgresqlQueryTotalExecTime {
+	m := metricPostgresqlQueryTotalExecTime{config: cfg}
+	if cfg.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
 type metricPostgresqlReplicationDataDelay struct {
 	data     pmetric.Metric   // data buffer for generated metric.
 	config   MetricConfig     // metric config provided by user.
 	capacity int              // max observed number of data points added to the metric.
@@ -1163,6 +1373,210 @@ func newMetricPostgresqlRows(cfg MetricConfig) metricPostgresqlRows {
 	return m
 }
 
+type metricPostgresqlRowsDeleted struct {
+	data     pmetric.Metric   // data buffer for generated metric.
+	config   MetricConfig     // metric config provided by user.
+	capacity int              // max observed number of data points added to the metric.
+}
+
+// init fills postgresql.rows_deleted metric with initial data.
+func (m *metricPostgresqlRowsDeleted) init() {
+	m.data.SetName("postgresql.rows_deleted")
+	m.data.SetDescription("Enabled with `relations`. The number of rows deleted by queries in this database. This metric is tagged with db.")
+	m.data.SetUnit("{row}/s")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricPostgresqlRowsDeleted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, relationNameAttributeValue string) {
+	if !m.config.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+	dp.Attributes().PutStr("relation_name", relationNameAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricPostgresqlRowsDeleted) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricPostgresqlRowsDeleted) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlRowsDeleted(cfg MetricConfig) metricPostgresqlRowsDeleted { + m := metricPostgresqlRowsDeleted{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricPostgresqlRowsFetched struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.rows_fetched metric with initial data. +func (m *metricPostgresqlRowsFetched) init() { + m.data.SetName("postgresql.rows_fetched") + m.data.SetDescription("The number of rows fetched by queries in this database. This metric is tagged with db.") + m.data.SetUnit("{row}/s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricPostgresqlRowsFetched) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("relation_name", relationNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlRowsFetched) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlRowsFetched) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlRowsFetched(cfg MetricConfig) metricPostgresqlRowsFetched { + m := metricPostgresqlRowsFetched{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricPostgresqlRowsInserted struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.rows_inserted metric with initial data. +func (m *metricPostgresqlRowsInserted) init() { + m.data.SetName("postgresql.rows_inserted") + m.data.SetDescription("Enabled with `relations`. The number of rows inserted by queries in this database. This metric is tagged with db.") + m.data.SetUnit("{row}/s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricPostgresqlRowsInserted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("relation_name", relationNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricPostgresqlRowsInserted) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlRowsInserted) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlRowsInserted(cfg MetricConfig) metricPostgresqlRowsInserted { + m := metricPostgresqlRowsInserted{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricPostgresqlRowsUpdated struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.rows_updated metric with initial data. +func (m *metricPostgresqlRowsUpdated) init() { + m.data.SetName("postgresql.rows_updated") + m.data.SetDescription("Enabled with `relations`. The number of rows updated by queries in this database. This metric is tagged with db.") + m.data.SetUnit("{row}/s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricPostgresqlRowsUpdated) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("relation_name", relationNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlRowsUpdated) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlRowsUpdated) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlRowsUpdated(cfg MetricConfig) metricPostgresqlRowsUpdated { + m := metricPostgresqlRowsUpdated{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricPostgresqlSequentialScans struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -1534,6 +1948,7 @@ type MetricsBuilder struct { metricPostgresqlBgwriterDuration metricPostgresqlBgwriterDuration metricPostgresqlBgwriterMaxwritten metricPostgresqlBgwriterMaxwritten metricPostgresqlBlocksRead metricPostgresqlBlocksRead + metricPostgresqlBufferHit metricPostgresqlBufferHit metricPostgresqlCommits metricPostgresqlCommits metricPostgresqlConnectionMax metricPostgresqlConnectionMax metricPostgresqlDatabaseCount metricPostgresqlDatabaseCount @@ -1541,10 +1956,17 @@ type MetricsBuilder struct { metricPostgresqlDeadlocks metricPostgresqlDeadlocks metricPostgresqlIndexScans metricPostgresqlIndexScans metricPostgresqlIndexSize metricPostgresqlIndexSize + metricPostgresqlLiveRows metricPostgresqlLiveRows metricPostgresqlOperations metricPostgresqlOperations + metricPostgresqlQueryCount metricPostgresqlQueryCount + metricPostgresqlQueryTotalExecTime metricPostgresqlQueryTotalExecTime metricPostgresqlReplicationDataDelay metricPostgresqlReplicationDataDelay metricPostgresqlRollbacks metricPostgresqlRollbacks metricPostgresqlRows metricPostgresqlRows + metricPostgresqlRowsDeleted metricPostgresqlRowsDeleted + metricPostgresqlRowsFetched metricPostgresqlRowsFetched + metricPostgresqlRowsInserted metricPostgresqlRowsInserted + metricPostgresqlRowsUpdated metricPostgresqlRowsUpdated metricPostgresqlSequentialScans metricPostgresqlSequentialScans metricPostgresqlTableCount metricPostgresqlTableCount metricPostgresqlTableSize metricPostgresqlTableSize @@ -1577,6 +1999,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricPostgresqlBgwriterDuration: newMetricPostgresqlBgwriterDuration(mbc.Metrics.PostgresqlBgwriterDuration), metricPostgresqlBgwriterMaxwritten: newMetricPostgresqlBgwriterMaxwritten(mbc.Metrics.PostgresqlBgwriterMaxwritten), metricPostgresqlBlocksRead: newMetricPostgresqlBlocksRead(mbc.Metrics.PostgresqlBlocksRead), + metricPostgresqlBufferHit: newMetricPostgresqlBufferHit(mbc.Metrics.PostgresqlBufferHit), metricPostgresqlCommits: newMetricPostgresqlCommits(mbc.Metrics.PostgresqlCommits), metricPostgresqlConnectionMax: newMetricPostgresqlConnectionMax(mbc.Metrics.PostgresqlConnectionMax), metricPostgresqlDatabaseCount: newMetricPostgresqlDatabaseCount(mbc.Metrics.PostgresqlDatabaseCount), @@ -1584,10 +2007,17 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricPostgresqlDeadlocks: newMetricPostgresqlDeadlocks(mbc.Metrics.PostgresqlDeadlocks), metricPostgresqlIndexScans: newMetricPostgresqlIndexScans(mbc.Metrics.PostgresqlIndexScans), metricPostgresqlIndexSize: newMetricPostgresqlIndexSize(mbc.Metrics.PostgresqlIndexSize), + metricPostgresqlLiveRows: newMetricPostgresqlLiveRows(mbc.Metrics.PostgresqlLiveRows), metricPostgresqlOperations: newMetricPostgresqlOperations(mbc.Metrics.PostgresqlOperations), + metricPostgresqlQueryCount: newMetricPostgresqlQueryCount(mbc.Metrics.PostgresqlQueryCount), + metricPostgresqlQueryTotalExecTime: newMetricPostgresqlQueryTotalExecTime(mbc.Metrics.PostgresqlQueryTotalExecTime), metricPostgresqlReplicationDataDelay: newMetricPostgresqlReplicationDataDelay(mbc.Metrics.PostgresqlReplicationDataDelay), metricPostgresqlRollbacks: newMetricPostgresqlRollbacks(mbc.Metrics.PostgresqlRollbacks), metricPostgresqlRows: newMetricPostgresqlRows(mbc.Metrics.PostgresqlRows), + metricPostgresqlRowsDeleted: newMetricPostgresqlRowsDeleted(mbc.Metrics.PostgresqlRowsDeleted), + metricPostgresqlRowsFetched: newMetricPostgresqlRowsFetched(mbc.Metrics.PostgresqlRowsFetched), + 
metricPostgresqlRowsInserted: newMetricPostgresqlRowsInserted(mbc.Metrics.PostgresqlRowsInserted), + metricPostgresqlRowsUpdated: newMetricPostgresqlRowsUpdated(mbc.Metrics.PostgresqlRowsUpdated), metricPostgresqlSequentialScans: newMetricPostgresqlSequentialScans(mbc.Metrics.PostgresqlSequentialScans), metricPostgresqlTableCount: newMetricPostgresqlTableCount(mbc.Metrics.PostgresqlTableCount), metricPostgresqlTableSize: newMetricPostgresqlTableSize(mbc.Metrics.PostgresqlTableSize), @@ -1663,6 +2093,7 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricPostgresqlBgwriterDuration.emit(ils.Metrics()) mb.metricPostgresqlBgwriterMaxwritten.emit(ils.Metrics()) mb.metricPostgresqlBlocksRead.emit(ils.Metrics()) + mb.metricPostgresqlBufferHit.emit(ils.Metrics()) mb.metricPostgresqlCommits.emit(ils.Metrics()) mb.metricPostgresqlConnectionMax.emit(ils.Metrics()) mb.metricPostgresqlDatabaseCount.emit(ils.Metrics()) @@ -1670,10 +2101,17 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricPostgresqlDeadlocks.emit(ils.Metrics()) mb.metricPostgresqlIndexScans.emit(ils.Metrics()) mb.metricPostgresqlIndexSize.emit(ils.Metrics()) + mb.metricPostgresqlLiveRows.emit(ils.Metrics()) mb.metricPostgresqlOperations.emit(ils.Metrics()) + mb.metricPostgresqlQueryCount.emit(ils.Metrics()) + mb.metricPostgresqlQueryTotalExecTime.emit(ils.Metrics()) mb.metricPostgresqlReplicationDataDelay.emit(ils.Metrics()) mb.metricPostgresqlRollbacks.emit(ils.Metrics()) mb.metricPostgresqlRows.emit(ils.Metrics()) + mb.metricPostgresqlRowsDeleted.emit(ils.Metrics()) + mb.metricPostgresqlRowsFetched.emit(ils.Metrics()) + mb.metricPostgresqlRowsInserted.emit(ils.Metrics()) + mb.metricPostgresqlRowsUpdated.emit(ils.Metrics()) mb.metricPostgresqlSequentialScans.emit(ils.Metrics()) mb.metricPostgresqlTableCount.emit(ils.Metrics()) mb.metricPostgresqlTableSize.emit(ils.Metrics()) @@ -1736,6 +2174,11 @@ func (mb *MetricsBuilder) RecordPostgresqlBlocksReadDataPoint(ts pcommon.Timesta mb.metricPostgresqlBlocksRead.recordDataPoint(mb.startTime, ts, val, sourceAttributeValue.String()) } +// RecordPostgresqlBufferHitDataPoint adds a data point to postgresql.buffer_hit metric. +func (mb *MetricsBuilder) RecordPostgresqlBufferHitDataPoint(ts pcommon.Timestamp, val int64, dbnameAttributeValue string) { + mb.metricPostgresqlBufferHit.recordDataPoint(mb.startTime, ts, val, dbnameAttributeValue) +} + // RecordPostgresqlCommitsDataPoint adds a data point to postgresql.commits metric. func (mb *MetricsBuilder) RecordPostgresqlCommitsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricPostgresqlCommits.recordDataPoint(mb.startTime, ts, val) @@ -1771,11 +2214,26 @@ func (mb *MetricsBuilder) RecordPostgresqlIndexSizeDataPoint(ts pcommon.Timestam mb.metricPostgresqlIndexSize.recordDataPoint(mb.startTime, ts, val) } +// RecordPostgresqlLiveRowsDataPoint adds a data point to postgresql.live_rows metric. +func (mb *MetricsBuilder) RecordPostgresqlLiveRowsDataPoint(ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + mb.metricPostgresqlLiveRows.recordDataPoint(mb.startTime, ts, val, relationNameAttributeValue) +} + // RecordPostgresqlOperationsDataPoint adds a data point to postgresql.operations metric. 
func (mb *MetricsBuilder) RecordPostgresqlOperationsDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue AttributeOperation) { mb.metricPostgresqlOperations.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String()) } +// RecordPostgresqlQueryCountDataPoint adds a data point to postgresql.query.count metric. +func (mb *MetricsBuilder) RecordPostgresqlQueryCountDataPoint(ts pcommon.Timestamp, val int64, queryTextAttributeValue string, queryIDAttributeValue string) { + mb.metricPostgresqlQueryCount.recordDataPoint(mb.startTime, ts, val, queryTextAttributeValue, queryIDAttributeValue) +} + +// RecordPostgresqlQueryTotalExecTimeDataPoint adds a data point to postgresql.query.total_exec_time metric. +func (mb *MetricsBuilder) RecordPostgresqlQueryTotalExecTimeDataPoint(ts pcommon.Timestamp, val int64, queryTextAttributeValue string, queryIDAttributeValue string) { + mb.metricPostgresqlQueryTotalExecTime.recordDataPoint(mb.startTime, ts, val, queryTextAttributeValue, queryIDAttributeValue) +} + // RecordPostgresqlReplicationDataDelayDataPoint adds a data point to postgresql.replication.data_delay metric. func (mb *MetricsBuilder) RecordPostgresqlReplicationDataDelayDataPoint(ts pcommon.Timestamp, val int64, replicationClientAttributeValue string) { mb.metricPostgresqlReplicationDataDelay.recordDataPoint(mb.startTime, ts, val, replicationClientAttributeValue) @@ -1791,6 +2249,26 @@ func (mb *MetricsBuilder) RecordPostgresqlRowsDataPoint(ts pcommon.Timestamp, va mb.metricPostgresqlRows.recordDataPoint(mb.startTime, ts, val, stateAttributeValue.String()) } +// RecordPostgresqlRowsDeletedDataPoint adds a data point to postgresql.rows_deleted metric. +func (mb *MetricsBuilder) RecordPostgresqlRowsDeletedDataPoint(ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + mb.metricPostgresqlRowsDeleted.recordDataPoint(mb.startTime, ts, val, relationNameAttributeValue) +} + +// RecordPostgresqlRowsFetchedDataPoint adds a data point to postgresql.rows_fetched metric. +func (mb *MetricsBuilder) RecordPostgresqlRowsFetchedDataPoint(ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + mb.metricPostgresqlRowsFetched.recordDataPoint(mb.startTime, ts, val, relationNameAttributeValue) +} + +// RecordPostgresqlRowsInsertedDataPoint adds a data point to postgresql.rows_inserted metric. +func (mb *MetricsBuilder) RecordPostgresqlRowsInsertedDataPoint(ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + mb.metricPostgresqlRowsInserted.recordDataPoint(mb.startTime, ts, val, relationNameAttributeValue) +} + +// RecordPostgresqlRowsUpdatedDataPoint adds a data point to postgresql.rows_updated metric. +func (mb *MetricsBuilder) RecordPostgresqlRowsUpdatedDataPoint(ts pcommon.Timestamp, val int64, relationNameAttributeValue string) { + mb.metricPostgresqlRowsUpdated.recordDataPoint(mb.startTime, ts, val, relationNameAttributeValue) +} + // RecordPostgresqlSequentialScansDataPoint adds a data point to postgresql.sequential_scans metric. 
func (mb *MetricsBuilder) RecordPostgresqlSequentialScansDataPoint(ts pcommon.Timestamp, val int64) { mb.metricPostgresqlSequentialScans.recordDataPoint(mb.startTime, ts, val) diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go index ffbad5ec9a66..792c8d0e84e5 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go @@ -82,6 +82,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordPostgresqlBlocksReadDataPoint(ts, 1, AttributeSourceHeapRead) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlBufferHitDataPoint(ts, 1, "dbname-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordPostgresqlCommitsDataPoint(ts, 1) @@ -109,10 +113,22 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordPostgresqlIndexSizeDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlLiveRowsDataPoint(ts, 1, "relation_name-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordPostgresqlOperationsDataPoint(ts, 1, AttributeOperationIns) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlQueryCountDataPoint(ts, 1, "query_text-val", "query_id-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlQueryTotalExecTimeDataPoint(ts, 1, "query_text-val", "query_id-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordPostgresqlReplicationDataDelayDataPoint(ts, 1, "replication_client-val") @@ -125,6 +141,22 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordPostgresqlRowsDataPoint(ts, 1, AttributeStateDead) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlRowsDeletedDataPoint(ts, 1, "relation_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlRowsFetchedDataPoint(ts, 1, "relation_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlRowsInsertedDataPoint(ts, 1, "relation_name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlRowsUpdatedDataPoint(ts, 1, "relation_name-val") + allMetricsCount++ mb.RecordPostgresqlSequentialScansDataPoint(ts, 1) @@ -287,6 +319,21 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("source") assert.True(t, ok) assert.EqualValues(t, "heap_read", attrVal.Str()) + case "postgresql.buffer_hit": + assert.False(t, validatedMetrics["postgresql.buffer_hit"], "Found a duplicate in the metrics slice: postgresql.buffer_hit") + validatedMetrics["postgresql.buffer_hit"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The number of times disk blocks were found in the buffer cache, preventing the need to read from the database. 
This metric is tagged with db.", ms.At(i).Description()) + assert.Equal(t, "{hit}/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("dbname") + assert.True(t, ok) + assert.EqualValues(t, "dbname-val", attrVal.Str()) case "postgresql.commits": assert.False(t, validatedMetrics["postgresql.commits"], "Found a duplicate in the metrics slice: postgresql.commits") validatedMetrics["postgresql.commits"] = true @@ -381,6 +428,21 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "postgresql.live_rows": + assert.False(t, validatedMetrics["postgresql.live_rows"], "Found a duplicate in the metrics slice: postgresql.live_rows") + validatedMetrics["postgresql.live_rows"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Enabled with `relations`. The estimated number of live rows. This metric is tagged with db, schema, table.", ms.At(i).Description()) + assert.Equal(t, "{row}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("relation_name") + assert.True(t, ok) + assert.EqualValues(t, "relation_name-val", attrVal.Str()) case "postgresql.operations": assert.False(t, validatedMetrics["postgresql.operations"], "Found a duplicate in the metrics slice: postgresql.operations") validatedMetrics["postgresql.operations"] = true @@ -398,6 +460,46 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("operation") assert.True(t, ok) assert.EqualValues(t, "ins", attrVal.Str()) + case "postgresql.query.count": + assert.False(t, validatedMetrics["postgresql.query.count"], "Found a duplicate in the metrics slice: postgresql.query.count") + validatedMetrics["postgresql.query.count"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of times the statement was executed", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, false, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("query_text") + assert.True(t, ok) + assert.EqualValues(t, "query_text-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_id") + assert.True(t, ok) + assert.EqualValues(t, "query_id-val", attrVal.Str()) + case "postgresql.query.total_exec_time": + assert.False(t, validatedMetrics["postgresql.query.total_exec_time"], "Found a duplicate in the metrics slice: postgresql.query.total_exec_time") + validatedMetrics["postgresql.query.total_exec_time"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + 
assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
+			assert.Equal(t, "The total time spent executing the statement, in nanoseconds.", ms.At(i).Description())
+			assert.Equal(t, "ns", ms.At(i).Unit())
+			assert.Equal(t, false, ms.At(i).Sum().IsMonotonic())
+			assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
+			dp := ms.At(i).Sum().DataPoints().At(0)
+			assert.Equal(t, start, dp.StartTimestamp())
+			assert.Equal(t, ts, dp.Timestamp())
+			assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+			assert.Equal(t, int64(1), dp.IntValue())
+			attrVal, ok := dp.Attributes().Get("query_text")
+			assert.True(t, ok)
+			assert.EqualValues(t, "query_text-val", attrVal.Str())
+			attrVal, ok = dp.Attributes().Get("query_id")
+			assert.True(t, ok)
+			assert.EqualValues(t, "query_id-val", attrVal.Str())
 		case "postgresql.replication.data_delay":
 			assert.False(t, validatedMetrics["postgresql.replication.data_delay"], "Found a duplicate in the metrics slice: postgresql.replication.data_delay")
 			validatedMetrics["postgresql.replication.data_delay"] = true
@@ -444,6 +546,66 @@ func TestMetricsBuilder(t *testing.T) {
 			attrVal, ok := dp.Attributes().Get("state")
 			assert.True(t, ok)
 			assert.EqualValues(t, "dead", attrVal.Str())
+		case "postgresql.rows_deleted":
+			assert.False(t, validatedMetrics["postgresql.rows_deleted"], "Found a duplicate in the metrics slice: postgresql.rows_deleted")
+			validatedMetrics["postgresql.rows_deleted"] = true
+			assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+			assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+			assert.Equal(t, "Enabled with `relations`. The number of rows deleted by queries in this database. This metric is tagged with db.", ms.At(i).Description())
+			assert.Equal(t, "{row}/s", ms.At(i).Unit())
+			dp := ms.At(i).Gauge().DataPoints().At(0)
+			assert.Equal(t, start, dp.StartTimestamp())
+			assert.Equal(t, ts, dp.Timestamp())
+			assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+			assert.Equal(t, int64(1), dp.IntValue())
+			attrVal, ok := dp.Attributes().Get("relation_name")
+			assert.True(t, ok)
+			assert.EqualValues(t, "relation_name-val", attrVal.Str())
+		case "postgresql.rows_fetched":
+			assert.False(t, validatedMetrics["postgresql.rows_fetched"], "Found a duplicate in the metrics slice: postgresql.rows_fetched")
+			validatedMetrics["postgresql.rows_fetched"] = true
+			assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+			assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+			assert.Equal(t, "The number of rows fetched by queries in this database. This metric is tagged with db.", ms.At(i).Description())
+			assert.Equal(t, "{row}/s", ms.At(i).Unit())
+			dp := ms.At(i).Gauge().DataPoints().At(0)
+			assert.Equal(t, start, dp.StartTimestamp())
+			assert.Equal(t, ts, dp.Timestamp())
+			assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+			assert.Equal(t, int64(1), dp.IntValue())
+			attrVal, ok := dp.Attributes().Get("relation_name")
+			assert.True(t, ok)
+			assert.EqualValues(t, "relation_name-val", attrVal.Str())
+		case "postgresql.rows_inserted":
+			assert.False(t, validatedMetrics["postgresql.rows_inserted"], "Found a duplicate in the metrics slice: postgresql.rows_inserted")
+			validatedMetrics["postgresql.rows_inserted"] = true
+			assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+			assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+			assert.Equal(t, "Enabled with `relations`. The number of rows inserted by queries in this database. This metric is tagged with db.", ms.At(i).Description())
+			assert.Equal(t, "{row}/s", ms.At(i).Unit())
+			dp := ms.At(i).Gauge().DataPoints().At(0)
+			assert.Equal(t, start, dp.StartTimestamp())
+			assert.Equal(t, ts, dp.Timestamp())
+			assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+			assert.Equal(t, int64(1), dp.IntValue())
+			attrVal, ok := dp.Attributes().Get("relation_name")
+			assert.True(t, ok)
+			assert.EqualValues(t, "relation_name-val", attrVal.Str())
+		case "postgresql.rows_updated":
+			assert.False(t, validatedMetrics["postgresql.rows_updated"], "Found a duplicate in the metrics slice: postgresql.rows_updated")
+			validatedMetrics["postgresql.rows_updated"] = true
+			assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
+			assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
+			assert.Equal(t, "Enabled with `relations`. The number of rows updated by queries in this database. This metric is tagged with db.", ms.At(i).Description())
+			assert.Equal(t, "{row}/s", ms.At(i).Unit())
+			dp := ms.At(i).Gauge().DataPoints().At(0)
+			assert.Equal(t, start, dp.StartTimestamp())
+			assert.Equal(t, ts, dp.Timestamp())
+			assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType())
+			assert.Equal(t, int64(1), dp.IntValue())
+			attrVal, ok := dp.Attributes().Get("relation_name")
+			assert.True(t, ok)
+			assert.EqualValues(t, "relation_name-val", attrVal.Str())
 		case "postgresql.sequential_scans":
 			assert.False(t, validatedMetrics["postgresql.sequential_scans"], "Found a duplicate in the metrics slice: postgresql.sequential_scans")
 			validatedMetrics["postgresql.sequential_scans"] = true
diff --git a/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml b/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml
index ba6335d13518..ac7ed5e8ff3b 100644
--- a/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml
+++ b/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml
@@ -15,6 +15,8 @@ all_set:
       enabled: true
     postgresql.blocks_read:
       enabled: true
+    postgresql.buffer_hit:
+      enabled: true
     postgresql.commits:
       enabled: true
     postgresql.connection.max:
@@ -29,14 +31,28 @@ all_set:
       enabled: true
     postgresql.index.size:
       enabled: true
+    postgresql.live_rows:
+      enabled: true
     postgresql.operations:
       enabled: true
+    postgresql.query.count:
+      enabled: true
+    postgresql.query.total_exec_time:
+      enabled: true
     postgresql.replication.data_delay:
       enabled: true
     postgresql.rollbacks:
       enabled: true
     postgresql.rows:
       enabled: true
+    postgresql.rows_deleted:
+      enabled: true
+    postgresql.rows_fetched:
+      enabled: true
+    postgresql.rows_inserted:
+      enabled: true
+    postgresql.rows_updated:
+      enabled: true
     postgresql.sequential_scans:
       enabled: true
     postgresql.table.count:
@@ -74,6 +90,8 @@ none_set:
       enabled: false
     postgresql.blocks_read:
       enabled: false
+    postgresql.buffer_hit:
+      enabled: false
     postgresql.commits:
       enabled: false
     postgresql.connection.max:
@@ -88,14 +106,28 @@ none_set:
       enabled: false
     postgresql.index.size:
       enabled: false
+    postgresql.live_rows:
+      enabled: false
     postgresql.operations:
       enabled: false
+    postgresql.query.count:
+      enabled: false
+    postgresql.query.total_exec_time:
+      enabled: false
     postgresql.replication.data_delay:
       enabled: false
     postgresql.rollbacks:
       enabled: false
     postgresql.rows:
       enabled: false
+    postgresql.rows_deleted:
+      enabled: false
+    postgresql.rows_fetched:
+      enabled: false
+    postgresql.rows_inserted:
+      enabled: false
+    postgresql.rows_updated:
+      enabled: false
     postgresql.sequential_scans:
       enabled: false
    postgresql.table.count:
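The `all_set`/`none_set` testdata above exercises the same per-metric toggles that users set in the receiver config. The equivalent switch is also available programmatically through the generated config types. A sketch only, assuming the standard mdatagen-generated `DefaultMetricsBuilderConfig` helper exists in this package (it is not shown in this diff):

```go
package postgresqlreceiver

import (
	"go.opentelemetry.io/collector/receiver"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver/internal/metadata"
)

// newBuilderWithoutQueryMetrics is a hypothetical helper mirroring the
// none_set entries above for just the two pg_stat_statements metrics.
func newBuilderWithoutQueryMetrics(settings receiver.CreateSettings) *metadata.MetricsBuilder {
	cfg := metadata.DefaultMetricsBuilderConfig()
	cfg.Metrics.PostgresqlQueryCount.Enabled = false
	cfg.Metrics.PostgresqlQueryTotalExecTime.Enabled = false
	return metadata.NewMetricsBuilder(cfg, settings)
}
```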
diff --git a/receiver/postgresqlreceiver/metadata.yaml b/receiver/postgresqlreceiver/metadata.yaml
index 14381ce46999..beaa2454904f 100644
--- a/receiver/postgresqlreceiver/metadata.yaml
+++ b/receiver/postgresqlreceiver/metadata.yaml
@@ -74,6 +74,18 @@ attributes:
     description: The operation which is responsible for the lag.
     type: string
     enum: [flush, replay, write]
+  relation_name:
+    description: name of the relation
+    type: string
+  dbname:
+    description: name of the database
+    type: string
+  query_text:
+    description: Text of a representative statement
+    type: string
+  query_id:
+    description: Hash code to identify identical normalized queries.
+    type: string
 
 metrics:
   postgresql.bgwriter.buffers.allocated:
@@ -281,3 +293,85 @@ metrics:
       value_type: int
     extended_documentation: |
      This metric requires WAL to be enabled with at least one replica.
+
+
+  # DBM metrics
+  postgresql.query.total_exec_time:
+    enabled: true
+    description: The total wait time of the summarized timed events in nanoseconds.
+    unit: ns
+    sum:
+      value_type: int
+      monotonic: false
+      aggregation_temporality: cumulative
+    attributes: [query_text, query_id]
+
+  postgresql.query.count:
+    enabled: true
+    description: Number of times the statement was executed
+    unit: 1
+    sum:
+      value_type: int
+      monotonic: false
+      aggregation_temporality: cumulative
+    attributes: [query_text, query_id]
+
+  postgresql.rows_deleted:
+    attributes:
+      - relation_name
+    enabled: true
+    description: Enabled with `relations`. The number of rows deleted by queries in
+      this database. This metric is tagged with db.
+    unit: '{row}/s'
+    gauge:
+      value_type: int
+
+  postgresql.rows_fetched:
+    attributes:
+      - relation_name
+    enabled: true
+    description: The number of rows fetched by queries in this database. This metric
+      is tagged with db.
+    unit: '{row}/s'
+    gauge:
+      value_type: int
+
+  postgresql.rows_updated:
+    attributes:
+      - relation_name
+    enabled: true
+    description: Enabled with `relations`. The number of rows updated by queries in
+      this database. This metric is tagged with db.
+    unit: '{row}/s'
+    gauge:
+      value_type: int
+
+  postgresql.rows_inserted:
+    attributes:
+      - relation_name
+    enabled: true
+    description: Enabled with `relations`. The number of rows inserted by queries in
+      this database. This metric is tagged with db.
+    unit: '{row}/s'
+    gauge:
+      value_type: int
+
+  postgresql.live_rows:
+    attributes:
+      - relation_name
+    enabled: true
+    description: Enabled with `relations`. The estimated number of live rows. This metric
+      is tagged with db, schema, table.
+    unit: '{row}'
+    gauge:
+      value_type: int
+
+  postgresql.buffer_hit:
+    attributes:
+      - dbname
+    enabled: true
+    description: The number of times disk blocks were found in the buffer cache, preventing
+      the need to read from the database. This metric is tagged with db.
+    unit: '{hit}/s'
+    gauge:
+      value_type: int
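Because `postgresql.query.total_exec_time` is declared above as a cumulative, non-monotonic integer Sum with `query_text`/`query_id` attributes, a downstream consumer sees it as ordinary `pmetric` Sum data points. A hedged sketch of reading it back (the traversal helper is illustrative, not part of this change):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

// Sketch only: walk a pmetric.Metrics payload and print the data points of
// postgresql.query.total_exec_time, which the declaration above shapes as a
// cumulative Sum of int64 nanoseconds keyed by query_text/query_id.
func printQueryExecTimes(md pmetric.Metrics) {
	rms := md.ResourceMetrics()
	for i := 0; i < rms.Len(); i++ {
		sms := rms.At(i).ScopeMetrics()
		for j := 0; j < sms.Len(); j++ {
			ms := sms.At(j).Metrics()
			for k := 0; k < ms.Len(); k++ {
				m := ms.At(k)
				if m.Name() != "postgresql.query.total_exec_time" {
					continue
				}
				dps := m.Sum().DataPoints()
				for l := 0; l < dps.Len(); l++ {
					dp := dps.At(l)
					qid, _ := dp.Attributes().Get("query_id")
					fmt.Printf("query_id=%s total_exec_time=%dns\n", qid.Str(), dp.IntValue())
				}
			}
		}
	}
}

func main() {
	printQueryExecTimes(pmetric.NewMetrics())
}
```

The `monotonic: false` declaration presumably leaves room for counter resets (for example via `pg_stat_statements_reset()`), even though `calls` and `total_exec_time` normally only grow.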
diff --git a/receiver/postgresqlreceiver/scraper.go b/receiver/postgresqlreceiver/scraper.go
index 62f6fbf77025..193374ff2275 100644
--- a/receiver/postgresqlreceiver/scraper.go
+++ b/receiver/postgresqlreceiver/scraper.go
@@ -132,11 +132,13 @@ func (p *postgreSQLScraper) scrape(ctx context.Context) (pmetric.Metrics, error)
 	p.collectWalAge(ctx, now, listClient, &errs)
 	p.collectReplicationStats(ctx, now, listClient, &errs)
 	p.collectMaxConnections(ctx, now, listClient, &errs)
-
+	p.collectRowStats(ctx, now, listClient, &errs)
+	p.collectQueryPerfStats(ctx, now, listClient, &errs)
+
 	rb := p.mb.NewResourceBuilder()
 	rb.SetPostgresqlDatabaseName("N/A")
 	p.mb.EmitForResource(metadata.WithResource(rb.Emit()))
 
 	return p.mb.Emit(), errs.combine()
 }
 
@@ -329,6 +331,49 @@ func (p *postgreSQLScraper) collectWalAge(
 	p.mb.RecordPostgresqlWalAgeDataPoint(now, walAge)
 }
 
+func (p *postgreSQLScraper) collectRowStats(
+	ctx context.Context,
+	now pcommon.Timestamp,
+	client client,
+	errs *errsMux,
+) {
+	rs, err := client.getRowStats(ctx)
+	if err != nil {
+		errs.addPartial(err)
+		return
+	}
+	for _, s := range rs {
+		// rows_returned, rows_hot_updated, and dead_rows are fetched by the
+		// client but have no metric definitions in metadata.yaml yet, so the
+		// corresponding recorders stay commented out for now.
+		// p.mb.RecordPostgresqlRowsReturnedDataPoint(now, s.rowsReturned, s.relationName)
+		p.mb.RecordPostgresqlRowsFetchedDataPoint(now, s.rowsFetched, s.relationName)
+		p.mb.RecordPostgresqlRowsInsertedDataPoint(now, s.rowsInserted, s.relationName)
+		p.mb.RecordPostgresqlRowsUpdatedDataPoint(now, s.rowsUpdated, s.relationName)
+		p.mb.RecordPostgresqlRowsDeletedDataPoint(now, s.rowsDeleted, s.relationName)
+		// p.mb.RecordPostgresqlRowsHotUpdatedDataPoint(now, s.rowsHotUpdated, s.relationName)
+		p.mb.RecordPostgresqlLiveRowsDataPoint(now, s.liveRows, s.relationName)
+		// p.mb.RecordPostgresqlDeadRowsDataPoint(now, s.deadRows, s.relationName)
+	}
+}
+
+func (p *postgreSQLScraper) collectQueryPerfStats(
+	ctx context.Context,
+	now pcommon.Timestamp,
+	client client,
+	errs *errsMux,
+) {
+	queryStats, err := client.getQueryStats(ctx)
+	if err != nil {
+		errs.addPartial(err)
+		return
+	}
+
+	for _, s := range queryStats {
+		p.mb.RecordPostgresqlQueryCountDataPoint(now, s.queryCount, s.queryText, s.queryId)
+		// queryExecTime is already an int64 nanosecond value (the client
+		// converts from milliseconds), so it is recorded without a cast.
+		p.mb.RecordPostgresqlQueryTotalExecTimeDataPoint(now, s.queryExecTime, s.queryText, s.queryId)
+	}
+}
+
 func (p *postgreSQLScraper) retrieveDatabaseStats(
 	ctx context.Context,
 	wg *sync.WaitGroup,