diff --git a/receiver/mongodbreceiver/client.go b/receiver/mongodbreceiver/client.go
index 579b441eb136..758d5b2e55b2 100644
--- a/receiver/mongodbreceiver/client.go
+++ b/receiver/mongodbreceiver/client.go
@@ -15,6 +15,7 @@ import (
 	"go.mongodb.org/mongo-driver/mongo"
 	"go.mongodb.org/mongo-driver/mongo/options"
 	"go.uber.org/zap"
+	"golang.org/x/exp/slices"
 )
 
 // client is an interface that exposes functionality towards a mongo environment
@@ -34,6 +35,8 @@ type client interface {
 	JumboStats(ctx context.Context, DBName string) (bson.M, error)
 	CollectionStats(ctx context.Context, DBName, collectionName string) (bson.M, error)
 	ConnPoolStats(ctx context.Context, DBName string) (bson.M, error)
+	ProfilingStats(ctx context.Context, DBName string) (bson.M, error)
+	QueryStats(ctx context.Context, DBName string) ([]SlowOperationEvent, error)
 }
 
 // mongodbClient is a mongodb metric scraper client
@@ -397,3 +400,87 @@ func (c *mongodbClient) GetFsyncLockInfo(ctx context.Context) (bson.M, error) {
 
 	return fsynclockinfo, nil
 }
+
+type ProfilingStatus struct {
+	Level int32 `bson:"level"`
+	Slow  int32 `bson:"slowms"`
+}
+
+// ProfilingStats returns the result of db.runCommand({"profile": -1}), the equivalent of db.getProfilingStatus().
+// More information can be found here: https://www.mongodb.com/docs/manual/tutorial/manage-the-database-profiler/
+func (c *mongodbClient) ProfilingStats(ctx context.Context, database string) (bson.M, error) {
+	excludedDBs := []string{"local", "admin", "config", "test"}
+
+	if !slices.Contains(excludedDBs, database) {
+		cfgLevel := c.cfg.ProfilingLevel
+		cfgSlowms := c.cfg.SlowMs
+		var result bson.M
+
+		db := c.Database(database)
+		err := db.RunCommand(ctx, bson.D{{"profile", -1}}).Decode(&result)
+		if err != nil {
+			return nil, fmt.Errorf("unable to get profiling stats: %w", err)
+		}
+
+		level := result["was"].(int32)
+		slowms := result["slowms"].(int32)
+
+		if ((level != cfgLevel) && slices.Contains([]int32{0, 1, 2}, cfgLevel)) || (slowms != cfgSlowms) {
+			command := bson.D{
+				{"profile", cfgLevel},
+				{"slowms", cfgSlowms},
+			}
+			var profile bson.M
+			err = db.RunCommand(ctx, command).Decode(&profile)
+			if err != nil {
+				return nil, fmt.Errorf("unable to set profiling for database %s: %w", database, err)
+			}
+
+			result = bson.M{
+				"level":  profile["was"],
+				"slowms": profile["slowms"],
+			}
+			return result, nil
+		}
+
+		result = bson.M{
+			"level":  level,
+			"slowms": slowms,
+		}
+		return result, nil
+	}
+
+	return nil, fmt.Errorf("database %s is excluded from profiling stats", database)
+}
+
+// QueryStats returns slow operation events recorded in system.profile while profiling is enabled.
+// More information can be found here: https://www.mongodb.com/docs/manual/tutorial/manage-the-database-profiler/
+func (c *mongodbClient) QueryStats(ctx context.Context, database string) ([]SlowOperationEvent, error) {
+	excludedDBs := []string{"local", "admin", "config", "test"}
+
+	if !slices.Contains(excludedDBs, database) {
+		var result bson.M
+
+		db := c.Database(database)
+		err := db.RunCommand(ctx, bson.D{{"profile", -1}}).Decode(&result)
+		if err != nil {
+			return nil, fmt.Errorf("unable to get profiling stats: %w", err)
+		}
+
+		level := result["was"].(int32)
+
+		if slices.Contains([]int32{1, 2}, level) {
+			lastTs := time.Now().Add(-c.cfg.CollectionInterval - time.Second)
+			events, err := collectSlowOperations(ctx, c.Client, database, lastTs)
+			if err != nil {
+				return nil, fmt.Errorf("unable to get query stats: %w", err)
+			}
+			return events, nil
+		}
+	}
+
+	return nil, fmt.Errorf("unable to get query stats for database %s: profiling is disabled or the database is excluded", database)
+}
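For context, a scraper could consume the two new client methods per database roughly as follows. This is a hypothetical sketch, not part of the patch: the collectProfiling helper and the recording steps noted in comments are illustrative.

// collectProfiling polls the profiling status and recent slow operations for
// one database during a scrape (hypothetical usage of the new client methods).
func collectProfiling(ctx context.Context, c client, dbName string, logger *zap.Logger) {
	stats, err := c.ProfilingStats(ctx, dbName)
	if err != nil {
		logger.Debug("profiling stats unavailable", zap.String("database", dbName), zap.Error(err))
		return
	}
	// stats["level"] and stats["slowms"] would feed the mongodb.profiling.level
	// and mongodb.profiling.slowms gauges.
	_, _ = stats["level"], stats["slowms"]

	events, err := c.QueryStats(ctx, dbName)
	if err != nil {
		logger.Debug("query stats unavailable", zap.String("database", dbName), zap.Error(err))
		return
	}
	for _, event := range events {
		// Each event would become one mongodb.slow_operation.time data point,
		// carrying attributes such as ns, plan_summary, and query_signature.
		_ = event
	}
}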
diff --git a/receiver/mongodbreceiver/client_test.go b/receiver/mongodbreceiver/client_test.go
index 38af87839a3a..67d48110d270 100644
--- a/receiver/mongodbreceiver/client_test.go
+++ b/receiver/mongodbreceiver/client_test.go
@@ -102,6 +102,14 @@ func (fc *fakeClient) ConnPoolStats(ctx context.Context, dbName string) (bson.M,
 	args := fc.Called(ctx, dbName)
 	return args.Get(0).(bson.M), args.Error(1)
 }
+func (fc *fakeClient) ProfilingStats(ctx context.Context, dbName string) (bson.M, error) {
+	args := fc.Called(ctx, dbName)
+	return args.Get(0).(bson.M), args.Error(1)
+}
+func (fc *fakeClient) QueryStats(ctx context.Context, dbName string) ([]SlowOperationEvent, error) {
+	args := fc.Called(ctx, dbName)
+	return args.Get(0).([]SlowOperationEvent), args.Error(1)
+}
 
 func TestListDatabaseNames(t *testing.T) {
 	mont := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock))
diff --git a/receiver/mongodbreceiver/config.go b/receiver/mongodbreceiver/config.go
index 5b7a5ab1e6c0..be48a5532a70 100644
--- a/receiver/mongodbreceiver/config.go
+++ b/receiver/mongodbreceiver/config.go
@@ -26,11 +26,13 @@ type Config struct {
 	// MetricsBuilderConfig defines which metrics/attributes to enable for the scraper
 	metadata.MetricsBuilderConfig `mapstructure:",squash"`
 	// Deprecated - Transport option will be removed in v0.102.0
-	Hosts      []confignet.TCPAddrConfig `mapstructure:"hosts"`
-	Username   string                    `mapstructure:"username"`
-	Password   configopaque.String       `mapstructure:"password"`
-	ReplicaSet string                    `mapstructure:"replica_set,omitempty"`
-	Timeout    time.Duration             `mapstructure:"timeout"`
+	Hosts          []confignet.TCPAddrConfig `mapstructure:"hosts"`
+	Username       string                    `mapstructure:"username"`
+	Password       configopaque.String       `mapstructure:"password"`
+	ReplicaSet     string                    `mapstructure:"replica_set,omitempty"`
+	Timeout        time.Duration             `mapstructure:"timeout"`
+	ProfilingLevel int32                     `mapstructure:"profiling_level"`
+	SlowMs         int32                     `mapstructure:"slow_ms"`
 }
 
 func (c *Config) Validate() error {
@@ -79,9 +81,9 @@ func (c *Config) ClientOptions() *options.ClientOptions {
 	if c.Username != "" && c.Password != "" {
 		clientOptions.SetAuth(options.Credential{
 			AuthMechanism: "SCRAM-SHA-1",
-			Username:      c.Username,
-			Password:      string(c.Password),
-			AuthSource:    "admin",
+			Username:      c.Username,
+			Password:      string(c.Password),
+			AuthSource:    "admin",
 		})
 	}
diff --git a/receiver/mongodbreceiver/documentation.md b/receiver/mongodbreceiver/documentation.md
index e6a5ae200cd5..72be22e154c7 100644
--- a/receiver/mongodbreceiver/documentation.md
+++ b/receiver/mongodbreceiver/documentation.md
@@ -2912,6 +2912,34 @@ Total amount of space used by the oplog.
 | ---- | ----------- | ------ |
 | database | The name of a database. | Any Str |
 
+### mongodb.profiling.level
+
+Specifies which operations should be profiled.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| 1 | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| database | The name of a database. | Any Str |
+
+### mongodb.profiling.slowms
+
+Specifies which operations should be profiled based on slowms in milliseconds. Works only for profile level '1',
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| ms | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| database | The name of a database.
| Any Str | + ### mongodb.replset.health Member health value of the replica set: conveys if the member is up (i.e. 1) or down (i.e. 0). @@ -3024,6 +3052,59 @@ The total number of active sessions. | ---- | ----------- | ---------- | ----------------------- | --------- | | {sessions} | Sum | Int | Cumulative | false | +### mongodb.slow_operation.time + +The total time spent performing operations with slowms. Works only for profile level '1' & '2', + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| ms | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| timestamp | The time when the slow operation occurred. | Any Int | +| database | The name of a database. | Any Str | +| operation | The MongoDB operation being counted. | Str: ``insert``, ``query``, ``update``, ``delete``, ``getmore``, ``command`` | +| ns | The namespace of the operation (typically "database.collection"). | Any Str | +| plan_summary | A summary of the execution plan used for the query. | Any Str | +| query_signature | A signature that uniquely identifies the query for performance analysis. | Any Str | +| user | The user who executed the operation (only available with profiling). | Any Str | +| application | The application name that executed the operation (only available with profiling). | Any Str | +| statement | The actual command or query that was executed. | Any Str | +| raw_query | The raw representation of the query as it was sent to MongoDB. | Any Str | +| query_hash | A hash that uniquely identifies the query (only available with profiling). | Any Str | +| query_shape_hash | A hash representing the shape of the query. | Any Str | +| plan_cache_key | A key used to identify the execution plan in the cache (only available with profiling). | Any Str | +| query_framework | The framework used for executing the query. | Any Str | +| comment | Any comments associated with the command. | Any Str | +| mills | Duration of the operation in milliseconds. | Any Int | +| num_yields | Number of times the operation yielded control (for long-running operations). | Any Int | +| response_length | Length of the response returned by the operation. | Any Int | +| nreturned | Number of documents returned by the query. | Any Int | +| nmatched | Number of documents matched by the query. | Any Int | +| nmodified | Number of documents modified by the operation. | Any Int | +| ninserted | Number of documents inserted by the operation. | Any Int | +| ndeleted | Number of documents deleted by the operation. | Any Int | +| keys_examined | Number of index keys examined during execution. | Any Int | +| docs_examined | Number of documents examined during execution. | Any Int | +| keys_inserted | Number of index keys inserted during execution. | Any Int | +| write_conflicts | Number of write conflicts encountered during execution. | Any Int | +| cpu_nanos | CPU time consumed by the operation in nanoseconds. | Any Int | +| planning_time_micros | Time taken to plan the query in microseconds (only available with profiling). | Any Int | +| cursor_exhausted | Indicates whether the cursor was exhausted during execution. | Any Bool | +| upsert | Indicates if an upsert operation was performed (only available with profiling). | Any Bool | +| has_sort_stage | Indicates if a sort stage was present in the operation (only available with profiling). | Any Bool | +| used_disk | Disk usage information related to this operation (only available with profiling). 
| Any Str | +| from_multi_planner | Indicates if this operation came from a multi-planner (only available with profiling). | Any Str | +| replanned | Indicates if this operation was replanned (only available with profiling). | Any Str | +| replan_reason | Reason for replanning this operation (only available with profiling). | Any Str | +| client | Information about the client that executed this operation (only available with profiling). | Any Str | +| cursor | Cursor details related to this operation (only available with profiling). | Any Str | +| lock_stats | Lock statistics related to this operation (only available with profiling). | Any Str | +| flow_control_stats | Flow control statistics related to this operation (only available with profiling). | Any Str | + ### mongodb.stats.avgobjsize The average size of each document in bytes. diff --git a/receiver/mongodbreceiver/go.mod b/receiver/mongodbreceiver/go.mod index 0ddfa0e0f6b2..77edbf8686f8 100644 --- a/receiver/mongodbreceiver/go.mod +++ b/receiver/mongodbreceiver/go.mod @@ -1,6 +1,8 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver -go 1.21.0 +go 1.22.0 + +toolchain go1.22.2 require ( github.com/google/go-cmp v0.6.0 @@ -10,7 +12,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.102.0 github.com/stretchr/testify v1.9.0 github.com/testcontainers/testcontainers-go v0.31.0 - go.mongodb.org/mongo-driver v1.15.0 + go.mongodb.org/mongo-driver v1.17.0 go.opentelemetry.io/collector/component v0.102.2-0.20240606174409-6888f8f7a45f go.opentelemetry.io/collector/config/confignet v0.102.2-0.20240606174409-6888f8f7a45f go.opentelemetry.io/collector/config/configopaque v1.9.1-0.20240606174409-6888f8f7a45f @@ -19,7 +21,7 @@ require ( go.opentelemetry.io/collector/consumer v0.102.2-0.20240606174409-6888f8f7a45f go.opentelemetry.io/collector/featuregate v1.9.1-0.20240606174409-6888f8f7a45f go.opentelemetry.io/collector/filter v0.102.2-0.20240606174409-6888f8f7a45f - go.opentelemetry.io/collector/pdata v1.9.1-0.20240606174409-6888f8f7a45f + go.opentelemetry.io/collector/pdata v1.16.0 go.opentelemetry.io/collector/receiver v0.102.2-0.20240606174409-6888f8f7a45f go.opentelemetry.io/otel/metric v1.27.0 go.opentelemetry.io/otel/trace v1.27.0 @@ -68,7 +70,7 @@ require ( github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect + github.com/montanaflynn/stats v0.7.1 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect @@ -89,7 +91,7 @@ require ( github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect - github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect + github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/collector v0.102.2-0.20240606174409-6888f8f7a45f // indirect go.opentelemetry.io/collector/config/configtelemetry v0.102.2-0.20240606174409-6888f8f7a45f // indirect @@ -98,16 +100,17 @@ require ( go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect go.opentelemetry.io/otel/sdk v1.27.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.27.0 // 
indirect - golang.org/x/crypto v0.23.0 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.25.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.16.0 // indirect - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 // indirect - google.golang.org/grpc v1.64.0 // indirect - google.golang.org/protobuf v1.34.1 // indirect + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/tools v0.25.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect + google.golang.org/grpc v1.66.2 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/receiver/mongodbreceiver/go.sum b/receiver/mongodbreceiver/go.sum index 12ee66ad6338..8ddcedd1c933 100644 --- a/receiver/mongodbreceiver/go.sum +++ b/receiver/mongodbreceiver/go.sum @@ -99,6 +99,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -150,6 +152,8 @@ github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6 github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= +github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= @@ -157,6 +161,8 @@ github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc= go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= +go.mongodb.org/mongo-driver v1.17.0 h1:Hp4q2MCjvY19ViwimTs00wHi7G4yzxh4/2+nTx8r40k= +go.mongodb.org/mongo-driver v1.17.0/go.mod 
h1:wwWm/+BuOddhcq3n68LKRmgk2wXzmF6s0SFOa0GINL4= go.opentelemetry.io/collector v0.102.2-0.20240606174409-6888f8f7a45f h1:l2ZMTF7/+2qhoLy7poXJFCdkQDYN3C8D5Bi/8bEmQWE= go.opentelemetry.io/collector v0.102.2-0.20240606174409-6888f8f7a45f/go.mod h1:RxtmSO5a8f4R1kGY7/vnciw8GZTSZCljgYedEbI+iP8= go.opentelemetry.io/collector/component v0.102.2-0.20240606174409-6888f8f7a45f h1:OBqdOlHQqgt991UMBC6B04N/fLZNZS/ik/JC+XH41OE= @@ -179,6 +185,8 @@ go.opentelemetry.io/collector/filter v0.102.2-0.20240606174409-6888f8f7a45f h1:z go.opentelemetry.io/collector/filter v0.102.2-0.20240606174409-6888f8f7a45f/go.mod h1:6vrr9XoD+fJekeTz5G01mCy6XqMBsARgbJruXcUnhQU= go.opentelemetry.io/collector/pdata v1.9.1-0.20240606174409-6888f8f7a45f h1:ZSmt73uc+xxFHuryi4G1qh3VMx069JJGxfRLgIpaOHM= go.opentelemetry.io/collector/pdata v1.9.1-0.20240606174409-6888f8f7a45f/go.mod h1:vk7LrfpyVpGZrRWcpjyy0DDZzL3SZiYMQxfap25551w= +go.opentelemetry.io/collector/pdata v1.16.0 h1:g02K8jlRnmQ7TQDuXpdgVL6vIxIVqr5Gbb1qIR27rto= +go.opentelemetry.io/collector/pdata v1.16.0/go.mod h1:YZZJIt2ehxosYf/Y1pbvexjNWsIGNNrzzlCTO9jC1F4= go.opentelemetry.io/collector/pdata/testdata v0.102.1 h1:S3idZaJxy8M7mCC4PG4EegmtiSaOuh6wXWatKIui8xU= go.opentelemetry.io/collector/pdata/testdata v0.102.1/go.mod h1:JEoSJTMgeTKyGxoMRy48RMYyhkA5vCCq/abJq9B6vXs= go.opentelemetry.io/collector/receiver v0.102.2-0.20240606174409-6888f8f7a45f h1:VtkWNIWgYGNplMa3dNKwLIbB95jaHqigD9QvaDDggzk= @@ -215,11 +223,16 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -228,12 +241,16 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -249,6 +266,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= @@ -259,6 +278,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -268,6 +289,7 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -275,12 +297,19 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 
h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 h1:P8OJ/WCl/Xo4E4zoe4/bifHpSmmKwARqyqE4nW6J2GQ= google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= +google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU= google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 h1:Q2RxlXqh1cgzzUgV261vBO2jI5R/3DD1J2pM0nI4NhU= google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config.go b/receiver/mongodbreceiver/internal/metadata/generated_config.go index 9fc3e409438e..69b7ce48ca01 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config.go @@ -247,6 +247,8 @@ type MetricsConfig struct { MongodbOplogLogsizemb MetricConfig `mapstructure:"mongodb.oplog.logsizemb"` MongodbOplogTimediff MetricConfig `mapstructure:"mongodb.oplog.timediff"` MongodbOplogUsedsizemb MetricConfig `mapstructure:"mongodb.oplog.usedsizemb"` + MongodbProfilingLevel MetricConfig `mapstructure:"mongodb.profiling.level"` + MongodbProfilingSlowms MetricConfig `mapstructure:"mongodb.profiling.slowms"` MongodbReplsetHealth MetricConfig `mapstructure:"mongodb.replset.health"` MongodbReplsetOptimeLag MetricConfig `mapstructure:"mongodb.replset.optime_lag"` MongodbReplsetReplicationlag MetricConfig `mapstructure:"mongodb.replset.replicationlag"` @@ -254,6 +256,7 @@ type MetricsConfig struct { MongodbReplsetVotefraction MetricConfig `mapstructure:"mongodb.replset.votefraction"` MongodbReplsetVotes MetricConfig `mapstructure:"mongodb.replset.votes"` MongodbSessionCount MetricConfig `mapstructure:"mongodb.session.count"` + MongodbSlowOperationTime MetricConfig `mapstructure:"mongodb.slow_operation.time"` MongodbStatsAvgobjsize MetricConfig `mapstructure:"mongodb.stats.avgobjsize"` MongodbStatsCollections MetricConfig `mapstructure:"mongodb.stats.collections"` MongodbStatsDatasize MetricConfig `mapstructure:"mongodb.stats.datasize"` @@ -983,6 +986,12 
@@ func DefaultMetricsConfig() MetricsConfig { MongodbOplogUsedsizemb: MetricConfig{ Enabled: true, }, + MongodbProfilingLevel: MetricConfig{ + Enabled: true, + }, + MongodbProfilingSlowms: MetricConfig{ + Enabled: true, + }, MongodbReplsetHealth: MetricConfig{ Enabled: true, }, @@ -1004,6 +1013,9 @@ func DefaultMetricsConfig() MetricsConfig { MongodbSessionCount: MetricConfig{ Enabled: true, }, + MongodbSlowOperationTime: MetricConfig{ + Enabled: true, + }, MongodbStatsAvgobjsize: MetricConfig{ Enabled: true, }, diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go index 8e2ce3ddf53b..2afb0649b293 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go @@ -244,6 +244,8 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbOplogLogsizemb: MetricConfig{Enabled: true}, MongodbOplogTimediff: MetricConfig{Enabled: true}, MongodbOplogUsedsizemb: MetricConfig{Enabled: true}, + MongodbProfilingLevel: MetricConfig{Enabled: true}, + MongodbProfilingSlowms: MetricConfig{Enabled: true}, MongodbReplsetHealth: MetricConfig{Enabled: true}, MongodbReplsetOptimeLag: MetricConfig{Enabled: true}, MongodbReplsetReplicationlag: MetricConfig{Enabled: true}, @@ -251,6 +253,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbReplsetVotefraction: MetricConfig{Enabled: true}, MongodbReplsetVotes: MetricConfig{Enabled: true}, MongodbSessionCount: MetricConfig{Enabled: true}, + MongodbSlowOperationTime: MetricConfig{Enabled: true}, MongodbStatsAvgobjsize: MetricConfig{Enabled: true}, MongodbStatsCollections: MetricConfig{Enabled: true}, MongodbStatsDatasize: MetricConfig{Enabled: true}, @@ -549,6 +552,8 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbOplogLogsizemb: MetricConfig{Enabled: false}, MongodbOplogTimediff: MetricConfig{Enabled: false}, MongodbOplogUsedsizemb: MetricConfig{Enabled: false}, + MongodbProfilingLevel: MetricConfig{Enabled: false}, + MongodbProfilingSlowms: MetricConfig{Enabled: false}, MongodbReplsetHealth: MetricConfig{Enabled: false}, MongodbReplsetOptimeLag: MetricConfig{Enabled: false}, MongodbReplsetReplicationlag: MetricConfig{Enabled: false}, @@ -556,6 +561,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbReplsetVotefraction: MetricConfig{Enabled: false}, MongodbReplsetVotes: MetricConfig{Enabled: false}, MongodbSessionCount: MetricConfig{Enabled: false}, + MongodbSlowOperationTime: MetricConfig{Enabled: false}, MongodbStatsAvgobjsize: MetricConfig{Enabled: false}, MongodbStatsCollections: MetricConfig{Enabled: false}, MongodbStatsDatasize: MetricConfig{Enabled: false}, diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go index fbf798edd1af..1784bd6cdd56 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go @@ -11455,6 +11455,108 @@ func newMetricMongodbOplogUsedsizemb(cfg MetricConfig) metricMongodbOplogUsedsiz return m } +type metricMongodbProfilingLevel struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.profiling.level metric with initial data. 
+func (m *metricMongodbProfilingLevel) init() { + m.data.SetName("mongodb.profiling.level") + m.data.SetDescription("Specifies which operations should be profiled.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbProfilingLevel) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("database", databaseAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbProfilingLevel) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbProfilingLevel) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbProfilingLevel(cfg MetricConfig) metricMongodbProfilingLevel { + m := metricMongodbProfilingLevel{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricMongodbProfilingSlowms struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.profiling.slowms metric with initial data. +func (m *metricMongodbProfilingSlowms) init() { + m.data.SetName("mongodb.profiling.slowms") + m.data.SetDescription("Specifies which operations should be profiled based on slowms in milliseconds. Works only for profile level '1',") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbProfilingSlowms) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("database", databaseAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbProfilingSlowms) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbProfilingSlowms) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbProfilingSlowms(cfg MetricConfig) metricMongodbProfilingSlowms { + m := metricMongodbProfilingSlowms{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbReplsetHealth struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -11832,6 +11934,96 @@ func newMetricMongodbSessionCount(cfg MetricConfig) metricMongodbSessionCount { return m } +type metricMongodbSlowOperationTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.slow_operation.time metric with initial data. +func (m *metricMongodbSlowOperationTime) init() { + m.data.SetName("mongodb.slow_operation.time") + m.data.SetDescription("The total time spent performing operations with slowms. Works only for profile level '1' & '2',") + m.data.SetUnit("ms") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbSlowOperationTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, timestampAttributeValue int64, databaseAttributeValue string, operationAttributeValue string, nsAttributeValue string, planSummaryAttributeValue string, querySignatureAttributeValue string, userAttributeValue string, applicationAttributeValue string, statementAttributeValue string, rawQueryAttributeValue string, queryHashAttributeValue string, queryShapeHashAttributeValue string, planCacheKeyAttributeValue string, queryFrameworkAttributeValue string, commentAttributeValue string, millsAttributeValue int64, numYieldsAttributeValue int64, responseLengthAttributeValue int64, nreturnedAttributeValue int64, nmatchedAttributeValue int64, nmodifiedAttributeValue int64, ninsertedAttributeValue int64, ndeletedAttributeValue int64, keysExaminedAttributeValue int64, docsExaminedAttributeValue int64, keysInsertedAttributeValue int64, writeConflictsAttributeValue int64, cpuNanosAttributeValue int64, planningTimeMicrosAttributeValue int64, cursorExhaustedAttributeValue bool, upsertAttributeValue bool, hasSortStageAttributeValue bool, usedDiskAttributeValue string, fromMultiPlannerAttributeValue string, replannedAttributeValue string, replanReasonAttributeValue string, clientAttributeValue string, cursorAttributeValue string, lockStatsAttributeValue string, flowControlStatsAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutInt("timestamp", timestampAttributeValue) + dp.Attributes().PutStr("database", databaseAttributeValue) + dp.Attributes().PutStr("operation", operationAttributeValue) + dp.Attributes().PutStr("ns", nsAttributeValue) + dp.Attributes().PutStr("plan_summary", planSummaryAttributeValue) + dp.Attributes().PutStr("query_signature", querySignatureAttributeValue) + dp.Attributes().PutStr("user", userAttributeValue) + dp.Attributes().PutStr("application", applicationAttributeValue) + dp.Attributes().PutStr("statement", statementAttributeValue) + dp.Attributes().PutStr("raw_query", rawQueryAttributeValue) + dp.Attributes().PutStr("query_hash", queryHashAttributeValue) + dp.Attributes().PutStr("query_shape_hash", queryShapeHashAttributeValue) + dp.Attributes().PutStr("plan_cache_key", planCacheKeyAttributeValue) + dp.Attributes().PutStr("query_framework", queryFrameworkAttributeValue) + dp.Attributes().PutStr("comment", commentAttributeValue) + dp.Attributes().PutInt("mills", millsAttributeValue) + dp.Attributes().PutInt("num_yields", numYieldsAttributeValue) + dp.Attributes().PutInt("response_length", responseLengthAttributeValue) + dp.Attributes().PutInt("nreturned", 
nreturnedAttributeValue) + dp.Attributes().PutInt("nmatched", nmatchedAttributeValue) + dp.Attributes().PutInt("nmodified", nmodifiedAttributeValue) + dp.Attributes().PutInt("ninserted", ninsertedAttributeValue) + dp.Attributes().PutInt("ndeleted", ndeletedAttributeValue) + dp.Attributes().PutInt("keys_examined", keysExaminedAttributeValue) + dp.Attributes().PutInt("docs_examined", docsExaminedAttributeValue) + dp.Attributes().PutInt("keys_inserted", keysInsertedAttributeValue) + dp.Attributes().PutInt("write_conflicts", writeConflictsAttributeValue) + dp.Attributes().PutInt("cpu_nanos", cpuNanosAttributeValue) + dp.Attributes().PutInt("planning_time_micros", planningTimeMicrosAttributeValue) + dp.Attributes().PutBool("cursor_exhausted", cursorExhaustedAttributeValue) + dp.Attributes().PutBool("upsert", upsertAttributeValue) + dp.Attributes().PutBool("has_sort_stage", hasSortStageAttributeValue) + dp.Attributes().PutStr("used_disk", usedDiskAttributeValue) + dp.Attributes().PutStr("from_multi_planner", fromMultiPlannerAttributeValue) + dp.Attributes().PutStr("replanned", replannedAttributeValue) + dp.Attributes().PutStr("replan_reason", replanReasonAttributeValue) + dp.Attributes().PutStr("client", clientAttributeValue) + dp.Attributes().PutStr("cursor", cursorAttributeValue) + dp.Attributes().PutStr("lock_stats", lockStatsAttributeValue) + dp.Attributes().PutStr("flow_control_stats", flowControlStatsAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbSlowOperationTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbSlowOperationTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbSlowOperationTime(cfg MetricConfig) metricMongodbSlowOperationTime { + m := metricMongodbSlowOperationTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbStatsAvgobjsize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -15556,6 +15748,8 @@ type MetricsBuilder struct { metricMongodbOplogLogsizemb metricMongodbOplogLogsizemb metricMongodbOplogTimediff metricMongodbOplogTimediff metricMongodbOplogUsedsizemb metricMongodbOplogUsedsizemb + metricMongodbProfilingLevel metricMongodbProfilingLevel + metricMongodbProfilingSlowms metricMongodbProfilingSlowms metricMongodbReplsetHealth metricMongodbReplsetHealth metricMongodbReplsetOptimeLag metricMongodbReplsetOptimeLag metricMongodbReplsetReplicationlag metricMongodbReplsetReplicationlag @@ -15563,6 +15757,7 @@ type MetricsBuilder struct { metricMongodbReplsetVotefraction metricMongodbReplsetVotefraction metricMongodbReplsetVotes metricMongodbReplsetVotes metricMongodbSessionCount metricMongodbSessionCount + metricMongodbSlowOperationTime metricMongodbSlowOperationTime metricMongodbStatsAvgobjsize metricMongodbStatsAvgobjsize metricMongodbStatsCollections metricMongodbStatsCollections metricMongodbStatsDatasize metricMongodbStatsDatasize @@ -15868,6 +16063,8 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricMongodbOplogLogsizemb: newMetricMongodbOplogLogsizemb(mbc.Metrics.MongodbOplogLogsizemb), metricMongodbOplogTimediff: newMetricMongodbOplogTimediff(mbc.Metrics.MongodbOplogTimediff), metricMongodbOplogUsedsizemb: newMetricMongodbOplogUsedsizemb(mbc.Metrics.MongodbOplogUsedsizemb), + metricMongodbProfilingLevel: newMetricMongodbProfilingLevel(mbc.Metrics.MongodbProfilingLevel), + metricMongodbProfilingSlowms: newMetricMongodbProfilingSlowms(mbc.Metrics.MongodbProfilingSlowms), metricMongodbReplsetHealth: newMetricMongodbReplsetHealth(mbc.Metrics.MongodbReplsetHealth), metricMongodbReplsetOptimeLag: newMetricMongodbReplsetOptimeLag(mbc.Metrics.MongodbReplsetOptimeLag), metricMongodbReplsetReplicationlag: newMetricMongodbReplsetReplicationlag(mbc.Metrics.MongodbReplsetReplicationlag), @@ -15875,6 +16072,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricMongodbReplsetVotefraction: newMetricMongodbReplsetVotefraction(mbc.Metrics.MongodbReplsetVotefraction), metricMongodbReplsetVotes: newMetricMongodbReplsetVotes(mbc.Metrics.MongodbReplsetVotes), metricMongodbSessionCount: newMetricMongodbSessionCount(mbc.Metrics.MongodbSessionCount), + metricMongodbSlowOperationTime: newMetricMongodbSlowOperationTime(mbc.Metrics.MongodbSlowOperationTime), metricMongodbStatsAvgobjsize: newMetricMongodbStatsAvgobjsize(mbc.Metrics.MongodbStatsAvgobjsize), metricMongodbStatsCollections: newMetricMongodbStatsCollections(mbc.Metrics.MongodbStatsCollections), metricMongodbStatsDatasize: newMetricMongodbStatsDatasize(mbc.Metrics.MongodbStatsDatasize), @@ -16238,6 +16436,8 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricMongodbOplogLogsizemb.emit(ils.Metrics()) mb.metricMongodbOplogTimediff.emit(ils.Metrics()) mb.metricMongodbOplogUsedsizemb.emit(ils.Metrics()) + mb.metricMongodbProfilingLevel.emit(ils.Metrics()) + mb.metricMongodbProfilingSlowms.emit(ils.Metrics()) mb.metricMongodbReplsetHealth.emit(ils.Metrics()) mb.metricMongodbReplsetOptimeLag.emit(ils.Metrics()) mb.metricMongodbReplsetReplicationlag.emit(ils.Metrics()) @@ -16245,6 +16445,7 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricMongodbReplsetVotefraction.emit(ils.Metrics()) mb.metricMongodbReplsetVotes.emit(ils.Metrics()) mb.metricMongodbSessionCount.emit(ils.Metrics()) + mb.metricMongodbSlowOperationTime.emit(ils.Metrics()) 
mb.metricMongodbStatsAvgobjsize.emit(ils.Metrics()) mb.metricMongodbStatsCollections.emit(ils.Metrics()) mb.metricMongodbStatsDatasize.emit(ils.Metrics()) @@ -17439,6 +17640,16 @@ func (mb *MetricsBuilder) RecordMongodbOplogUsedsizembDataPoint(ts pcommon.Times mb.metricMongodbOplogUsedsizemb.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) } +// RecordMongodbProfilingLevelDataPoint adds a data point to mongodb.profiling.level metric. +func (mb *MetricsBuilder) RecordMongodbProfilingLevelDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + mb.metricMongodbProfilingLevel.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) +} + +// RecordMongodbProfilingSlowmsDataPoint adds a data point to mongodb.profiling.slowms metric. +func (mb *MetricsBuilder) RecordMongodbProfilingSlowmsDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { + mb.metricMongodbProfilingSlowms.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) +} + // RecordMongodbReplsetHealthDataPoint adds a data point to mongodb.replset.health metric. func (mb *MetricsBuilder) RecordMongodbReplsetHealthDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, replicaSetAttributeValue string, memberNameAttributeValue string, memberIDAttributeValue string, memberStateAttributeValue string) { mb.metricMongodbReplsetHealth.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, replicaSetAttributeValue, memberNameAttributeValue, memberIDAttributeValue, memberStateAttributeValue) @@ -17474,6 +17685,11 @@ func (mb *MetricsBuilder) RecordMongodbSessionCountDataPoint(ts pcommon.Timestam mb.metricMongodbSessionCount.recordDataPoint(mb.startTime, ts, val) } +// RecordMongodbSlowOperationTimeDataPoint adds a data point to mongodb.slow_operation.time metric. 
+func (mb *MetricsBuilder) RecordMongodbSlowOperationTimeDataPoint(ts pcommon.Timestamp, val int64, timestampAttributeValue int64, databaseAttributeValue string, operationAttributeValue AttributeOperation, nsAttributeValue string, planSummaryAttributeValue string, querySignatureAttributeValue string, userAttributeValue string, applicationAttributeValue string, statementAttributeValue string, rawQueryAttributeValue string, queryHashAttributeValue string, queryShapeHashAttributeValue string, planCacheKeyAttributeValue string, queryFrameworkAttributeValue string, commentAttributeValue string, millsAttributeValue int64, numYieldsAttributeValue int64, responseLengthAttributeValue int64, nreturnedAttributeValue int64, nmatchedAttributeValue int64, nmodifiedAttributeValue int64, ninsertedAttributeValue int64, ndeletedAttributeValue int64, keysExaminedAttributeValue int64, docsExaminedAttributeValue int64, keysInsertedAttributeValue int64, writeConflictsAttributeValue int64, cpuNanosAttributeValue int64, planningTimeMicrosAttributeValue int64, cursorExhaustedAttributeValue bool, upsertAttributeValue bool, hasSortStageAttributeValue bool, usedDiskAttributeValue string, fromMultiPlannerAttributeValue string, replannedAttributeValue string, replanReasonAttributeValue string, clientAttributeValue string, cursorAttributeValue string, lockStatsAttributeValue string, flowControlStatsAttributeValue string) { + mb.metricMongodbSlowOperationTime.recordDataPoint(mb.startTime, ts, val, timestampAttributeValue, databaseAttributeValue, operationAttributeValue.String(), nsAttributeValue, planSummaryAttributeValue, querySignatureAttributeValue, userAttributeValue, applicationAttributeValue, statementAttributeValue, rawQueryAttributeValue, queryHashAttributeValue, queryShapeHashAttributeValue, planCacheKeyAttributeValue, queryFrameworkAttributeValue, commentAttributeValue, millsAttributeValue, numYieldsAttributeValue, responseLengthAttributeValue, nreturnedAttributeValue, nmatchedAttributeValue, nmodifiedAttributeValue, ninsertedAttributeValue, ndeletedAttributeValue, keysExaminedAttributeValue, docsExaminedAttributeValue, keysInsertedAttributeValue, writeConflictsAttributeValue, cpuNanosAttributeValue, planningTimeMicrosAttributeValue, cursorExhaustedAttributeValue, upsertAttributeValue, hasSortStageAttributeValue, usedDiskAttributeValue, fromMultiPlannerAttributeValue, replannedAttributeValue, replanReasonAttributeValue, clientAttributeValue, cursorAttributeValue, lockStatsAttributeValue, flowControlStatsAttributeValue) +} + // RecordMongodbStatsAvgobjsizeDataPoint adds a data point to mongodb.stats.avgobjsize metric. 
func (mb *MetricsBuilder) RecordMongodbStatsAvgobjsizeDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { mb.metricMongodbStatsAvgobjsize.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go index a0f1a5006797..9e92d0b84c4c 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go @@ -937,6 +937,14 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbOplogUsedsizembDataPoint(ts, 1, "database-val") + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbProfilingLevelDataPoint(ts, 1, "database-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbProfilingSlowmsDataPoint(ts, 1, "database-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbReplsetHealthDataPoint(ts, 1, "database-val", "replica_set-val", "member_name-val", "member_id-val", "member_state-val") @@ -965,6 +973,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbSessionCountDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbSlowOperationTimeDataPoint(ts, 1, 9, "database-val", AttributeOperationInsert, "ns-val", "plan_summary-val", "query_signature-val", "user-val", "application-val", "statement-val", "raw_query-val", "query_hash-val", "query_shape_hash-val", "plan_cache_key-val", "query_framework-val", "comment-val", 5, 10, 15, 9, 8, 9, 9, 8, 13, 13, 13, 15, 9, 20, true, true, true, "used_disk-val", "from_multi_planner-val", "replanned-val", "replan_reason-val", "client-val", "cursor-val", "lock_stats-val", "flow_control_stats-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbStatsAvgobjsizeDataPoint(ts, 1, "database-val") @@ -4596,6 +4608,36 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("database") assert.True(t, ok) assert.EqualValues(t, "database-val", attrVal.Str()) + case "mongodb.profiling.level": + assert.False(t, validatedMetrics["mongodb.profiling.level"], "Found a duplicate in the metrics slice: mongodb.profiling.level") + validatedMetrics["mongodb.profiling.level"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Specifies which operations should be profiled.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("database") + assert.True(t, ok) + assert.EqualValues(t, "database-val", attrVal.Str()) + case "mongodb.profiling.slowms": + assert.False(t, validatedMetrics["mongodb.profiling.slowms"], "Found a duplicate in the metrics slice: mongodb.profiling.slowms") + validatedMetrics["mongodb.profiling.slowms"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Specifies which operations should be profiled based on slowms in milliseconds. 
Works only for profile level '1',", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("database") + assert.True(t, ok) + assert.EqualValues(t, "database-val", attrVal.Str()) case "mongodb.replset.health": assert.False(t, validatedMetrics["mongodb.replset.health"], "Found a duplicate in the metrics slice: mongodb.replset.health") validatedMetrics["mongodb.replset.health"] = true @@ -4760,6 +4802,138 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "mongodb.slow_operation.time": + assert.False(t, validatedMetrics["mongodb.slow_operation.time"], "Found a duplicate in the metrics slice: mongodb.slow_operation.time") + validatedMetrics["mongodb.slow_operation.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The total time spent performing operations with slowms. Works only for profile level '1' & '2',", ms.At(i).Description()) + assert.Equal(t, "ms", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("timestamp") + assert.True(t, ok) + assert.EqualValues(t, 9, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("database") + assert.True(t, ok) + assert.EqualValues(t, "database-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("operation") + assert.True(t, ok) + assert.EqualValues(t, "insert", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("ns") + assert.True(t, ok) + assert.EqualValues(t, "ns-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("plan_summary") + assert.True(t, ok) + assert.EqualValues(t, "plan_summary-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_signature") + assert.True(t, ok) + assert.EqualValues(t, "query_signature-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("user") + assert.True(t, ok) + assert.EqualValues(t, "user-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("application") + assert.True(t, ok) + assert.EqualValues(t, "application-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("statement") + assert.True(t, ok) + assert.EqualValues(t, "statement-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("raw_query") + assert.True(t, ok) + assert.EqualValues(t, "raw_query-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_hash") + assert.True(t, ok) + assert.EqualValues(t, "query_hash-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_shape_hash") + assert.True(t, ok) + assert.EqualValues(t, "query_shape_hash-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("plan_cache_key") + assert.True(t, ok) + assert.EqualValues(t, "plan_cache_key-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_framework") + assert.True(t, ok) + assert.EqualValues(t, "query_framework-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("comment") + assert.True(t, ok) + assert.EqualValues(t, "comment-val", 
attrVal.Str()) + attrVal, ok = dp.Attributes().Get("mills") + assert.True(t, ok) + assert.EqualValues(t, 5, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("num_yields") + assert.True(t, ok) + assert.EqualValues(t, 10, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("response_length") + assert.True(t, ok) + assert.EqualValues(t, 15, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("nreturned") + assert.True(t, ok) + assert.EqualValues(t, 9, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("nmatched") + assert.True(t, ok) + assert.EqualValues(t, 8, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("nmodified") + assert.True(t, ok) + assert.EqualValues(t, 9, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("ninserted") + assert.True(t, ok) + assert.EqualValues(t, 9, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("ndeleted") + assert.True(t, ok) + assert.EqualValues(t, 8, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("keys_examined") + assert.True(t, ok) + assert.EqualValues(t, 13, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("docs_examined") + assert.True(t, ok) + assert.EqualValues(t, 13, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("keys_inserted") + assert.True(t, ok) + assert.EqualValues(t, 13, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("write_conflicts") + assert.True(t, ok) + assert.EqualValues(t, 15, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("cpu_nanos") + assert.True(t, ok) + assert.EqualValues(t, 9, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("planning_time_micros") + assert.True(t, ok) + assert.EqualValues(t, 20, attrVal.Int()) + attrVal, ok = dp.Attributes().Get("cursor_exhausted") + assert.True(t, ok) + assert.EqualValues(t, true, attrVal.Bool()) + attrVal, ok = dp.Attributes().Get("upsert") + assert.True(t, ok) + assert.EqualValues(t, true, attrVal.Bool()) + attrVal, ok = dp.Attributes().Get("has_sort_stage") + assert.True(t, ok) + assert.EqualValues(t, true, attrVal.Bool()) + attrVal, ok = dp.Attributes().Get("used_disk") + assert.True(t, ok) + assert.EqualValues(t, "used_disk-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("from_multi_planner") + assert.True(t, ok) + assert.EqualValues(t, "from_multi_planner-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("replanned") + assert.True(t, ok) + assert.EqualValues(t, "replanned-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("replan_reason") + assert.True(t, ok) + assert.EqualValues(t, "replan_reason-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("client") + assert.True(t, ok) + assert.EqualValues(t, "client-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("cursor") + assert.True(t, ok) + assert.EqualValues(t, "cursor-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("lock_stats") + assert.True(t, ok) + assert.EqualValues(t, "lock_stats-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("flow_control_stats") + assert.True(t, ok) + assert.EqualValues(t, "flow_control_stats-val", attrVal.Str()) case "mongodb.stats.avgobjsize": assert.False(t, validatedMetrics["mongodb.stats.avgobjsize"], "Found a duplicate in the metrics slice: mongodb.stats.avgobjsize") validatedMetrics["mongodb.stats.avgobjsize"] = true diff --git a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml index 0fab27cf5553..d9cf3b8ab211 100644 --- a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml +++ 
+++ b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml
@@ -439,6 +439,10 @@ all_set:
       enabled: true
     mongodb.oplog.usedsizemb:
       enabled: true
+    mongodb.profiling.level:
+      enabled: true
+    mongodb.profiling.slowms:
+      enabled: true
     mongodb.replset.health:
       enabled: true
     mongodb.replset.optime_lag:
@@ -453,6 +457,8 @@ all_set:
       enabled: true
     mongodb.session.count:
       enabled: true
+    mongodb.slow_operation.time:
+      enabled: true
     mongodb.stats.avgobjsize:
       enabled: true
     mongodb.stats.collections:
@@ -1034,6 +1040,10 @@ none_set:
       enabled: false
     mongodb.oplog.usedsizemb:
       enabled: false
+    mongodb.profiling.level:
+      enabled: false
+    mongodb.profiling.slowms:
+      enabled: false
     mongodb.replset.health:
       enabled: false
     mongodb.replset.optime_lag:
@@ -1048,6 +1058,8 @@ none_set:
       enabled: false
     mongodb.session.count:
       enabled: false
+    mongodb.slow_operation.time:
+      enabled: false
     mongodb.stats.avgobjsize:
       enabled: false
     mongodb.stats.collections:
diff --git a/receiver/mongodbreceiver/metadata.yaml b/receiver/mongodbreceiver/metadata.yaml
index 9c433a2e54ca..8c0cc83261c6 100644
--- a/receiver/mongodbreceiver/metadata.yaml
+++ b/receiver/mongodbreceiver/metadata.yaml
@@ -106,6 +106,121 @@ attributes:
       - intent_shared
       - intent_exclusive
+  timestamp:
+    description: The time when the slow operation occurred.
+    type: int
+  ns:
+    description: The namespace of the operation (typically "database.collection").
+    type: string
+  plan_summary:
+    description: A summary of the execution plan used for the query.
+    type: string
+  query_signature:
+    description: A signature that uniquely identifies the query for performance analysis.
+    type: string
+  user:
+    description: The user who executed the operation (only available with profiling).
+    type: string
+  application:
+    description: The application name that executed the operation (only available with profiling).
+    type: string
+  statement:
+    description: The actual command or query that was executed.
+    type: string
+  raw_query:
+    description: The raw representation of the query as it was sent to MongoDB.
+    type: string
+  query_hash:
+    description: A hash that uniquely identifies the query (only available with profiling).
+    type: string
+  query_shape_hash:
+    description: A hash representing the shape of the query.
+    type: string
+  plan_cache_key:
+    description: A key used to identify the execution plan in the cache (only available with profiling).
+    type: string
+  query_framework:
+    description: The framework used for executing the query.
+    type: string
+  comment:
+    description: Any comments associated with the command.
+    type: string
+  mills:
+    description: Duration of the operation in milliseconds.
+    type: int
+  num_yields:
+    description: Number of times the operation yielded control (for long-running operations).
+    type: int
+  response_length:
+    description: Length of the response returned by the operation.
+    type: int
+  nreturned:
+    description: Number of documents returned by the query.
+    type: int
+  nmatched:
+    description: Number of documents matched by the query.
+    type: int
+  nmodified:
+    description: Number of documents modified by the operation.
+    type: int
+  ninserted:
+    description: Number of documents inserted by the operation.
+    type: int
+  ndeleted:
+    description: Number of documents deleted by the operation.
+    type: int
+  keys_examined:
+    description: Number of index keys examined during execution.
+    type: int
+  docs_examined:
+    description: Number of documents examined during execution.
+    type: int
+  keys_inserted:
+    description: Number of index keys inserted during execution.
+    type: int
+  write_conflicts:
+    description: Number of write conflicts encountered during execution.
+    type: int
+  cpu_nanos:
+    description: CPU time consumed by the operation in nanoseconds.
+    type: int
+  planning_time_micros:
+    description: Time taken to plan the query in microseconds (only available with profiling).
+    type: int
+  cursor_exhausted:
+    description: Indicates whether the cursor was exhausted during execution.
+    type: bool
+  upsert:
+    description: Indicates if an upsert operation was performed (only available with profiling).
+    type: bool
+  has_sort_stage:
+    description: Indicates if a sort stage was present in the operation (only available with profiling).
+    type: bool
+  used_disk:
+    description: Disk usage information related to this operation (only available with profiling).
+    type: string
+  from_multi_planner:
+    description: Indicates if this operation came from a multi-planner (only available with profiling).
+    type: string
+  replanned:
+    description: Indicates if this operation was replanned (only available with profiling).
+    type: string
+  replan_reason:
+    description: Reason for replanning this operation (only available with profiling).
+    type: string
+  client:
+    description: Information about the client that executed this operation (only available with profiling).
+    type: string
+  cursor:
+    description: Cursor details related to this operation (only available with profiling).
+    type: string
+  lock_stats:
+    description: Lock statistics related to this operation (only available with profiling).
+    type: string
+  flow_control_stats:
+    description: Flow control statistics related to this operation (only available with profiling).
+    type: string
+
 metrics:
   mongodb.cache.operations:
     description: The number of cache operations of the instance.
@@ -2335,6 +2450,68 @@ metrics:
     gauge:
       value_type: int
     attributes: [database]
+  mongodb.profiling.level:
+    enabled: true
+    description: Specifies which operations should be profiled.
+    unit: "1"
+    gauge:
+      value_type: int
+    attributes: [database]
+  mongodb.profiling.slowms:
+    enabled: true
+    description: Specifies which operations should be profiled based on slowms in milliseconds. Works only for profile level '1'.
+    unit: ms
+    gauge:
+      value_type: int
+    attributes: [database]
+  mongodb.slow_operation.time:
+    enabled: true
+    description: The total time spent performing operations with slowms. Works only for profile level '1' & '2'.
+    unit: ms
+    gauge:
+      value_type: int
+    attributes: [
+      timestamp,
+      database,
+      operation,
+      ns,
+      plan_summary,
+      query_signature,
+      user,
+      application,
+      statement,
+      raw_query,
+      query_hash,
+      query_shape_hash,
+      plan_cache_key,
+      query_framework,
+      comment,
+      mills,
+      num_yields,
+      response_length,
+      nreturned,
+      nmatched,
+      nmodified,
+      ninserted,
+      ndeleted,
+      keys_examined,
+      docs_examined,
+      keys_inserted,
+      write_conflicts,
+      cpu_nanos,
+      planning_time_micros,
+      cursor_exhausted,
+      upsert,
+      has_sort_stage,
+      used_disk,
+      from_multi_planner,
+      replanned,
+      replan_reason,
+      client,
+      cursor,
+      lock_stats,
+      flow_control_stats
+    ]
 
 # TODO: Update the receiver to pass the tests
 tests:
   skip_lifecycle: true
diff --git a/receiver/mongodbreceiver/metrics.go b/receiver/mongodbreceiver/metrics.go
index 698294774f58..5026d88a23cb 100644
--- a/receiver/mongodbreceiver/metrics.go
+++ b/receiver/mongodbreceiver/metrics.go
@@ -3739,3 +3739,78 @@ func (s *mongodbScraper) recordMongodbWiredtigerConcurrenttransactionsWriteTotal
 	}
 	s.mb.RecordMongodbWiredtigerConcurrenttransactionsWriteTotalticketsDataPoint(now, val, database)
 }
+
+func (s *mongodbScraper) recordMongodbProfilingLevel(now pcommon.Timestamp, doc bson.M, database string, errs *scrapererror.ScrapeErrors) {
+	metricPath := []string{"level"}
+	metricName := "mongodb.profiling.level"
+	val, err := collectMetric(doc, metricPath)
+	if err != nil {
+		errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, database, err))
+		return
+	}
+	s.mb.RecordMongodbProfilingLevelDataPoint(now, val, database)
+}
+
+func (s *mongodbScraper) recordMongodbProfilingSlowms(now pcommon.Timestamp, doc bson.M, database string, errs *scrapererror.ScrapeErrors) {
+	metricPath := []string{"slowms"}
+	metricName := "mongodb.profiling.slowms"
+	val, err := collectMetric(doc, metricPath)
+	if err != nil {
+		errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, database, err))
+		return
+	}
+	s.mb.RecordMongodbProfilingSlowmsDataPoint(now, val, database)
+}
+
+func (s *mongodbScraper) recordMongodbSlowOperationTime(now pcommon.Timestamp, doc []SlowOperationEvent, database string, errs *scrapererror.ScrapeErrors) {
+	metricName := "mongodb.slow_operation.time"
+	if doc == nil {
+		errs.AddPartial(1, fmt.Errorf(collectMetricWithAttributes, metricName, database, "no slow operation events found"))
+		return
+	}
+	for _, ops := range doc {
+		s.mb.RecordMongodbSlowOperationTimeDataPoint(
+			now,
+			ops.Millis,
+			ops.Timestamp,
+			ops.Database,
+			metadata.MapAttributeOperation[ops.Operation],
+			ops.NS,
+			ops.PlanSummary,
+			ops.QuerySignature,
+			ops.User,
+			ops.Application,
+			ConvertToJSONString(ops.Statement),
+			ConvertToJSONString(ops.RawQuery),
+			ops.QueryHash,
+			ops.QueryShapeHash,
+			ops.PlanCacheKey,
+			ops.QueryFramework,
+			ops.Comment,
+			ops.Millis,
+			ops.NumYields,
+			ops.ResponseLength,
+			ops.NReturned,
+			ops.NMatched,
+			ops.NModified,
+			ops.NInserted,
+			ops.NDeleted,
+			ops.KeysExamined,
+			ops.DocsExamined,
+			ops.KeysInserted,
+			ops.WriteConflicts,
+			ops.CpuNanos,
+			ops.PlanningTimeMicros,
+			ops.CursorExhausted,
+			ops.Upsert,
+			ops.HasSortStage,
+			ops.UsedDisk,
+			ops.FromMultiPlanner,
+			ops.Replanned,
+			ops.ReplanReason,
+			ops.Client,
+			ConvertToJSONString(ops.Cursor),
+			ConvertToJSONString(ops.LockStats),
+			ConvertToJSONString(ops.FlowControlStats),
+		)
+	}
+}
diff --git a/receiver/mongodbreceiver/scraper.go b/receiver/mongodbreceiver/scraper.go
index cacfb9c84be5..4fa5ed58c681 100644
--- a/receiver/mongodbreceiver/scraper.go
+++ b/receiver/mongodbreceiver/scraper.go
@@ -156,6 +156,20 @@ func (s *mongodbScraper) collectDatabase(ctx context.Context, now pcommon.Timest
 		s.recordConnPoolStats(now, connPoolStats, databaseName, errs)
 	}
 
+	profilingStats, err := s.client.ProfilingStats(ctx, databaseName)
+	if err != nil {
+		errs.AddPartial(1, fmt.Errorf("failed to fetch profilingStats metrics: %w", err))
+	} else {
+		s.recordProfilingStats(now, profilingStats, databaseName, errs)
+	}
+
+	queryStats, err := s.client.QueryStats(ctx, databaseName)
+	if err != nil {
+		errs.AddPartial(1, fmt.Errorf("failed to fetch queryStats metrics: %w", err))
+	} else {
+		s.recordQueryStats(now, queryStats, databaseName, errs)
+	}
+
 	s.recordNormalServerStats(now, serverStatus, databaseName, errs)
 
 	rb := s.mb.NewResourceBuilder()
@@ -715,3 +729,14 @@ func (s *mongodbScraper) recordConnPoolStats(now pcommon.Timestamp, doc bson.M,
 	s.recordMongodbConnectionPoolTotalinuse(now, doc, database, errs)
 	s.recordMongodbConnectionPoolTotalrefreshing(now, doc, database, errs)
 }
+
+func (s *mongodbScraper) recordProfilingStats(now pcommon.Timestamp, doc bson.M, database string, errs *scrapererror.ScrapeErrors) {
+	// profiling_stats
+	s.recordMongodbProfilingLevel(now, doc, database, errs)
+	s.recordMongodbProfilingSlowms(now, doc, database, errs)
+}
+
+func (s *mongodbScraper) recordQueryStats(now pcommon.Timestamp, doc []SlowOperationEvent, database string, errs *scrapererror.ScrapeErrors) {
+	// query_stats
+	s.recordMongodbSlowOperationTime(now, doc, database, errs)
+}
diff --git a/receiver/mongodbreceiver/slow_query.go b/receiver/mongodbreceiver/slow_query.go
new file mode 100644
index 000000000000..92e6de751eda
--- /dev/null
+++ b/receiver/mongodbreceiver/slow_query.go
@@ -0,0 +1,413 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package mongodbreceiver
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"hash/fnv"
+	"log"
+	"strings"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/mongo"
+)
+
+// SlowOperationEvent represents the structure of a slow operation event.
+type SlowOperationEvent struct {
+	Timestamp          int64  `json:"timestamp"`
+	Database           string `json:"database"`
+	Operation          string `json:"operation"`
+	NS                 string `json:"ns,omitempty"`
+	PlanSummary        string `json:"plan_summary,omitempty"`
+	QuerySignature     string `json:"query_signature,omitempty"`
+	User               string `json:"user,omitempty"`
+	Application        string `json:"application,omitempty"`
+	Statement          bson.M `json:"statement"`
+	RawQuery           bson.M `json:"raw_query"`
+	QueryHash          string `json:"query_hash,omitempty"`
+	QueryShapeHash     string `json:"query_shape_hash,omitempty"`
+	PlanCacheKey       string `json:"plan_cache_key,omitempty"`
+	QueryFramework     string `json:"query_framework,omitempty"`
+	Comment            string `json:"comment,omitempty"`
+	Millis             int64  `json:"millis,omitempty"`
+	NumYields          int64  `json:"num_yields,omitempty"`
+	ResponseLength     int64  `json:"response_length,omitempty"`
+	NReturned          int64  `json:"nreturned,omitempty"`
+	NMatched           int64  `json:"nmatched,omitempty"`
+	NModified          int64  `json:"nmodified,omitempty"`
+	NInserted          int64  `json:"ninserted,omitempty"`
+	NDeleted           int64  `json:"ndeleted,omitempty"`
+	KeysExamined       int64  `json:"keys_examined,omitempty"`
+	DocsExamined       int64  `json:"docs_examined,omitempty"`
+	KeysInserted       int64  `json:"keys_inserted,omitempty"`
+	WriteConflicts     int64  `json:"write_conflicts,omitempty"`
+	CpuNanos           int64  `json:"cpu_nanos,omitempty"`
+	PlanningTimeMicros int64  `json:"planning_time_micros,omitempty"`
+	CursorExhausted    bool   `json:"cursor_exhausted,omitempty"`
+	Upsert             bool   `json:"upsert,omitempty"`
+	HasSortStage       bool   `json:"has_sort_stage,omitempty"`
+	UsedDisk           string `json:"used_disk,omitempty"`
+	FromMultiPlanner   string `json:"from_multi_planner,omitempty"`
+	Replanned          string `json:"replanned,omitempty"`
+	ReplanReason       string `json:"replan_reason,omitempty"`
+	Client             string `json:"client,omitempty"`
+	Cursor             bson.M `json:"cursor,omitempty"`
+	LockStats          bson.M `json:"lock_stats,omitempty"`
+	FlowControlStats   bson.M `json:"flow_control_stats,omitempty"`
+}
+
+// createSlowOperationEvent builds a slow operation event from a BSON map.
+func createSlowOperationEvent(slowOperation bson.M) SlowOperationEvent {
+	var event SlowOperationEvent
+	if ts, ok := slowOperation["ts"].(primitive.DateTime); ok {
+		event.Timestamp = ts.Time().UnixMilli() // convert to milliseconds
+	}
+	event.Database = getStringValue(slowOperation, "dbname")
+	event.Operation = getSlowOperationOpType(slowOperation)
+	event.NS = getStringValue(slowOperation, "ns")
+	event.PlanSummary = getStringValue(slowOperation, "planSummary")
+	event.QuerySignature = getStringValue(slowOperation, "query_signature")
+	event.User = getStringValue(slowOperation, "user")
+	event.Application = getStringValue(slowOperation, "appName")
+	// Use comma-ok assertions so a missing or differently typed field cannot panic.
+	if statement, ok := slowOperation["obfuscated_command"].(bson.M); ok {
+		event.Statement = statement
+	}
+	rawQuery, _ := slowOperation["command"].(bson.M)
+	event.RawQuery = rawQuery
+	event.QueryHash = _getSlowOperationQueryHash(slowOperation)
+	event.QueryShapeHash = getStringValue(slowOperation, "queryShapeHash")
+	event.PlanCacheKey = getStringValue(slowOperation, "planCacheKey")
+	event.QueryFramework = getStringValue(slowOperation, "queryFramework")
+	event.Comment = getStringValue(rawQuery, "comment")
+	event.Millis = getIntValue(slowOperation, "millis")
+	event.NumYields = getIntValue(slowOperation, "numYield")
+	event.ResponseLength = getIntValue(slowOperation, "responseLength")
+	event.NReturned = getIntValue(slowOperation, "nreturned")
+	event.NMatched = getIntValue(slowOperation, "nMatched")
+	event.NModified = getIntValue(slowOperation, "nModified")
+	event.NInserted = getIntValue(slowOperation, "ninserted")
+	event.NDeleted = getIntValue(slowOperation, "ndeleted")
+	event.KeysExamined = getIntValue(slowOperation, "keysExamined")
+	event.DocsExamined = getIntValue(slowOperation, "docsExamined")
+	event.KeysInserted = getIntValue(slowOperation, "keysInserted")
+	event.WriteConflicts = getIntValue(slowOperation, "writeConflicts")
+	event.CpuNanos = getIntValue(slowOperation, "cpuNanos")
+	event.PlanningTimeMicros = getIntValue(slowOperation, "planningTimeMicros")
+	event.CursorExhausted = getBoolValue(slowOperation, "cursorExhausted")
+	event.Upsert = getBoolValue(slowOperation, "upsert")
+	event.HasSortStage = getBoolValue(slowOperation, "hasSortStage")
+	event.UsedDisk = getStringValue(slowOperation, "usedDisk")
+	event.FromMultiPlanner = getStringValue(slowOperation, "fromMultiPlanner")
+	event.Replanned = getStringValue(slowOperation, "replanned")
+	event.ReplanReason = getStringValue(slowOperation, "replanReason")
+
+	// Add client information using the helper function
+	event.Client = _getSlowOperationClient(slowOperation)
+
+	// Add cursor information using the helper function
+	if cursorInfo := _getSlowOperationCursor(slowOperation); cursorInfo != nil {
+		event.Cursor = cursorInfo
+	}
+
+	// Add lock stats using the helper function
+	if lockStats := _getSlowOperationLockStats(slowOperation); lockStats != nil {
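+		// keys were already normalized to snake_case by formatKeyName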
+		event.LockStats = lockStats
+	}
+
+	// Add flow control stats using the helper function
+	if flowControlStats := _getSlowOperationFlowControlStats(slowOperation); flowControlStats != nil {
+		event.FlowControlStats = flowControlStats
+	}
+
+	return event
+}
+
+// getSlowOperationOpType returns the operation type, preferring "op" over "type".
+func getSlowOperationOpType(slowOperation bson.M) string {
+	if op, ok := slowOperation["op"].(string); ok {
+		return op
+	}
+	if typ, ok := slowOperation["type"].(string); ok {
+		return typ
+	}
+	return ""
+}
+
+// getStringValue safely extracts a string from bson.M; it returns "" when the
+// key is absent or holds a non-string value.
+func getStringValue(m bson.M, key string) string {
+	if value, ok := m[key].(string); ok {
+		return value
+	}
+	return ""
+}
+
+// getIntValue safely extracts an integer from bson.M; numeric profiler fields
+// may decode as int32, int64, or float64 depending on the server.
+func getIntValue(m bson.M, key string) int64 {
+	switch v := m[key].(type) {
+	case int32:
+		return int64(v)
+	case int64:
+		return v
+	case float64:
+		return int64(v)
+	default:
+		return 0
+	}
+}
+
+// getBoolValue safely extracts a boolean from bson.M.
+func getBoolValue(m bson.M, key string) bool {
+	if value, ok := m[key].(bool); ok {
+		return value
+	}
+	return false
+}
+
+// _getSlowOperationClient retrieves the calling client hostname from a slow
+// operation BSON map, falling back from "client" to "remote".
+func _getSlowOperationClient(slowOperation bson.M) string {
+	if client := getStringValue(slowOperation, "client"); client != "" {
+		return client
+	}
+	return getStringValue(slowOperation, "remote")
+}
+
+// _getSlowOperationQueryHash retrieves the query hash from a slow operation
+// BSON map, falling back from "queryHash" to "planCacheShapeHash".
+func _getSlowOperationQueryHash(slowOperation bson.M) string {
+	hash := slowOperation["queryHash"]
+	if hash == nil {
+		hash = slowOperation["planCacheShapeHash"]
+	}
+	if hashStr, ok := hash.(string); ok {
+		return hashStr
+	}
+	return ""
+}
+
+// _getSlowOperationCursor retrieves cursor information from a slow operation BSON map.
+func _getSlowOperationCursor(slowOperation bson.M) bson.M {
+	cursorID := slowOperation["cursorid"]
+	originatingCommand := slowOperation["originatingCommand"]
+	if cursorID != nil || originatingCommand != nil {
+		return bson.M{
+			"cursor_id":           cursorID,
+			"originating_command": originatingCommand,
+			"comment":             slowOperation["originatingCommandComment"],
+		}
+	}
+	return nil
+}
+
+// _getSlowOperationLockStats retrieves lock statistics from a slow operation BSON map.
+func _getSlowOperationLockStats(slowOperation bson.M) bson.M {
+	if lockStats, ok := slowOperation["locks"].(bson.M); ok {
+		return formatKeyName(toSnakeCase, lockStats)
+	}
+	return nil
+}
+
+// _getSlowOperationFlowControlStats retrieves flow control statistics from a slow operation BSON map.
+func _getSlowOperationFlowControlStats(slowOperation bson.M) bson.M {
+	if flowControlStats, ok := slowOperation["flowControl"].(bson.M); ok {
+		return formatKeyName(toSnakeCase, flowControlStats)
+	}
+	return nil
+}
+
+// formatKeyName converts camelCase keys in metricDict to snake_case.
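+// For example, "timeAcquiringMicros" becomes "time_acquiring_micros". Nested
+// documents are formatted recursively; when a converted key would collide with
+// an existing one, the original key is kept.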
+func formatKeyName(formatter func(string) string, metricDict bson.M) bson.M {
+	formatted := make(bson.M)
+
+	for key, value := range metricDict {
+		// Convert the key using the provided formatter.
+		formattedKey := formatter(key)
+
+		// On a key conflict, fall back to the original key.
+		if _, exists := formatted[formattedKey]; exists {
+			formattedKey = key
+		}
+
+		// If the value is a nested document, recursively format it.
+		if nestedMap, ok := value.(bson.M); ok {
+			formatted[formattedKey] = formatKeyName(formatter, nestedMap)
+		} else {
+			formatted[formattedKey] = value
+		}
+	}
+
+	return formatted
+}
+
+// toSnakeCase converts a camelCase string to snake_case.
+func toSnakeCase(str string) string {
+	var result strings.Builder
+	for i, char := range str {
+		if i > 0 && 'A' <= char && char <= 'Z' {
+			result.WriteRune('_')
+		}
+		result.WriteRune(char)
+	}
+	return strings.ToLower(result.String())
+}
+
+// RemovedKeys lists command fields that are stripped during obfuscation.
+var RemovedKeys = map[string]struct{}{
+	"comment":      {},
+	"lsid":         {},
+	"$clusterTime": {},
+	"_id":          {},
+	"txnNumber":    {},
+}
+
+// obfuscateCommand removes sensitive information from the command.
+func obfuscateCommand(command bson.M) bson.M {
+	// Create a new map to hold the obfuscated command.
+	obfuscatedCommand := bson.M{}
+	for key, value := range command {
+		// Skip keys that must be removed.
+		if _, exists := RemovedKeys[key]; exists {
+			continue
+		}
+
+		switch v := value.(type) {
+		case bson.M:
+			// Recursively obfuscate nested documents.
+			obfuscatedCommand[key] = obfuscateCommand(v)
+		case bson.A:
+			// Process each array element, obfuscating nested documents.
+			obfuscatedSlice := make([]interface{}, len(v))
+			for i, item := range v {
+				if nestedMap, ok := item.(bson.M); ok {
+					obfuscatedSlice[i] = obfuscateCommand(nestedMap)
+				} else {
+					obfuscatedSlice[i] = item // keep non-document items as they are
+				}
+			}
+			obfuscatedCommand[key] = obfuscatedSlice
+		default:
+			// For all other types, copy the value unchanged.
+			obfuscatedCommand[key] = value
+		}
+	}
+
+	return obfuscatedCommand
+}
+
+// computeExecPlanSignature returns a stable hash for a normalized JSON plan.
+// json.Marshal emits map keys in sorted order, so marshaling a bson.M command
+// already yields a canonical form suitable for hashing.
+func computeExecPlanSignature(normalizedJsonPlan string) string {
+	if normalizedJsonPlan == "" {
+		return ""
+	}
+	h := fnv.New64a()
+	h.Write([]byte(normalizedJsonPlan))
+	return fmt.Sprintf("%x", h.Sum64())
+}
+
+// obfuscateSlowOperation obfuscates the command of a slow operation and
+// annotates it with the database name and a query signature.
+func obfuscateSlowOperation(slowOperation bson.M, dbName string) bson.M {
+	// Obfuscate the command; the comma-ok assertion tolerates a missing command.
+	command, _ := slowOperation["command"].(bson.M)
+	obfuscatedCommand := obfuscateCommand(command)
+
+	// Compute the query signature from the obfuscated command.
+	jsonCommand, _ := json.Marshal(obfuscatedCommand)
+	querySignature := computeExecPlanSignature(string(jsonCommand))
+
+	// Update the slow operation with the new fields.
+	slowOperation["dbname"] = dbName
+	slowOperation["obfuscated_command"] = obfuscatedCommand
+	slowOperation["query_signature"] = querySignature
+
+	// Obfuscate the originating command as well, preserving its comment.
+	if origCmdMap, ok := slowOperation["originatingCommand"].(bson.M); ok {
+		slowOperation["originatingCommandComment"] = origCmdMap["comment"]
+		slowOperation["originatingCommand"] = obfuscateCommand(origCmdMap)
+	}
+
+	return slowOperation
+}
+
+// collectSlowOperationsFromProfiler queries system.profile for operations
+// recorded after lastTs and returns them in obfuscated form.
+func collectSlowOperationsFromProfiler(ctx context.Context, client *mongo.Client, dbName string, lastTs time.Time) ([]bson.M, error) {
+	// Filter for entries recorded at or after lastTs (the collection interval).
+	filter := bson.D{
+		{"ts", bson.D{{"$gte", lastTs}}},
+	}
+
+	// Execute the query against the system.profile collection.
+	cursor, err := client.Database(dbName).Collection("system.profile").Find(ctx, filter)
+	if err != nil {
+		return nil, fmt.Errorf("failed to find profiling data: %w", err)
+	}
+	defer cursor.Close(ctx)
+
+	var slowOperations []bson.M
+
+	for cursor.Next(ctx) {
+		var profile bson.M
+		if err := cursor.Decode(&profile); err != nil {
+			return nil, fmt.Errorf("failed to decode cursor result: %w", err)
+		}
+		// Skip profiles without a command.
+		if _, ok := profile["command"]; !ok {
+			continue
+		}
+		// Skip queries against system.profile itself.
+		if ns, ok := profile["ns"].(string); ok {
+			if strings.Contains(ns, ".system.profile") {
+				continue
+			}
+		}
+		// Obfuscate the slow operation before collecting it.
+		obfuscatedProfile := obfuscateSlowOperation(profile, dbName)
+		slowOperations = append(slowOperations, obfuscatedProfile)
+	}
+
+	if err := cursor.Err(); err != nil {
+		return nil, fmt.Errorf("cursor error: %w", err)
+	}
+
+	return slowOperations, nil
+}
+
+// collectSlowOperations gathers slow operations for the specified database and
+// converts them into SlowOperationEvent values.
+func collectSlowOperations(ctx context.Context, client *mongo.Client, dbName string, lastTs time.Time) ([]SlowOperationEvent, error) {
+	slowOperations, err := collectSlowOperationsFromProfiler(ctx, client, dbName, lastTs)
+	if err != nil {
+		return nil, fmt.Errorf("error retrieving slow operations: %w", err)
+	}
+
+	var events []SlowOperationEvent
+	for _, ops := range slowOperations {
+		events = append(events, createSlowOperationEvent(ops))
+	}
+	return events, nil
+}
+
+// ConvertToJSONString marshals data to a JSON string, returning "" on error.
+func ConvertToJSONString(data interface{}) string {
+	jsonData, err := json.Marshal(data)
+	if err != nil {
+		log.Printf("error converting to JSON string: %v", err)
+		return ""
+	}
+	return string(jsonData)
+}
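As a usage illustration of the helpers in slow_query.go, the following minimal test-style sketch (not part of the diff above; the test name and sample command are hypothetical, and it assumes nested documents decode as bson.M) shows the snake_case conversion, the recursive stripping of sensitive fields, and the stability of the query signature:

package mongodbreceiver

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"go.mongodb.org/mongo-driver/bson"
)

func TestSlowQueryHelpersSketch(t *testing.T) {
	// camelCase keys are rewritten to snake_case.
	assert.Equal(t, "time_acquiring_micros", toSnakeCase("timeAcquiringMicros"))

	// Sensitive fields ("comment", "lsid", "_id", ...) are stripped recursively.
	got := obfuscateCommand(bson.M{
		"find":    "users",
		"comment": "do not leak",
		"filter":  bson.M{"_id": 1, "status": "active"},
	})
	assert.Equal(t, bson.M{
		"find":   "users",
		"filter": bson.M{"status": "active"},
	}, got)

	// Identical obfuscated commands hash to the same query signature, because
	// json.Marshal emits map keys in sorted order.
	sig := computeExecPlanSignature(ConvertToJSONString(got))
	assert.NotEmpty(t, sig)
	assert.Equal(t, sig, computeExecPlanSignature(ConvertToJSONString(got)))
}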