From cfc1bfc4052c3061b21e98935cbffa1b0fdd9aef Mon Sep 17 00:00:00 2001 From: Nicholas Steicke <59816443+nicholass-alcidion@users.noreply.github.com> Date: Wed, 10 Apr 2024 17:01:04 +0930 Subject: [PATCH 01/10] Update RabbitMQ HTTP Connections with TLS Config (#5672) Signed-off-by: Nicholas Steicke --- CHANGELOG.md | 1 + pkg/scalers/rabbitmq_scaler.go | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index aad16f3f023..f6f1dc1c5fc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,7 @@ Here is an overview of all new **experimental** features: - **General**: Validate empty array value of triggers in ScaledObject/ScaledJob creation ([#5520](https://github.com/kedacore/keda/issues/5520)) - **GitHub Runner Scaler**: Fixed `in_progress` detection on running jobs instead of just `queued` ([#5604](https://github.com/kedacore/keda/issues/5604)) - **New Relic Scaler**: Consider empty results set from query executer ([#5619](https://github.com/kedacore/keda/pull/5619)) +- **RabbitMQ Scaler**: HTTP Connections respect TLS configuration ([#5668](https://github.com/kedacore/keda/issues/5668)) ### Deprecations diff --git a/pkg/scalers/rabbitmq_scaler.go b/pkg/scalers/rabbitmq_scaler.go index 4e8cf3c2b5e..6037aa45944 100644 --- a/pkg/scalers/rabbitmq_scaler.go +++ b/pkg/scalers/rabbitmq_scaler.go @@ -137,6 +137,14 @@ func NewRabbitMQScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { s.metadata = meta s.httpClient = kedautil.CreateHTTPClient(meta.timeout, meta.unsafeSsl) + if meta.enableTLS { + tlsConfig, tlsErr := kedautil.NewTLSConfigWithPassword(meta.cert, meta.key, meta.keyPassword, meta.ca, meta.unsafeSsl) + if tlsErr != nil { + return nil, tlsErr + } + s.httpClient.Transport = kedautil.CreateHTTPTransportWithTLSConfig(tlsConfig) + } + if meta.protocol == amqpProtocol { // Override vhost if requested. 
host := meta.host From 9be5740b0ab2844ea72c81a01975c248b44ae8c9 Mon Sep 17 00:00:00 2001 From: Adarsh Verma <113962919+Adarsh-verma-14@users.noreply.github.com> Date: Wed, 10 Apr 2024 21:23:32 +0530 Subject: [PATCH 02/10] add tls authentication for NATS Streaming (#5662) Signed-off-by: Adarsh-verma-14 --- CHANGELOG.md | 1 + pkg/scalers/stan_scaler.go | 41 +++++++++++++++++++++-- pkg/scalers/stan_scaler_test.go | 58 +++++++++++++++++++++++++++++++++ 3 files changed, 98 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f6f1dc1c5fc..60049b4ad86 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ### New - **General**: TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) +- **NATS Scaler**: Add TLS authentication ([#2296](https://github.com/kedacore/keda/issues/2296)) #### Experimental diff --git a/pkg/scalers/stan_scaler.go b/pkg/scalers/stan_scaler.go index c9bc49756c1..701073e7d12 100644 --- a/pkg/scalers/stan_scaler.go +++ b/pkg/scalers/stan_scaler.go @@ -7,6 +7,7 @@ import ( "fmt" "net/http" "strconv" + "strings" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" @@ -53,6 +54,12 @@ type stanMetadata struct { lagThreshold int64 activationLagThreshold int64 triggerIndex int + + // TLS + enableTLS bool + cert string + key string + ca string } const ( @@ -60,6 +67,8 @@ const ( defaultStanLagThreshold = 10 natsStreamingHTTPProtocol = "http" natsStreamingHTTPSProtocol = "https" + stanTLSEnable = "enable" + stanTLSDisable = "disable" ) // NewStanScaler creates a new stanScaler @@ -73,12 +82,19 @@ func NewStanScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { if err != nil { return nil, fmt.Errorf("error parsing stan metadata: %w", err) } - + httpClient := kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, false) + if stanMetadata.enableTLS { + config, err := kedautil.NewTLSConfig(stanMetadata.cert, stanMetadata.key, stanMetadata.ca, false) + if err != nil { + return nil, err + } + httpClient.Transport = kedautil.CreateHTTPTransportWithTLSConfig(config) + } return &stanScaler{ channelInfo: &monitorChannelInfo{}, metricType: metricType, metadata: stanMetadata, - httpClient: kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, false), + httpClient: httpClient, logger: InitializeLogger(config, "stan_scaler"), }, nil } @@ -123,12 +139,33 @@ func parseStanMetadata(config *scalersconfig.ScalerConfig) (stanMetadata, error) meta.triggerIndex = config.TriggerIndex var err error + + meta.enableTLS = false // Default value for enableTLS useHTTPS := false if val, ok := config.TriggerMetadata["useHttps"]; ok { useHTTPS, err = strconv.ParseBool(val) if err != nil { return meta, fmt.Errorf("useHTTPS parsing error %w", err) } + if val, ok := config.AuthParams["tls"]; ok { + val = strings.TrimSpace(val) + if val == stanTLSEnable { + certGiven := config.AuthParams["cert"] != "" + keyGiven := config.AuthParams["key"] != "" + if certGiven && !keyGiven { + return meta, errors.New("no key given") + } + if keyGiven && !certGiven { + return meta, errors.New("no cert given") + } + meta.cert = config.AuthParams["cert"] + meta.key = config.AuthParams["key"] + meta.ca = config.AuthParams["ca"] + meta.enableTLS = true + } else if val != stanTLSDisable { + return meta, fmt.Errorf("err incorrect value for TLS given: %s", val) + } + } } natsServerEndpoint, err := GetFromAuthOrMeta(config, "natsServerMonitoringEndpoint") if err != nil { diff --git a/pkg/scalers/stan_scaler_test.go 
b/pkg/scalers/stan_scaler_test.go index 6825b4c778a..5b89b17cc04 100644 --- a/pkg/scalers/stan_scaler_test.go +++ b/pkg/scalers/stan_scaler_test.go @@ -17,12 +17,29 @@ type parseStanMetadataTestData struct { isError bool } +type parseStanTLSTestData struct { + metadata map[string]string + authParams map[string]string + isError bool + enableTLS bool +} + type stanMetricIdentifier struct { metadataTestData *parseStanMetadataTestData triggerIndex int name string } +var validStanMetadata = map[string]string{ + "natsServerMonitoringEndpoint": "stan-nats-ss.stan.svc.cluster.local:8222", + "queueGroup": "grp1", + "durableName": "ImDurable", + "subject": "Test", + "lagThreshold": "10", + "activationLagThreshold": "5", + "useHttps": "true", +} + var testStanMetadata = []parseStanMetadataTestData{ // nothing passed {map[string]string{}, map[string]string{}, true}, @@ -44,6 +61,20 @@ var testStanMetadata = []parseStanMetadataTestData{ {map[string]string{"natsServerMonitoringEndpoint": "stan-nats-ss", "queueGroup": "grp1", "durableName": "ImDurable", "subject": "mySubject", "useHttps": "error"}, map[string]string{}, true}, } +var parseStanAuthParamsTestDataset = []parseStanTLSTestData{ + // success + {map[string]string{"natsServerMonitoringEndpoint": "stan-nats-ss", "queueGroup": "grp1", "durableName": "ImDurable", "subject": "mySubject", "useHttps": "true"}, map[string]string{"tls": "enable", "ca": "caaa", "cert": "ceert", "key": "keey"}, false, true}, + // success, TLS cert/key and assumed public CA + {map[string]string{"natsServerMonitoringEndpoint": "stan-nats-ss", "queueGroup": "grp1", "durableName": "ImDurable", "subject": "mySubject", "useHttps": "true"}, map[string]string{"tls": "enable", "cert": "ceert", "key": "keey"}, false, true}, + // success, TLS CA only + {map[string]string{"natsServerMonitoringEndpoint": "stan-nats-ss", "queueGroup": "grp1", "durableName": "ImDurable", "subject": "mySubject", "useHttps": "true"}, map[string]string{"tls": "enable", "ca": "caa"}, false, true}, + // Missing TLS key, should fail. + {map[string]string{"natsServerMonitoringEndpoint": "stan-nats-ss", "queueGroup": "grp1", "durableName": "ImDurable", "subject": "mySubject", "useHttps": "true"}, map[string]string{"tls": "enable", "cert": "ceert"}, true, false}, + // Missing TLS cert, should fail. + {map[string]string{"natsServerMonitoringEndpoint": "stan-nats-ss", "queueGroup": "grp1", "durableName": "ImDurable", "subject": "mySubject", "useHttps": "true"}, map[string]string{"tls": "enable", "key": "keey"}, true, false}, + // TLS invalid, should fail. 
+	{map[string]string{"natsServerMonitoringEndpoint": "stan-nats-ss", "queueGroup": "grp1", "durableName": "ImDurable", "subject": "mySubject", "useHttps": "true"}, map[string]string{"tls": "yes", "ca": "caa", "cert": "ceert", "key": "keey"}, true, false},
+}
 var stanMetricIdentifiers = []stanMetricIdentifier{
 	{&testStanMetadata[4], 0, "s0-stan-mySubject"},
 	{&testStanMetadata[4], 1, "s1-stan-mySubject"},
@@ -60,6 +91,33 @@ func TestStanParseMetadata(t *testing.T) {
 	}
 }
 
+func TestParseStanAuthParams(t *testing.T) {
+	for _, testData := range parseStanAuthParamsTestDataset {
+		meta, err := parseStanMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: validStanMetadata, AuthParams: testData.authParams})
+
+		if err != nil && !testData.isError {
+			t.Error("Expected success but got error", err)
+		}
+		if testData.isError && err == nil {
+			t.Error("Expected error but got success")
+		}
+		if meta.enableTLS != testData.enableTLS {
+			t.Errorf("Expected enableTLS to be set to %v but got %v\n", testData.enableTLS, meta.enableTLS)
+		}
+		if meta.enableTLS {
+			if meta.ca != testData.authParams["ca"] {
+				t.Errorf("Expected ca to be set to %v but got %v\n", testData.authParams["ca"], meta.ca)
+			}
+			if meta.cert != testData.authParams["cert"] {
+				t.Errorf("Expected cert to be set to %v but got %v\n", testData.authParams["cert"], meta.cert)
+			}
+			if meta.key != testData.authParams["key"] {
+				t.Errorf("Expected key to be set to %v but got %v\n", testData.authParams["key"], meta.key)
+			}
+		}
+	}
+}
+
 func TestStanGetMetricSpecForScaling(t *testing.T) {
 	for _, testData := range stanMetricIdentifiers {
 		ctx := context.Background()

From 08aeb57e9f5024d99c4a2c41fe3934c25a8a5e37 Mon Sep 17 00:00:00 2001
From: aliaqel-stripe <120822631+aliaqel-stripe@users.noreply.github.com>
Date: Wed, 10 Apr 2024 09:59:55 -0700
Subject: [PATCH 03/10] Add GRPC client and server metrics (#5615)

Signed-off-by: Ali Aqel
---
 CHANGELOG.md | 1 +
 cmd/adapter/main.go | 30 ++-
 go.mod | 3 +
 go.sum | 7 +
 pkg/metricscollector/metricscollectors.go | 17 +-
 pkg/metricscollector/prommetrics.go | 27 +++
 pkg/metricsservice/client.go | 9 +-
 pkg/metricsservice/server.go | 16 +-
 .../prometheus_metrics_test.go | 162 ++++++++++++++
 .../providers/prometheus/LICENSE | 201 ++++++++++++++++++
 .../providers/prometheus/client_metrics.go | 117 ++++++++++
 .../providers/prometheus/client_options.go | 77 +++++++
 .../providers/prometheus/constants.go | 23 ++
 .../providers/prometheus/doc.go | 8 +
 .../providers/prometheus/options.go | 129 +++++++++++
 .../providers/prometheus/reporter.go | 113 ++++++++++
 .../providers/prometheus/server_metrics.go | 123 +++++++++++
 .../providers/prometheus/server_options.go | 48 +++++
 .../go-grpc-middleware/v2/COPYRIGHT | 2 +
 .../go-grpc-middleware/v2/LICENSE | 201 ++++++++++++++++++
 .../v2/interceptors/client.go | 83 ++++++++
 .../go-grpc-middleware/v2/interceptors/doc.go | 12 ++
 .../v2/interceptors/reporter.go | 116 ++++++++++
 .../v2/interceptors/server.go | 74 +++++++
 vendor/modules.txt | 6 +
 25 files changed, 1601 insertions(+), 4 deletions(-)
 create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/LICENSE
 create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/client_metrics.go
 create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/client_options.go
 create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/constants.go
 create mode 100644
vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/options.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/reporter.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_metrics.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_options.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/client.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/reporter.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/server.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 60049b4ad86..bcfe110db29 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,7 @@ Here is an overview of all new **experimental** features: - **General**: Add active trigger name in ScaledObject's scale out event ([#5577](https://github.com/kedacore/keda/issues/5577)) - **General**: Add command-line flag in Adapter to allow override of gRPC Authority Header ([#5449](https://github.com/kedacore/keda/issues/5449)) +- **General**: Add GRPC Client and Server metrics ([#5502](https://github.com/kedacore/keda/issues/5502)) - **General**: Add GRPC Healthchecks ([#5590](https://github.com/kedacore/keda/issues/5590)) - **General**: Add OPENTELEMETRY flag in e2e test YAML ([#5375](https://github.com/kedacore/keda/issues/5375)) - **General**: Add support for cross tenant/cloud authentication when using Azure Workload Identity for TriggerAuthentication ([#5441](https://github.com/kedacore/keda/issues/5441)) diff --git a/cmd/adapter/main.go b/cmd/adapter/main.go index b54acc69911..b5762fabef3 100644 --- a/cmd/adapter/main.go +++ b/cmd/adapter/main.go @@ -23,6 +23,8 @@ import ( "net/http" "os" + grpcprom "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" _ "go.uber.org/automaxprocs" appsv1 "k8s.io/api/apps/v1" @@ -105,6 +107,8 @@ func (a *Adapter) makeProvider(ctx context.Context) (provider.ExternalMetricsPro cfg.Burst = adapterClientRequestBurst cfg.DisableCompression = disableCompression + clientMetrics := getMetricInterceptor() + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ Metrics: server.Options{ BindAddress: "0", // disabled since we use our own server to serve metrics @@ -124,7 +128,7 @@ func (a *Adapter) makeProvider(ctx context.Context) (provider.ExternalMetricsPro } logger.Info("Connecting Metrics Service gRPC client to the server", "address", metricsServiceAddr) - grpcClient, err := metricsservice.NewGrpcClient(metricsServiceAddr, a.SecureServing.ServerCert.CertDirectory, metricsServiceGRPCAuthority) + grpcClient, err := metricsservice.NewGrpcClient(metricsServiceAddr, a.SecureServing.ServerCert.CertDirectory, metricsServiceGRPCAuthority, clientMetrics) if err != nil { logger.Error(err, "error connecting Metrics Service gRPC client to the server", "address", metricsServiceAddr) return nil, nil, err @@ -158,6 +162,30 @@ func getMetricHandler() http.HandlerFunc { } } +// 
getMetricInterceptor returns a metrics interceptor that records metrics between the adapter and operator
+func getMetricInterceptor() *grpcprom.ClientMetrics {
+	metricsNamespace := "keda_internal_metricsservice"
+
+	counterNamespace := func(o *prometheus.CounterOpts) {
+		o.Namespace = metricsNamespace
+	}
+
+	histogramNamespace := func(o *prometheus.HistogramOpts) {
+		o.Namespace = metricsNamespace
+	}
+
+	clientMetrics := grpcprom.NewClientMetrics(
+		grpcprom.WithClientHandlingTimeHistogram(
+			grpcprom.WithHistogramBuckets([]float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}),
+			histogramNamespace,
+		),
+		grpcprom.WithClientCounterOptions(counterNamespace),
+	)
+	legacyregistry.Registerer().MustRegister(clientMetrics)
+
+	return clientMetrics
+}
+
 // RunMetricsServer runs a http listener and handles the /metrics endpoint
 // this is needed to consolidate apiserver and controller-runtime metrics
 // we have to use a separate http server & can't rely on the controller-runtime implementation
diff --git a/go.mod b/go.mod
index d5b3ff5481d..a46f13757ae 100644
--- a/go.mod
+++ b/go.mod
@@ -53,6 +53,7 @@ require (
 	github.com/google/go-github/v50 v50.2.0
 	github.com/google/uuid v1.6.0
 	github.com/gophercloud/gophercloud v1.8.0
+	github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0
 	github.com/hashicorp/vault/api v1.11.0
 	github.com/influxdata/influxdb-client-go/v2 v2.13.0
 	github.com/jackc/pgx/v5 v5.5.2
@@ -112,6 +113,8 @@ require (
 	sigs.k8s.io/kustomize/kustomize/v5 v5.3.0
 )
 
+require github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3 // indirect
+
 replace (
 	// pin k8s.io to v0.28.5
 	github.com/google/cel-go => github.com/google/cel-go v0.16.1
diff --git a/go.sum b/go.sum
index 37d4db5212a..95b5dcce8ca 100644
--- a/go.sum
+++ b/go.sum
@@ -1348,6 +1348,10 @@ github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZH
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
+github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 h1:f4tggROQKKcnh4eItay6z/HbHLqghBxS8g7pyMhmDio=
+github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0/go.mod h1:hKAkSgNkL0FII46ZkJcpVEAai4KV+swlIWCKfekd1pA=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3 h1:o95KDiV/b1xdkumY5YbLR0/n2+wBxUpgf3HgfKgTyLI=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3/go.mod h1:hTxjzRcX49ogbTGVJ1sM5mz5s+SSgiGIyL3jjPxl32E=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@@ -1944,6 +1948,7 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
@@ -2283,6 +2288,7 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY
 google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
 google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@@ -2432,6 +2438,7 @@ google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y=
+google.golang.org/grpc/examples v0.0.0-20210424002626-9572fd6faeae/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
diff --git a/pkg/metricscollector/metricscollectors.go b/pkg/metricscollector/metricscollectors.go
index 296bc6b86b6..ef6458b76b7 100644
--- a/pkg/metricscollector/metricscollectors.go
+++ b/pkg/metricscollector/metricscollectors.go
@@ -16,6 +16,10 @@ limitations under the License.
 
 package metricscollector
 
+import (
+	grpcprom "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus"
+)
+
 const (
 	ClusterTriggerAuthenticationResource = "cluster_trigger_authentication"
 	TriggerAuthenticationResource        = "trigger_authentication"
@@ -27,7 +31,8 @@ const (
 )
 
 var (
-	collectors []MetricsCollector
+	collectors        []MetricsCollector
+	promServerMetrics *grpcprom.ServerMetrics
 )
 
 type MetricsCollector interface {
@@ -76,6 +81,10 @@ func NewMetricsCollectors(enablePrometheusMetrics bool, enableOpenTelemetryMetri
 	if enablePrometheusMetrics {
 		promometrics := NewPromMetrics()
 		collectors = append(collectors, promometrics)
+
+		if promServerMetrics == nil {
+			promServerMetrics = newPromServerMetrics()
+		}
 	}
 
 	if enableOpenTelemetryMetrics {
@@ -184,3 +193,9 @@ func RecordCloudEventQueueStatus(namespace string, value int) {
 		element.RecordCloudEventQueueStatus(namespace, value)
 	}
 }
+
+// GetServerMetrics returns the ServerMetrics object for GRPC Server metrics. Used to initialize the GRPC server with the proper interceptors.
+// Currently, only Prometheus metrics are supported.
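+//
+// A hedged usage sketch, mirroring the server wiring later in this patch
+// (grpcServerOpts is a hypothetical []grpc.ServerOption):
+//
+//	if sm := GetServerMetrics(); sm != nil {
+//		grpcServerOpts = append(grpcServerOpts,
+//			grpc.ChainUnaryInterceptor(sm.UnaryServerInterceptor()),
+//			grpc.ChainStreamInterceptor(sm.StreamServerInterceptor()),
+//		)
+//	}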
+func GetServerMetrics() *grpcprom.ServerMetrics { + return promServerMetrics +} diff --git a/pkg/metricscollector/prommetrics.go b/pkg/metricscollector/prommetrics.go index 29bf749fe76..2f77bf06f30 100644 --- a/pkg/metricscollector/prommetrics.go +++ b/pkg/metricscollector/prommetrics.go @@ -20,6 +20,7 @@ import ( "runtime" "strconv" + grpcprom "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus" "github.com/prometheus/client_golang/prometheus" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/metrics" @@ -329,3 +330,29 @@ func (p *PromMetrics) RecordCloudEventEmittedError(namespace string, cloudevents func (p *PromMetrics) RecordCloudEventQueueStatus(namespace string, value int) { cloudeventQueueStatus.With(prometheus.Labels{"namespace": namespace}).Set(float64(value)) } + +// Returns a grpcprom server Metrics object and registers the metrics. The object contains +// interceptors to chain to the server so that all requests served are observed. Intended to be called +// as part of initialization of metricscollector, hence why this function is not exported +func newPromServerMetrics() *grpcprom.ServerMetrics { + metricsNamespace := "keda_internal_metricsservice" + + counterNamespace := func(o *prometheus.CounterOpts) { + o.Namespace = metricsNamespace + } + + histogramNamespace := func(o *prometheus.HistogramOpts) { + o.Namespace = metricsNamespace + } + + serverMetrics := grpcprom.NewServerMetrics( + grpcprom.WithServerHandlingTimeHistogram( + grpcprom.WithHistogramBuckets([]float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}), + histogramNamespace, + ), + grpcprom.WithServerCounterOptions(counterNamespace), + ) + metrics.Registry.MustRegister(serverMetrics) + + return serverMetrics +} diff --git a/pkg/metricsservice/client.go b/pkg/metricsservice/client.go index 6f3c66b7567..7c01c77cebc 100644 --- a/pkg/metricsservice/client.go +++ b/pkg/metricsservice/client.go @@ -22,6 +22,7 @@ import ( "time" "github.com/go-logr/logr" + grpcprom "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" "k8s.io/metrics/pkg/apis/external_metrics" @@ -36,7 +37,7 @@ type GrpcClient struct { connection *grpc.ClientConn } -func NewGrpcClient(url, certDir, authority string) (*GrpcClient, error) { +func NewGrpcClient(url, certDir, authority string, clientMetrics *grpcprom.ClientMetrics) (*GrpcClient, error) { defaultConfig := fmt.Sprintf(`{ "methodConfig": [{ "timeout": "3s", @@ -64,6 +65,12 @@ func NewGrpcClient(url, certDir, authority string) (*GrpcClient, error) { grpc.WithDefaultServiceConfig(defaultConfig), } + opts = append( + opts, + grpc.WithChainUnaryInterceptor(clientMetrics.UnaryClientInterceptor()), + grpc.WithChainStreamInterceptor(clientMetrics.StreamClientInterceptor()), + ) + if authority != "" { // If an Authority header override is specified, add it to the client so it is set on every request. 
// This is useful when the address used to dial the GRPC server does not match any hosts provided in the TLS certificate's diff --git a/pkg/metricsservice/server.go b/pkg/metricsservice/server.go index 8a4e5965424..1c9a948b857 100644 --- a/pkg/metricsservice/server.go +++ b/pkg/metricsservice/server.go @@ -27,6 +27,7 @@ import ( "k8s.io/metrics/pkg/apis/external_metrics/v1beta1" logf "sigs.k8s.io/controller-runtime/pkg/log" + "github.com/kedacore/keda/v2/pkg/metricscollector" "github.com/kedacore/keda/v2/pkg/metricsservice/api" "github.com/kedacore/keda/v2/pkg/metricsservice/utils" "github.com/kedacore/keda/v2/pkg/scaling" @@ -96,7 +97,20 @@ func (s *GrpcServer) Start(ctx context.Context) error { if err != nil { return err } - s.server = grpc.NewServer(grpc.Creds(creds)) + + grpcServerOpts := []grpc.ServerOption{ + grpc.Creds(creds), + } + + if metricscollector.GetServerMetrics() != nil { + grpcServerOpts = append( + grpcServerOpts, + grpc.ChainStreamInterceptor(metricscollector.GetServerMetrics().StreamServerInterceptor()), + grpc.ChainUnaryInterceptor(metricscollector.GetServerMetrics().UnaryServerInterceptor()), + ) + } + + s.server = grpc.NewServer(grpcServerOpts...) api.RegisterMetricsServiceServer(s.server, s) s.healthServer = health.NewServer() diff --git a/tests/sequential/prometheus_metrics/prometheus_metrics_test.go b/tests/sequential/prometheus_metrics/prometheus_metrics_test.go index e516f6b8391..fe55813f701 100644 --- a/tests/sequential/prometheus_metrics/prometheus_metrics_test.go +++ b/tests/sequential/prometheus_metrics/prometheus_metrics_test.go @@ -918,6 +918,7 @@ func testWebhookMetricValues(t *testing.T) { func testMetricServerMetrics(t *testing.T) { families := fetchAndParsePrometheusMetrics(t, fmt.Sprintf("curl --insecure %s", kedaMetricsServerPrometheusURL)) checkMetricServerValues(t, families) + checkGRPCClientMetrics(t, families) } func testOperatorMetricValues(t *testing.T, kc *kubernetes.Clientset) { @@ -926,6 +927,7 @@ func testOperatorMetricValues(t *testing.T, kc *kubernetes.Clientset) { checkTriggerTotalValues(t, families, expectedTriggerTotals) checkCRTotalValues(t, families, expectedCrTotals) + checkGRPCServerMetrics(t, families) checkBuildInfo(t, families) } @@ -1024,6 +1026,166 @@ func checkCRTotalValues(t *testing.T, families map[string]*prommodel.MetricFamil } } +func checkGRPCServerMetrics(t *testing.T, families map[string]*prommodel.MetricFamily) { + t.Log("--- testing grpc server metrics ---") + + family, ok := families["keda_internal_metricsservice_grpc_server_handled_total"] + if !ok { + t.Errorf("metric keda_internal_metricsservice_grpc_server_handled_total not available") + return + } + + metricValue := 0.0 + metrics := family.GetMetric() + for _, metric := range metrics { + labels := metric.GetLabel() + for _, label := range labels { + if *label.Name == namespaceString && *label.Value != testNamespace { + continue + } + } + metricValue += *metric.Counter.Value + } + assert.GreaterOrEqual(t, metricValue, 1.0, "keda_internal_metricsservice_grpc_server_handled_total has to be greater than 0") + + family, ok = families["keda_internal_metricsservice_grpc_server_started_total"] + if !ok { + t.Errorf("metric keda_internal_metricsservice_grpc_server_started_total not available") + return + } + + metricValue = 0.0 + metrics = family.GetMetric() + for _, metric := range metrics { + labels := metric.GetLabel() + for _, label := range labels { + if *label.Name == namespaceString && *label.Value != testNamespace { + continue + } + } + metricValue += 
*metric.Counter.Value + } + assert.GreaterOrEqual(t, metricValue, 1.0, "keda_internal_metricsservice_grpc_server_started_total has to be greater than 0") + + family, ok = families["keda_internal_metricsservice_grpc_server_msg_received_total"] + if !ok { + t.Errorf("metric keda_internal_metricsservice_grpc_server_msg_received_total not available") + return + } + + metricValue = 0.0 + metrics = family.GetMetric() + for _, metric := range metrics { + labels := metric.GetLabel() + for _, label := range labels { + if *label.Name == namespaceString && *label.Value != testNamespace { + continue + } + } + metricValue += *metric.Counter.Value + } + assert.GreaterOrEqual(t, metricValue, 1.0, "keda_internal_metricsservice_grpc_server_msg_received_total has to be greater than 0") + + family, ok = families["keda_internal_metricsservice_grpc_server_msg_sent_total"] + if !ok { + t.Errorf("metric keda_internal_metricsservice_grpc_server_msg_sent_total not available") + return + } + + metricValue = 0.0 + metrics = family.GetMetric() + for _, metric := range metrics { + labels := metric.GetLabel() + for _, label := range labels { + if *label.Name == namespaceString && *label.Value != testNamespace { + continue + } + } + metricValue += *metric.Counter.Value + } + assert.GreaterOrEqual(t, metricValue, 1.0, "keda_internal_metricsservice_grpc_server_msg_sent_total has to be greater than 0") +} + +func checkGRPCClientMetrics(t *testing.T, families map[string]*prommodel.MetricFamily) { + t.Log("--- testing grpc client metrics ---") + + family, ok := families["keda_internal_metricsservice_grpc_client_handled_total"] + if !ok { + t.Errorf("metric keda_internal_metricsservice_grpc_client_handled_total not available") + return + } + + metricValue := 0.0 + metrics := family.GetMetric() + for _, metric := range metrics { + labels := metric.GetLabel() + for _, label := range labels { + if *label.Name == namespaceString && *label.Value != testNamespace { + continue + } + } + metricValue += *metric.Counter.Value + } + assert.GreaterOrEqual(t, metricValue, 1.0, "keda_internal_metricsservice_grpc_client_handled_total has to be greater than 0") + + family, ok = families["keda_internal_metricsservice_grpc_client_started_total"] + if !ok { + t.Errorf("metric keda_internal_metricsservice_grpc_client_started_total not available") + return + } + + metricValue = 0.0 + metrics = family.GetMetric() + for _, metric := range metrics { + labels := metric.GetLabel() + for _, label := range labels { + if *label.Name == namespaceString && *label.Value != testNamespace { + continue + } + } + metricValue += *metric.Counter.Value + } + assert.GreaterOrEqual(t, metricValue, 1.0, "keda_internal_metricsservice_grpc_client_started_total has to be greater than 0") + + family, ok = families["keda_internal_metricsservice_grpc_client_msg_received_total"] + if !ok { + t.Errorf("metric keda_internal_metricsservice_grpc_client_msg_received_total not available") + return + } + + metricValue = 0.0 + metrics = family.GetMetric() + for _, metric := range metrics { + labels := metric.GetLabel() + for _, label := range labels { + if *label.Name == namespaceString && *label.Value != testNamespace { + continue + } + } + metricValue += *metric.Counter.Value + } + assert.GreaterOrEqual(t, metricValue, 1.0, "keda_internal_metricsservice_grpc_client_msg_received_total has to be greater than 0") + + family, ok = families["keda_internal_metricsservice_grpc_client_msg_sent_total"] + if !ok { + t.Errorf("metric keda_internal_metricsservice_grpc_client_msg_sent_total 
not available") + return + } + + metricValue = 0.0 + metrics = family.GetMetric() + for _, metric := range metrics { + labels := metric.GetLabel() + for _, label := range labels { + if *label.Name == namespaceString && *label.Value != testNamespace { + continue + } + } + metricValue += *metric.Counter.Value + } + assert.GreaterOrEqual(t, metricValue, 1.0, "keda_internal_metricsservice_grpc_client_msg_sent_total has to be greater than 0") +} + func checkWebhookValues(t *testing.T, families map[string]*prommodel.MetricFamily) { t.Log("--- testing webhook metrics ---") diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/LICENSE new file mode 100644 index 00000000000..b2b065037fc --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/client_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/client_metrics.go new file mode 100644 index 00000000000..5c8ba20765e --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/client_metrics.go @@ -0,0 +1,117 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. + +package prometheus + +import ( + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors" + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc" +) + +// ClientMetrics represents a collection of metrics to be registered on a +// Prometheus metrics registry for a gRPC client. +type ClientMetrics struct { + clientStartedCounter *prometheus.CounterVec + clientHandledCounter *prometheus.CounterVec + clientStreamMsgReceived *prometheus.CounterVec + clientStreamMsgSent *prometheus.CounterVec + + // clientHandledHistogram can be nil + clientHandledHistogram *prometheus.HistogramVec + // clientStreamRecvHistogram can be nil + clientStreamRecvHistogram *prometheus.HistogramVec + // clientStreamSendHistogram can be nil + clientStreamSendHistogram *prometheus.HistogramVec +} + +// NewClientMetrics returns a new ClientMetrics object. +// NOTE: Remember to register ClientMetrics object using prometheus registry +// e.g. prometheus.MustRegister(myClientMetrics). 
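+//
+// A minimal hedged sketch (addr is assumed; the With* options are defined in
+// client_options.go of this package):
+//
+//	cm := NewClientMetrics(WithClientHandlingTimeHistogram())
+//	prometheus.MustRegister(cm)
+//	conn, err := grpc.Dial(addr,
+//		grpc.WithChainUnaryInterceptor(cm.UnaryClientInterceptor()),
+//		grpc.WithChainStreamInterceptor(cm.StreamClientInterceptor()),
+//	)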
+func NewClientMetrics(opts ...ClientMetricsOption) *ClientMetrics { + var config clientMetricsConfig + config.apply(opts) + return &ClientMetrics{ + clientStartedCounter: prometheus.NewCounterVec( + config.counterOpts.apply(prometheus.CounterOpts{ + Name: "grpc_client_started_total", + Help: "Total number of RPCs started on the client.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + + clientHandledCounter: prometheus.NewCounterVec( + config.counterOpts.apply(prometheus.CounterOpts{ + Name: "grpc_client_handled_total", + Help: "Total number of RPCs completed by the client, regardless of success or failure.", + }), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}), + + clientStreamMsgReceived: prometheus.NewCounterVec( + config.counterOpts.apply(prometheus.CounterOpts{ + Name: "grpc_client_msg_received_total", + Help: "Total number of RPC stream messages received by the client.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + + clientStreamMsgSent: prometheus.NewCounterVec( + config.counterOpts.apply(prometheus.CounterOpts{ + Name: "grpc_client_msg_sent_total", + Help: "Total number of gRPC stream messages sent by the client.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + + clientHandledHistogram: config.clientHandledHistogram, + clientStreamRecvHistogram: config.clientStreamRecvHistogram, + clientStreamSendHistogram: config.clientStreamSendHistogram, + } +} + +// Describe sends the super-set of all possible descriptors of metrics +// collected by this Collector to the provided channel and returns once +// the last descriptor has been sent. +func (m *ClientMetrics) Describe(ch chan<- *prometheus.Desc) { + m.clientStartedCounter.Describe(ch) + m.clientHandledCounter.Describe(ch) + m.clientStreamMsgReceived.Describe(ch) + m.clientStreamMsgSent.Describe(ch) + if m.clientHandledHistogram != nil { + m.clientHandledHistogram.Describe(ch) + } + if m.clientStreamRecvHistogram != nil { + m.clientStreamRecvHistogram.Describe(ch) + } + if m.clientStreamSendHistogram != nil { + m.clientStreamSendHistogram.Describe(ch) + } +} + +// Collect is called by the Prometheus registry when collecting +// metrics. The implementation sends each collected metric via the +// provided channel and returns once the last metric has been sent. +func (m *ClientMetrics) Collect(ch chan<- prometheus.Metric) { + m.clientStartedCounter.Collect(ch) + m.clientHandledCounter.Collect(ch) + m.clientStreamMsgReceived.Collect(ch) + m.clientStreamMsgSent.Collect(ch) + if m.clientHandledHistogram != nil { + m.clientHandledHistogram.Collect(ch) + } + if m.clientStreamRecvHistogram != nil { + m.clientStreamRecvHistogram.Collect(ch) + } + if m.clientStreamSendHistogram != nil { + m.clientStreamSendHistogram.Collect(ch) + } +} + +// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs. +func (m *ClientMetrics) UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { + return interceptors.UnaryClientInterceptor(&reportable{ + opts: opts, + clientMetrics: m, + }) +} + +// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs. 
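+//
+// Hedged usage, matching how this patch wires the adapter-side client in
+// pkg/metricsservice/client.go:
+//
+//	grpc.WithChainStreamInterceptor(m.StreamClientInterceptor())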
+func (m *ClientMetrics) StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { + return interceptors.StreamClientInterceptor(&reportable{ + opts: opts, + clientMetrics: m, + }) +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/client_options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/client_options.go new file mode 100644 index 00000000000..c2671679c60 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/client_options.go @@ -0,0 +1,77 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. + +package prometheus + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +type clientMetricsConfig struct { + counterOpts counterOptions + // clientHandledHistogram can be nil. + clientHandledHistogram *prometheus.HistogramVec + // clientStreamRecvHistogram can be nil. + clientStreamRecvHistogram *prometheus.HistogramVec + // clientStreamSendHistogram can be nil. + clientStreamSendHistogram *prometheus.HistogramVec +} + +type ClientMetricsOption func(*clientMetricsConfig) + +func (c *clientMetricsConfig) apply(opts []ClientMetricsOption) { + for _, o := range opts { + o(c) + } +} + +func WithClientCounterOptions(opts ...CounterOption) ClientMetricsOption { + return func(o *clientMetricsConfig) { + o.counterOpts = opts + } +} + +// WithClientHandlingTimeHistogram turns on recording of handling time of RPCs. +// Histogram metrics can be very expensive for Prometheus to retain and query. +func WithClientHandlingTimeHistogram(opts ...HistogramOption) ClientMetricsOption { + return func(o *clientMetricsConfig) { + o.clientHandledHistogram = prometheus.NewHistogramVec( + histogramOptions(opts).apply(prometheus.HistogramOpts{ + Name: "grpc_client_handling_seconds", + Help: "Histogram of response latency (seconds) of the gRPC until it is finished by the application.", + Buckets: prometheus.DefBuckets, + }), + []string{"grpc_type", "grpc_service", "grpc_method"}, + ) + } +} + +// WithClientStreamRecvHistogram turns on recording of single message receive time of streaming RPCs. +// Histogram metrics can be very expensive for Prometheus to retain and query. +func WithClientStreamRecvHistogram(opts ...HistogramOption) ClientMetricsOption { + return func(o *clientMetricsConfig) { + o.clientStreamRecvHistogram = prometheus.NewHistogramVec( + histogramOptions(opts).apply(prometheus.HistogramOpts{ + Name: "grpc_client_msg_recv_handling_seconds", + Help: "Histogram of response latency (seconds) of the gRPC single message receive.", + Buckets: prometheus.DefBuckets, + }), + []string{"grpc_type", "grpc_service", "grpc_method"}, + ) + } +} + +// WithClientStreamSendHistogram turns on recording of single message send time of streaming RPCs. +// Histogram metrics can be very expensive for Prometheus to retain and query. 
+func WithClientStreamSendHistogram(opts ...HistogramOption) ClientMetricsOption {
+	return func(o *clientMetricsConfig) {
+		o.clientStreamSendHistogram = prometheus.NewHistogramVec(
+			histogramOptions(opts).apply(prometheus.HistogramOpts{
+				Name:    "grpc_client_msg_send_handling_seconds",
+				Help:    "Histogram of response latency (seconds) of the gRPC single message send.",
+				Buckets: prometheus.DefBuckets,
+			}),
+			[]string{"grpc_type", "grpc_service", "grpc_method"},
+		)
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/constants.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/constants.go
new file mode 100644
index 00000000000..5c36923f7ea
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/constants.go
@@ -0,0 +1,23 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+package prometheus
+
+type grpcType string
+
+// grpcType describes all types of grpc connection.
+const (
+	Unary        grpcType = "unary"
+	ClientStream grpcType = "client_stream"
+	ServerStream grpcType = "server_stream"
+	BidiStream   grpcType = "bidi_stream"
+)
+
+// Kind describes whether interceptor is a client or server type.
+type Kind string
+
+// Enum for Client and Server Kind.
+const (
+	KindClient Kind = "client"
+	KindServer Kind = "server"
+)
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/doc.go
new file mode 100644
index 00000000000..b62f17efb7d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/doc.go
@@ -0,0 +1,8 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+/*
+Package prometheus provides a standalone interceptor for metrics. It's the next iteration of the deprecated https://github.com/grpc-ecosystem/go-grpc-prometheus.
+See https://github.com/grpc-ecosystem/go-grpc-middleware/tree/main/examples for examples.
+*/
+package prometheus
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/options.go
new file mode 100644
index 00000000000..bdd171e295f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/options.go
@@ -0,0 +1,129 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+package prometheus
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/status"
+)
+
+// FromError returns a grpc status. If the error code is neither a valid grpc status nor a context error, codes.Unknown
+// will be set.
+func FromError(err error) *status.Status {
+	s, ok := status.FromError(err)
+	// Mirror what the grpc server itself does, i.e. also convert context errors to status
+	if !ok {
+		s = status.FromContextError(err)
+	}
+	return s
+}
+
+// A CounterOption lets you add options to Counter metrics using With* funcs.
+type CounterOption func(*prometheus.CounterOpts)
+
+type counterOptions []CounterOption
+
+func (co counterOptions) apply(o prometheus.CounterOpts) prometheus.CounterOpts {
+	for _, f := range co {
+		f(&o)
+	}
+	return o
+}
+
+// WithConstLabels allows you to add ConstLabels to Counter metrics.
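+//
+// For example (the label name and value are illustrative only):
+//
+//	NewClientMetrics(WithClientCounterOptions(
+//		WithConstLabels(prometheus.Labels{"component": "keda-adapter"}),
+//	))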
+func WithConstLabels(labels prometheus.Labels) CounterOption {
+	return func(o *prometheus.CounterOpts) {
+		o.ConstLabels = labels
+	}
+}
+
+// WithSubsystem allows you to add a Subsystem to Counter metrics.
+func WithSubsystem(subsystem string) CounterOption {
+	return func(o *prometheus.CounterOpts) {
+		o.Subsystem = subsystem
+	}
+}
+
+// A HistogramOption lets you add options to Histogram metrics using With*
+// funcs.
+type HistogramOption func(*prometheus.HistogramOpts)
+
+type histogramOptions []HistogramOption
+
+func (ho histogramOptions) apply(o prometheus.HistogramOpts) prometheus.HistogramOpts {
+	for _, f := range ho {
+		f(&o)
+	}
+	return o
+}
+
+// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
+func WithHistogramBuckets(buckets []float64) HistogramOption {
+	return func(o *prometheus.HistogramOpts) { o.Buckets = buckets }
+}
+
+// WithHistogramOpts allows you to specify HistogramOpts but makes sure the correct name and label is used.
+// This function is helpful when specifying more than just the buckets, like using NativeHistograms.
+func WithHistogramOpts(opts *prometheus.HistogramOpts) HistogramOption {
+	// TODO: This isn't ideal either if new fields are added to prometheus.HistogramOpts.
+	// Maybe we can change the interface to accept arbitrary HistogramOpts and
+	// only make sure to overwrite the necessary fields (name, labels).
+	return func(o *prometheus.HistogramOpts) {
+		o.Buckets = opts.Buckets
+		o.NativeHistogramBucketFactor = opts.NativeHistogramBucketFactor
+		o.NativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
+		o.NativeHistogramMaxBucketNumber = opts.NativeHistogramMaxBucketNumber
+		o.NativeHistogramMinResetDuration = opts.NativeHistogramMinResetDuration
+		o.NativeHistogramMaxZeroThreshold = opts.NativeHistogramMaxZeroThreshold
+	}
+}
+
+// WithHistogramConstLabels allows you to add custom ConstLabels to
+// histogram metrics.
+func WithHistogramConstLabels(labels prometheus.Labels) HistogramOption {
+	return func(o *prometheus.HistogramOpts) {
+		o.ConstLabels = labels
+	}
+}
+
+// WithHistogramSubsystem allows you to add a Subsystem to histogram metrics.
+func WithHistogramSubsystem(subsystem string) HistogramOption {
+	return func(o *prometheus.HistogramOpts) {
+		o.Subsystem = subsystem
+	}
+}
+
+func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType {
+	if !mInfo.IsClientStream && !mInfo.IsServerStream {
+		return Unary
+	}
+	if mInfo.IsClientStream && !mInfo.IsServerStream {
+		return ClientStream
+	}
+	if !mInfo.IsClientStream && mInfo.IsServerStream {
+		return ServerStream
+	}
+	return BidiStream
+}
+
+// An Option lets you add options to prometheus interceptors using With* funcs.
+type Option func(*config)
+
+type config struct {
+	exemplarFn exemplarFromCtxFn
+}
+
+func (c *config) apply(opts []Option) {
+	for _, o := range opts {
+		o(c)
+	}
+}
+
+// WithExemplarFromContext sets the function that will be used to deduce exemplar for all counter and histogram metrics.
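+//
+// A hedged sketch (assumes exemplarFromCtxFn returns prometheus.Labels for a
+// context; traceIDFromContext is hypothetical):
+//
+//	WithExemplarFromContext(func(ctx context.Context) prometheus.Labels {
+//		return prometheus.Labels{"traceID": traceIDFromContext(ctx)}
+//	})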
+func WithExemplarFromContext(exemplarFn exemplarFromCtxFn) Option { + return func(o *config) { + o.exemplarFn = exemplarFn + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/reporter.go new file mode 100644 index 00000000000..96c49ad93ac --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/reporter.go @@ -0,0 +1,113 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. + +package prometheus + +import ( + "context" + "time" + + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors" + "github.com/prometheus/client_golang/prometheus" +) + +type reporter struct { + clientMetrics *ClientMetrics + serverMetrics *ServerMetrics + typ interceptors.GRPCType + service, method string + kind Kind + exemplar prometheus.Labels +} + +func (r *reporter) PostCall(err error, rpcDuration time.Duration) { + // get status code from error + status := FromError(err) + code := status.Code() + + // perform handling of metrics from code + switch r.kind { + case KindServer: + r.incrementWithExemplar(r.serverMetrics.serverHandledCounter, string(r.typ), r.service, r.method, code.String()) + if r.serverMetrics.serverHandledHistogram != nil { + r.observeWithExemplar(r.serverMetrics.serverHandledHistogram, rpcDuration.Seconds(), string(r.typ), r.service, r.method) + } + + case KindClient: + r.incrementWithExemplar(r.clientMetrics.clientHandledCounter, string(r.typ), r.service, r.method, code.String()) + if r.clientMetrics.clientHandledHistogram != nil { + r.observeWithExemplar(r.clientMetrics.clientHandledHistogram, rpcDuration.Seconds(), string(r.typ), r.service, r.method) + } + } +} + +func (r *reporter) PostMsgSend(_ any, _ error, sendDuration time.Duration) { + switch r.kind { + case KindServer: + r.incrementWithExemplar(r.serverMetrics.serverStreamMsgSent, string(r.typ), r.service, r.method) + case KindClient: + r.incrementWithExemplar(r.clientMetrics.clientStreamMsgSent, string(r.typ), r.service, r.method) + if r.clientMetrics.clientStreamSendHistogram != nil { + r.observeWithExemplar(r.clientMetrics.clientStreamSendHistogram, sendDuration.Seconds(), string(r.typ), r.service, r.method) + } + } +} + +func (r *reporter) PostMsgReceive(_ any, _ error, recvDuration time.Duration) { + switch r.kind { + case KindServer: + r.incrementWithExemplar(r.serverMetrics.serverStreamMsgReceived, string(r.typ), r.service, r.method) + case KindClient: + r.incrementWithExemplar(r.clientMetrics.clientStreamMsgReceived, string(r.typ), r.service, r.method) + if r.clientMetrics.clientStreamRecvHistogram != nil { + r.observeWithExemplar(r.clientMetrics.clientStreamRecvHistogram, recvDuration.Seconds(), string(r.typ), r.service, r.method) + } + } +} + +type reportable struct { + clientMetrics *ClientMetrics + serverMetrics *ServerMetrics + + opts []Option +} + +func (rep *reportable) ServerReporter(ctx context.Context, meta interceptors.CallMeta) (interceptors.Reporter, context.Context) { + return rep.reporter(ctx, rep.serverMetrics, nil, meta, KindServer) +} + +func (rep *reportable) ClientReporter(ctx context.Context, meta interceptors.CallMeta) (interceptors.Reporter, context.Context) { + return rep.reporter(ctx, nil, rep.clientMetrics, meta, KindClient) +} + +func (rep *reportable) reporter(ctx context.Context, sm *ServerMetrics, cm *ClientMetrics, meta interceptors.CallMeta, kind Kind) (interceptors.Reporter, 
context.Context) { + var c config + c.apply(rep.opts) + r := &reporter{ + clientMetrics: cm, + serverMetrics: sm, + typ: meta.Typ, + service: meta.Service, + method: meta.Method, + kind: kind, + } + if c.exemplarFn != nil { + r.exemplar = c.exemplarFn(ctx) + } + + switch kind { + case KindClient: + r.incrementWithExemplar(r.clientMetrics.clientStartedCounter, string(r.typ), r.service, r.method) + case KindServer: + r.incrementWithExemplar(r.serverMetrics.serverStartedCounter, string(r.typ), r.service, r.method) + } + return r, ctx +} + +func (r *reporter) incrementWithExemplar(c *prometheus.CounterVec, lvals ...string) { + c.WithLabelValues(lvals...).(prometheus.ExemplarAdder).AddWithExemplar(1, r.exemplar) +} + +func (r *reporter) observeWithExemplar(h *prometheus.HistogramVec, value float64, lvals ...string) { + h.WithLabelValues(lvals...).(prometheus.ExemplarObserver).ObserveWithExemplar(value, r.exemplar) +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_metrics.go new file mode 100644 index 00000000000..9b2f3e6d8dd --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_metrics.go @@ -0,0 +1,123 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. + +package prometheus + +import ( + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors" + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc" +) + +// ServerMetrics represents a collection of metrics to be registered on a +// Prometheus metrics registry for a gRPC server. +type ServerMetrics struct { + serverStartedCounter *prometheus.CounterVec + serverHandledCounter *prometheus.CounterVec + serverStreamMsgReceived *prometheus.CounterVec + serverStreamMsgSent *prometheus.CounterVec + // serverHandledHistogram can be nil. + serverHandledHistogram *prometheus.HistogramVec +} + +// NewServerMetrics returns a new ServerMetrics object that has server interceptor methods. +// NOTE: Remember to register ServerMetrics object by using prometheus registry +// e.g. prometheus.MustRegister(myServerMetrics). 
+func NewServerMetrics(opts ...ServerMetricsOption) *ServerMetrics { + var config serverMetricsConfig + config.apply(opts) + return &ServerMetrics{ + serverStartedCounter: prometheus.NewCounterVec( + config.counterOpts.apply(prometheus.CounterOpts{ + Name: "grpc_server_started_total", + Help: "Total number of RPCs started on the server.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + serverHandledCounter: prometheus.NewCounterVec( + config.counterOpts.apply(prometheus.CounterOpts{ + Name: "grpc_server_handled_total", + Help: "Total number of RPCs completed on the server, regardless of success or failure.", + }), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}), + serverStreamMsgReceived: prometheus.NewCounterVec( + config.counterOpts.apply(prometheus.CounterOpts{ + Name: "grpc_server_msg_received_total", + Help: "Total number of RPC stream messages received on the server.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + serverStreamMsgSent: prometheus.NewCounterVec( + config.counterOpts.apply(prometheus.CounterOpts{ + Name: "grpc_server_msg_sent_total", + Help: "Total number of gRPC stream messages sent by the server.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + serverHandledHistogram: config.serverHandledHistogram, + } +} + +// Describe sends the super-set of all possible descriptors of metrics +// collected by this Collector to the provided channel and returns once +// the last descriptor has been sent. +func (m *ServerMetrics) Describe(ch chan<- *prometheus.Desc) { + m.serverStartedCounter.Describe(ch) + m.serverHandledCounter.Describe(ch) + m.serverStreamMsgReceived.Describe(ch) + m.serverStreamMsgSent.Describe(ch) + if m.serverHandledHistogram != nil { + m.serverHandledHistogram.Describe(ch) + } +} + +// Collect is called by the Prometheus registry when collecting +// metrics. The implementation sends each collected metric via the +// provided channel and returns once the last metric has been sent. +func (m *ServerMetrics) Collect(ch chan<- prometheus.Metric) { + m.serverStartedCounter.Collect(ch) + m.serverHandledCounter.Collect(ch) + m.serverStreamMsgReceived.Collect(ch) + m.serverStreamMsgSent.Collect(ch) + if m.serverHandledHistogram != nil { + m.serverHandledHistogram.Collect(ch) + } +} + +// InitializeMetrics initializes all metrics, with their appropriate null +// value, for all gRPC methods registered on a gRPC server. This is useful, to +// ensure that all metrics exist when collecting and querying. +// NOTE: This might add significant cardinality and might not be needed in future version of Prometheus (created timestamp). +func (m *ServerMetrics) InitializeMetrics(server *grpc.Server) { + serviceInfo := server.GetServiceInfo() + for serviceName, info := range serviceInfo { + for _, mInfo := range info.Methods { + m.preRegisterMethod(serviceName, &mInfo) + } + } +} + +// preRegisterMethod is invoked on Register of a Server, allowing all gRPC services labels to be pre-populated. +func (m *ServerMetrics) preRegisterMethod(serviceName string, mInfo *grpc.MethodInfo) { + methodName := mInfo.Name + methodType := string(typeFromMethodInfo(mInfo)) + // These are just references (no increments), as just referencing will create the labels but not set values. 
+ _, _ = m.serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName) + _, _ = m.serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName) + _, _ = m.serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName) + if m.serverHandledHistogram != nil { + _, _ = m.serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName) + } + for _, code := range interceptors.AllCodes { + _, _ = m.serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String()) + } +} + +// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs. +func (m *ServerMetrics) UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { + return interceptors.UnaryServerInterceptor(&reportable{ + opts: opts, + serverMetrics: m, + }) +} + +// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs. +func (m *ServerMetrics) StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { + return interceptors.StreamServerInterceptor(&reportable{ + opts: opts, + serverMetrics: m, + }) +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_options.go new file mode 100644 index 00000000000..39d422042cb --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_options.go @@ -0,0 +1,48 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. + +package prometheus + +import ( + "context" + + "github.com/prometheus/client_golang/prometheus" +) + +type exemplarFromCtxFn func(ctx context.Context) prometheus.Labels + +type serverMetricsConfig struct { + counterOpts counterOptions + // serverHandledHistogram can be nil. + serverHandledHistogram *prometheus.HistogramVec +} + +type ServerMetricsOption func(*serverMetricsConfig) + +func (c *serverMetricsConfig) apply(opts []ServerMetricsOption) { + for _, o := range opts { + o(c) + } +} + +// WithServerCounterOptions sets counter options. +func WithServerCounterOptions(opts ...CounterOption) ServerMetricsOption { + return func(o *serverMetricsConfig) { + o.counterOpts = opts + } +} + +// WithServerHandlingTimeHistogram turns on recording of handling time of RPCs. +// Histogram metrics can be very expensive for Prometheus to retain and query. +func WithServerHandlingTimeHistogram(opts ...HistogramOption) ServerMetricsOption { + return func(o *serverMetricsConfig) { + o.serverHandledHistogram = prometheus.NewHistogramVec( + histogramOptions(opts).apply(prometheus.HistogramOpts{ + Name: "grpc_server_handling_seconds", + Help: "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.", + Buckets: prometheus.DefBuckets, + }), + []string{"grpc_type", "grpc_service", "grpc_method"}, + ) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT new file mode 100644 index 00000000000..3b13627cdbb --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT @@ -0,0 +1,2 @@ +Copyright (c) The go-grpc-middleware Authors. +Licensed under the Apache License 2.0. 
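
For orientation, the vendored provider files above compose as follows. This is a minimal sketch, not part of the patch: it relies only on APIs visible in this vendored code (NewServerMetrics, WithServerHandlingTimeHistogram, the server interceptor constructors, InitializeMetrics) plus the standard Prometheus registry and grpc-go interceptor-chaining options; the package and function names are illustrative.

package example

import (
	grpcprom "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"google.golang.org/grpc"
)

// newInstrumentedServer wires the vendored ServerMetrics into a gRPC server.
func newInstrumentedServer() (*grpc.Server, *prometheus.Registry) {
	// The handling-time histogram is opt-in: without this option
	// serverHandledHistogram stays nil and only the counters are recorded.
	srvMetrics := grpcprom.NewServerMetrics(
		grpcprom.WithServerHandlingTimeHistogram(),
	)

	// ServerMetrics implements Describe/Collect, so it is registered as an
	// ordinary prometheus.Collector, per the NOTE on NewServerMetrics.
	reg := prometheus.NewRegistry()
	reg.MustRegister(srvMetrics)

	// The interceptors feed the started/handled counters and, when enabled,
	// the handling-time histogram for every RPC.
	server := grpc.NewServer(
		grpc.ChainUnaryInterceptor(srvMetrics.UnaryServerInterceptor()),
		grpc.ChainStreamInterceptor(srvMetrics.StreamServerInterceptor()),
	)

	// After services are registered on the server, InitializeMetrics
	// pre-populates every label combination so the series exist before the
	// first request; with nothing registered yet, as here, it is a no-op.
	srvMetrics.InitializeMetrics(server)
	return server, reg
}

The client-side options at the top of this patch (for example WithClientStreamSendHistogram) compose the same way through the corresponding client interceptors.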
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE new file mode 100644 index 00000000000..b2b065037fc --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/client.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/client.go new file mode 100644 index 00000000000..86c51a07924 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/client.go @@ -0,0 +1,83 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. + +// Go gRPC Middleware monitoring interceptors for client-side gRPC. + +package interceptors + +import ( + "context" + "io" + "time" + + "google.golang.org/grpc" +) + +// UnaryClientInterceptor is a gRPC client-side interceptor that provides reporting for Unary RPCs. +func UnaryClientInterceptor(reportable ClientReportable) grpc.UnaryClientInterceptor { + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + r := newReport(Unary, method) + reporter, newCtx := reportable.ClientReporter(ctx, CallMeta{ReqProtoOrNil: req, Typ: r.rpcType, Service: r.service, Method: r.method}) + + reporter.PostMsgSend(req, nil, time.Since(r.startTime)) + err := invoker(newCtx, method, req, reply, cc, opts...) + reporter.PostMsgReceive(reply, err, time.Since(r.startTime)) + reporter.PostCall(err, time.Since(r.startTime)) + return err + } +} + +// StreamClientInterceptor is a gRPC client-side interceptor that provides reporting for Stream RPCs. +func StreamClientInterceptor(reportable ClientReportable) grpc.StreamClientInterceptor { + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + r := newReport(clientStreamType(desc), method) + reporter, newCtx := reportable.ClientReporter(ctx, CallMeta{ReqProtoOrNil: nil, Typ: r.rpcType, Service: r.service, Method: r.method}) + + clientStream, err := streamer(newCtx, desc, cc, method, opts...) + if err != nil { + reporter.PostCall(err, time.Since(r.startTime)) + return nil, err + } + return &monitoredClientStream{ClientStream: clientStream, startTime: r.startTime, reporter: reporter}, nil + } +} + +func clientStreamType(desc *grpc.StreamDesc) GRPCType { + if desc.ClientStreams && !desc.ServerStreams { + return ClientStream + } else if !desc.ClientStreams && desc.ServerStreams { + return ServerStream + } + return BidiStream +} + +// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to report. 
+type monitoredClientStream struct { + grpc.ClientStream + + startTime time.Time + reporter Reporter +} + +func (s *monitoredClientStream) SendMsg(m interface{}) error { + start := time.Now() + err := s.ClientStream.SendMsg(m) + s.reporter.PostMsgSend(m, err, time.Since(start)) + return err +} + +func (s *monitoredClientStream) RecvMsg(m interface{}) error { + start := time.Now() + err := s.ClientStream.RecvMsg(m) + s.reporter.PostMsgReceive(m, err, time.Since(start)) + + if err == nil { + return nil + } + var postErr error + if err != io.EOF { + postErr = err + } + s.reporter.PostCall(postErr, time.Since(s.startTime)) + return err +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/doc.go new file mode 100644 index 00000000000..2608b9a4f8e --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/doc.go @@ -0,0 +1,12 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. + +// +/* +interceptor is an internal package used by higher level middlewares. It allows injecting custom code in various +places of the gRPC lifecycle. + +This particular package is intended for use by other middleware, metric, logging or otherwise. +This allows code to be shared between different implementations. +*/ +package interceptors diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/reporter.go new file mode 100644 index 00000000000..cc3b4f136d4 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/reporter.go @@ -0,0 +1,116 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. + +package interceptors + +import ( + "context" + "fmt" + "strings" + "time" + + "google.golang.org/grpc/codes" +) + +type GRPCType string + +// Timer is a helper interface to time functions. +// Useful for interceptors to record the total +// time elapsed since completion of a call. +type Timer interface { + ObserveDuration() time.Duration +} + +// zeroTimer. 
+type zeroTimer struct { +} + +func (zeroTimer) ObserveDuration() time.Duration { + return 0 +} + +var EmptyTimer = &zeroTimer{} + +const ( + Unary GRPCType = "unary" + ClientStream GRPCType = "client_stream" + ServerStream GRPCType = "server_stream" + BidiStream GRPCType = "bidi_stream" +) + +var ( + AllCodes = []codes.Code{ + codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound, + codes.AlreadyExists, codes.PermissionDenied, codes.Unauthenticated, codes.ResourceExhausted, + codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.Unimplemented, codes.Internal, + codes.Unavailable, codes.DataLoss, + } +) + +func SplitMethodName(fullMethod string) (string, string) { + fullMethod = strings.TrimPrefix(fullMethod, "/") // remove leading slash + if i := strings.Index(fullMethod, "/"); i >= 0 { + return fullMethod[:i], fullMethod[i+1:] + } + return "unknown", "unknown" +} + +type CallMeta struct { + ReqProtoOrNil interface{} + Typ GRPCType + Service string + Method string +} + +func (c CallMeta) FullMethod() string { + return fmt.Sprintf("/%s/%s", c.Service, c.Method) +} + +type ClientReportable interface { + ClientReporter(context.Context, CallMeta) (Reporter, context.Context) +} + +type ServerReportable interface { + ServerReporter(context.Context, CallMeta) (Reporter, context.Context) +} + +// CommonReportableFunc helper allows an easy way to implement reporter with common client and server logic. +type CommonReportableFunc func(ctx context.Context, c CallMeta, isClient bool) (Reporter, context.Context) + +func (f CommonReportableFunc) ClientReporter(ctx context.Context, c CallMeta) (Reporter, context.Context) { + return f(ctx, c, true) +} + +func (f CommonReportableFunc) ServerReporter(ctx context.Context, c CallMeta) (Reporter, context.Context) { + return f(ctx, c, false) +} + +type Reporter interface { + PostCall(err error, rpcDuration time.Duration) + PostMsgSend(reqProto interface{}, err error, sendDuration time.Duration) + PostMsgReceive(replyProto interface{}, err error, recvDuration time.Duration) +} + +var _ Reporter = NoopReporter{} + +type NoopReporter struct{} + +func (NoopReporter) PostCall(error, time.Duration) {} +func (NoopReporter) PostMsgSend(interface{}, error, time.Duration) {} +func (NoopReporter) PostMsgReceive(interface{}, error, time.Duration) {} + +type report struct { + rpcType GRPCType + service string + method string + startTime time.Time +} + +func newReport(typ GRPCType, fullMethod string) report { + r := report{ + startTime: time.Now(), + rpcType: typ, + } + r.service, r.method = SplitMethodName(fullMethod) + return r +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/server.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/server.go new file mode 100644 index 00000000000..1fcb8e4e97b --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/server.go @@ -0,0 +1,74 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. + +// Go gRPC Middleware monitoring interceptors for server-side gRPC. + +package interceptors + +import ( + "context" + "time" + + "google.golang.org/grpc" +) + +// UnaryServerInterceptor is a gRPC server-side interceptor that provides reporting for Unary RPCs. 
+func UnaryServerInterceptor(reportable ServerReportable) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + r := newReport(Unary, info.FullMethod) + reporter, newCtx := reportable.ServerReporter(ctx, CallMeta{ReqProtoOrNil: req, Typ: r.rpcType, Service: r.service, Method: r.method}) + + reporter.PostMsgReceive(req, nil, time.Since(r.startTime)) + resp, err := handler(newCtx, req) + reporter.PostMsgSend(resp, err, time.Since(r.startTime)) + + reporter.PostCall(err, time.Since(r.startTime)) + return resp, err + } +} + +// StreamServerInterceptor is a gRPC server-side interceptor that provides reporting for Streaming RPCs. +func StreamServerInterceptor(reportable ServerReportable) grpc.StreamServerInterceptor { + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + r := newReport(ServerStream, info.FullMethod) + reporter, newCtx := reportable.ServerReporter(ss.Context(), CallMeta{ReqProtoOrNil: nil, Typ: StreamRPCType(info), Service: r.service, Method: r.method}) + err := handler(srv, &monitoredServerStream{ServerStream: ss, newCtx: newCtx, reporter: reporter}) + reporter.PostCall(err, time.Since(r.startTime)) + return err + } +} + +func StreamRPCType(info *grpc.StreamServerInfo) GRPCType { + if info.IsClientStream && !info.IsServerStream { + return ClientStream + } else if !info.IsClientStream && info.IsServerStream { + return ServerStream + } + return BidiStream +} + +// monitoredStream wraps grpc.ServerStream allowing each Sent/Recv of message to report. +type monitoredServerStream struct { + grpc.ServerStream + + newCtx context.Context + reporter Reporter +} + +func (s *monitoredServerStream) Context() context.Context { + return s.newCtx +} + +func (s *monitoredServerStream) SendMsg(m interface{}) error { + start := time.Now() + err := s.ServerStream.SendMsg(m) + s.reporter.PostMsgSend(m, err, time.Since(start)) + return err +} + +func (s *monitoredServerStream) RecvMsg(m interface{}) error { + start := time.Now() + err := s.ServerStream.RecvMsg(m) + s.reporter.PostMsgReceive(m, err, time.Since(start)) + return err +} diff --git a/vendor/modules.txt b/vendor/modules.txt index a799febaa81..cf666a56679 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -780,6 +780,12 @@ github.com/gophercloud/gophercloud/pagination # github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 ## explicit; go 1.14 github.com/grpc-ecosystem/go-grpc-middleware +# github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 +## explicit; go 1.19 +github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus +# github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3 +## explicit; go 1.14 +github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 ## explicit github.com/grpc-ecosystem/go-grpc-prometheus From 241c2e027738c41733dd21fa871a00ceae868fb8 Mon Sep 17 00:00:00 2001 From: Jirka Kremser <535866+jkremser@users.noreply.github.com> Date: Wed, 10 Apr 2024 19:03:42 +0200 Subject: [PATCH 04/10] Support csv-format for WATCH_NAMESPACE env var (#5631) Signed-off-by: Jirka Kremser Signed-off-by: Jirka Kremser <535866+jkremser@users.noreply.github.com> --- CHANGELOG.md | 1 + .../eventing/cloudeventsource_controller.go | 2 ++ controllers/keda/scaledjob_controller.go | 2 ++ controllers/keda/scaledobject_controller.go | 2 ++ .../keda/triggerauthentication_controller.go | 
2 ++ pkg/metricscollector/opentelemetry.go | 2 +- pkg/scalers/aws/aws_common.go | 2 +- pkg/scalers/aws/aws_sigv4.go | 2 +- pkg/util/watch.go | 29 ++++++++++++++++--- 9 files changed, 37 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bcfe110db29..706593e1a5d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -70,6 +70,7 @@ Here is an overview of all new **experimental** features: - **General**: Add GRPC Healthchecks ([#5590](https://github.com/kedacore/keda/issues/5590)) - **General**: Add OPENTELEMETRY flag in e2e test YAML ([#5375](https://github.com/kedacore/keda/issues/5375)) - **General**: Add support for cross tenant/cloud authentication when using Azure Workload Identity for TriggerAuthentication ([#5441](https://github.com/kedacore/keda/issues/5441)) +- **General**: Support csv-format for WATCH_NAMESPACE env var ([#5670](https://github.com/kedacore/keda/issues/5670)) - **Azure Event Hub Scaler**: Remove usage of checkpoint offsets to account for SDK checkpointing implementation changes ([#5574](https://github.com/kedacore/keda/issues/5574)) - **GCP Stackdriver Scaler**: Add missing parameters 'rate' and 'count' for GCP Stackdriver Scaler alignment ([#5633](https://github.com/kedacore/keda/issues/5633)) - **Metrics API Scaler**: Add support for various formats: json, xml, yaml, prometheus ([#2633](https://github.com/kedacore/keda/issues/2633)) diff --git a/controllers/eventing/cloudeventsource_controller.go b/controllers/eventing/cloudeventsource_controller.go index e378cc99f26..62c972076fd 100644 --- a/controllers/eventing/cloudeventsource_controller.go +++ b/controllers/eventing/cloudeventsource_controller.go @@ -33,6 +33,7 @@ import ( "github.com/kedacore/keda/v2/pkg/eventemitter" "github.com/kedacore/keda/v2/pkg/metricscollector" kedastatus "github.com/kedacore/keda/v2/pkg/status" + "github.com/kedacore/keda/v2/pkg/util" ) // CloudEventSourceReconciler reconciles a EventSource object @@ -115,6 +116,7 @@ func (r *CloudEventSourceReconciler) Reconcile(ctx context.Context, req ctrl.Req func (r *CloudEventSourceReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&eventingv1alpha1.CloudEventSource{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + WithEventFilter(util.IgnoreOtherNamespaces()). Complete(r) } diff --git a/controllers/keda/scaledjob_controller.go b/controllers/keda/scaledjob_controller.go index f381983e720..98c1ce87cc8 100755 --- a/controllers/keda/scaledjob_controller.go +++ b/controllers/keda/scaledjob_controller.go @@ -45,6 +45,7 @@ import ( "github.com/kedacore/keda/v2/pkg/metricscollector" "github.com/kedacore/keda/v2/pkg/scaling" kedastatus "github.com/kedacore/keda/v2/pkg/status" + "github.com/kedacore/keda/v2/pkg/util" ) // +kubebuilder:rbac:groups=keda.sh,resources=scaledjobs;scaledjobs/finalizers;scaledjobs/status,verbs="*" @@ -91,6 +92,7 @@ func (r *ScaledJobReconciler) SetupWithManager(mgr ctrl.Manager, options control kedacontrollerutil.PausedPredicate{}, predicate.GenerationChangedPredicate{}, ))). + WithEventFilter(util.IgnoreOtherNamespaces()). 
Complete(r) } diff --git a/controllers/keda/scaledobject_controller.go b/controllers/keda/scaledobject_controller.go index dec4c5f0c23..f634738d3c3 100755 --- a/controllers/keda/scaledobject_controller.go +++ b/controllers/keda/scaledobject_controller.go @@ -50,6 +50,7 @@ import ( "github.com/kedacore/keda/v2/pkg/metricscollector" "github.com/kedacore/keda/v2/pkg/scaling" kedastatus "github.com/kedacore/keda/v2/pkg/status" + "github.com/kedacore/keda/v2/pkg/util" ) // +kubebuilder:rbac:groups=keda.sh,resources=scaledobjects;scaledobjects/finalizers;scaledobjects/status,verbs="*" @@ -134,6 +135,7 @@ func (r *ScaledObjectReconciler) SetupWithManager(mgr ctrl.Manager, options cont predicate.GenerationChangedPredicate{}, ), )). + WithEventFilter(util.IgnoreOtherNamespaces()). // Trigger a reconcile only when the HPA spec,label or annotation changes. // Ignore updates to HPA status Owns(&autoscalingv2.HorizontalPodAutoscaler{}, builder.WithPredicates( diff --git a/controllers/keda/triggerauthentication_controller.go b/controllers/keda/triggerauthentication_controller.go index 8221d7d2ea3..edca7ea8efc 100755 --- a/controllers/keda/triggerauthentication_controller.go +++ b/controllers/keda/triggerauthentication_controller.go @@ -32,6 +32,7 @@ import ( kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" "github.com/kedacore/keda/v2/pkg/eventreason" "github.com/kedacore/keda/v2/pkg/metricscollector" + "github.com/kedacore/keda/v2/pkg/util" ) // TriggerAuthenticationReconciler reconciles a TriggerAuthentication object @@ -90,6 +91,7 @@ func (r *TriggerAuthenticationReconciler) Reconcile(ctx context.Context, req ctr func (r *TriggerAuthenticationReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&kedav1alpha1.TriggerAuthentication{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + WithEventFilter(util.IgnoreOtherNamespaces()). 
Complete(r) } diff --git a/pkg/metricscollector/opentelemetry.go b/pkg/metricscollector/opentelemetry.go index 97a811ef9ee..f66a0d55038 100644 --- a/pkg/metricscollector/opentelemetry.go +++ b/pkg/metricscollector/opentelemetry.go @@ -268,7 +268,7 @@ func (o *OtelMetrics) RecordScaledObjectPaused(namespace string, scaledObject st attribute.Key("scaledObject").String(scaledObject), ) - cback := func(ctx context.Context, obsrv api.Float64Observer) error { + cback := func(_ context.Context, obsrv api.Float64Observer) error { obsrv.Observe(float64(activeVal), opt) return nil } diff --git a/pkg/scalers/aws/aws_common.go b/pkg/scalers/aws/aws_common.go index b581a4f8d30..38a460b5a3e 100644 --- a/pkg/scalers/aws/aws_common.go +++ b/pkg/scalers/aws/aws_common.go @@ -75,7 +75,7 @@ func GetAwsConfig(ctx context.Context, awsRegion string, awsAuthorization Author if metadata.awsAuthorization.AwsRoleArn != "" { stsSvc := sts.NewFromConfig(cfg) - stsCredentialProvider := stscreds.NewAssumeRoleProvider(stsSvc, metadata.awsAuthorization.AwsRoleArn, func(options *stscreds.AssumeRoleOptions) {}) + stsCredentialProvider := stscreds.NewAssumeRoleProvider(stsSvc, metadata.awsAuthorization.AwsRoleArn, func(_ *stscreds.AssumeRoleOptions) {}) cfg.Credentials = aws.NewCredentialsCache(stsCredentialProvider) } return &cfg, err diff --git a/pkg/scalers/aws/aws_sigv4.go b/pkg/scalers/aws/aws_sigv4.go index a417fa8f2e3..2abde772f1c 100644 --- a/pkg/scalers/aws/aws_sigv4.go +++ b/pkg/scalers/aws/aws_sigv4.go @@ -109,7 +109,7 @@ func NewSigV4RoundTripper(config *scalersconfig.ScalerConfig) (http.RoundTripper return nil, err } - client := amp.NewFromConfig(*awsCfg, func(o *amp.Options) {}) + client := amp.NewFromConfig(*awsCfg, func(_ *amp.Options) {}) rt := &roundTripper{ client: client, } diff --git a/pkg/util/watch.go b/pkg/util/watch.go index 29261d638e9..81baa49f28d 100644 --- a/pkg/util/watch.go +++ b/pkg/util/watch.go @@ -3,8 +3,11 @@ package util import ( "fmt" "os" + "strings" "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" ) // GetWatchNamespaces returns the namespaces the operator should be watching for changes @@ -15,11 +18,29 @@ func GetWatchNamespaces() (map[string]cache.Config, error) { return map[string]cache.Config{}, fmt.Errorf("%s must be set", WatchNamespaceEnvVar) } - if ns == "" { + if ns == "" || ns == "\"\"" { return map[string]cache.Config{}, nil } + nss := strings.Split(ns, ",") + nssMap := make(map[string]cache.Config) + for _, n := range nss { + nssMap[n] = cache.Config{} + } + + return nssMap, nil +} - return map[string]cache.Config{ - ns: {}, - }, nil +// IgnoreOtherNamespaces returns the predicate for watched events that will filter out those that are not coming +// from a watched namespace (empty namespace or unset env var denotes all) +func IgnoreOtherNamespaces() predicate.Predicate { + nss, e := GetWatchNamespaces() + if len(nss) == 0 || e != nil { + return predicate.And() // no-op predicate that returns always true + } + return predicate.Funcs{ + GenericFunc: func(e event.GenericEvent) bool { + _, ok := nss[e.Object.GetNamespace()] + return ok + }, + } } From 5ea0e258980fd3d53f5f81fdf6b8336ed4bdc244 Mon Sep 17 00:00:00 2001 From: goodfirm <166383463+goodfirm@users.noreply.github.com> Date: Thu, 11 Apr 2024 01:07:15 +0800 Subject: [PATCH 05/10] chore: fix function names in comment (#5673) Signed-off-by: goodfirm --- apis/keda/v1alpha1/scaledobject_types.go | 2 +- pkg/eventemitter/eventemitter.go | 2 
+- pkg/scalers/datadog_scaler.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apis/keda/v1alpha1/scaledobject_types.go b/apis/keda/v1alpha1/scaledobject_types.go index 0e8ddf614fe..70853774cd4 100644 --- a/apis/keda/v1alpha1/scaledobject_types.go +++ b/apis/keda/v1alpha1/scaledobject_types.go @@ -199,7 +199,7 @@ func (so *ScaledObject) HasPausedReplicaAnnotation() bool { return pausedReplicasAnnotationFound } -// HasPausedAnnotition returns whether this ScaledObject has PausedAnnotation or PausedReplicasAnnotation +// HasPausedAnnotation returns whether this ScaledObject has PausedAnnotation or PausedReplicasAnnotation func (so *ScaledObject) HasPausedAnnotation() bool { _, pausedAnnotationFound := so.GetAnnotations()[PausedAnnotation] _, pausedReplicasAnnotationFound := so.GetAnnotations()[PausedReplicasAnnotation] diff --git a/pkg/eventemitter/eventemitter.go b/pkg/eventemitter/eventemitter.go index 1d1cbb4b55c..90ba7153383 100644 --- a/pkg/eventemitter/eventemitter.go +++ b/pkg/eventemitter/eventemitter.go @@ -207,7 +207,7 @@ func (e *EventEmitter) clearEventHandlersCache(cloudEventSource *eventingv1alpha } } -// clearEventHandlersCache will check if the event handlers that were created by passing CloudEventSource exist +// checkIfEventHandlersExist will check if the event handlers that were created by passing CloudEventSource exist func (e *EventEmitter) checkIfEventHandlersExist(cloudEventSource *eventingv1alpha1.CloudEventSource) bool { e.eventHandlersCacheLock.RLock() defer e.eventHandlersCacheLock.RUnlock() diff --git a/pkg/scalers/datadog_scaler.go b/pkg/scalers/datadog_scaler.go index 2e0b8882a8c..c06ede68aeb 100644 --- a/pkg/scalers/datadog_scaler.go +++ b/pkg/scalers/datadog_scaler.go @@ -229,7 +229,7 @@ func parseDatadogMetadata(config *scalersconfig.ScalerConfig, logger logr.Logger return &meta, nil } -// newDatddogConnection tests a connection to the Datadog API +// newDatadogConnection tests a connection to the Datadog API func newDatadogConnection(ctx context.Context, meta *datadogMetadata, config *scalersconfig.ScalerConfig) (*datadog.APIClient, error) { ctx = context.WithValue( ctx, From 7611438243be06a83b4cede07357223ffbfacc34 Mon Sep 17 00:00:00 2001 From: Jorge Turrado Ferrero Date: Wed, 10 Apr 2024 23:41:19 +0200 Subject: [PATCH 06/10] fix: reports errors during upstream requests (#5664) * fix: reports errors during upstream requests Signed-off-by: Jorge Turrado * . 
Signed-off-by: Jorge Turrado * move the metric Signed-off-by: Jorge Turrado --------- Signed-off-by: Jorge Turrado Signed-off-by: Jorge Turrado Ferrero --- CHANGELOG.md | 2 +- pkg/scaling/scale_handler.go | 12 +++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 706593e1a5d..944b9f6109d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -80,7 +80,7 @@ Here is an overview of all new **experimental** features: - **General**: Fix CVE-2024-28180 in github.com/go-jose/go-jose/v3 ([#5617](https://github.com/kedacore/keda/pull/5617)) - **General**: Log field `ScaledJob` no longer have conflicting types ([#5592](https://github.com/kedacore/keda/pull/5592)) -- **General**: Prometheus metrics shows errors correctly ([#5597](https://github.com/kedacore/keda/issues/5597)) +- **General**: Prometheus metrics shows errors correctly ([#5597](https://github.com/kedacore/keda/issues/5597)|[#5663](https://github.com/kedacore/keda/issues/5663)) - **General**: Validate empty array value of triggers in ScaledObject/ScaledJob creation ([#5520](https://github.com/kedacore/keda/issues/5520)) - **GitHub Runner Scaler**: Fixed `in_progress` detection on running jobs instead of just `queued` ([#5604](https://github.com/kedacore/keda/issues/5604)) - **New Relic Scaler**: Consider empty results set from query executer ([#5619](https://github.com/kedacore/keda/pull/5619)) diff --git a/pkg/scaling/scale_handler.go b/pkg/scaling/scale_handler.go index 0849c844caa..a62263c7166 100644 --- a/pkg/scaling/scale_handler.go +++ b/pkg/scaling/scale_handler.go @@ -637,7 +637,7 @@ func (h *scaleHandler) getScaledObjectState(ctx context.Context, scaledObject *k isScaledObjectActive = true activeTriggers = append(activeTriggers, result.TriggerName) } - if result.IsError { + if result.Err != nil { isScaledObjectError = true } matchingMetrics = append(matchingMetrics, result.Metrics...) 
@@ -647,6 +647,8 @@ func (h *scaleHandler) getScaledObjectState(ctx context.Context, scaledObject *k for k, v := range result.Records { metricsRecord[k] = v } + + metricscollector.RecordScaledObjectError(scaledObject.Namespace, scaledObject.Name, result.Err) } // invalidate the cache for the ScaledObject, if we hit an error in any scaler @@ -707,11 +709,11 @@ func (h *scaleHandler) getScaledObjectState(ctx context.Context, scaledObject *k type scalerState struct { // IsActive will be overrided by formula calculation IsActive bool - IsError bool TriggerName string Metrics []external_metrics.ExternalMetricValue Pairs map[string]string Records map[string]metricscache.MetricsRecord + Err error } // getScalerState returns getStateScalerResult with the state @@ -722,7 +724,7 @@ func (*scaleHandler) getScalerState(ctx context.Context, scaler scalers.Scaler, cache *cache.ScalersCache, logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) scalerState { result := scalerState{ IsActive: false, - IsError: false, + Err: nil, TriggerName: "", Metrics: []external_metrics.ExternalMetricValue{}, Pairs: map[string]string{}, @@ -736,7 +738,7 @@ func (*scaleHandler) getScalerState(ctx context.Context, scaler scalers.Scaler, metricSpecs, err := cache.GetMetricSpecForScalingForScaler(ctx, triggerIndex) if err != nil { - result.IsError = true + result.Err = err logger.Error(err, "error getting metric spec for the scaler", "scaler", result.TriggerName) cache.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.KEDAScalerFailed, err.Error()) } @@ -766,7 +768,7 @@ func (*scaleHandler) getScalerState(ctx context.Context, scaler scalers.Scaler, } if err != nil { - result.IsError = true + result.Err = err if scaledObject.IsUsingModifiers() { logger.Error(err, "error getting metric source", "source", result.TriggerName) cache.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.KEDAMetricSourceFailed, err.Error()) From c20c47e682ef14162267089867aa0c5ecd0117ad Mon Sep 17 00:00:00 2001 From: SpiritZhou Date: Thu, 11 Apr 2024 05:43:45 +0800 Subject: [PATCH 07/10] Introduce Filter CloudEvents Feature (#5424) * update on spiritzhou/filtercloudevent Signed-off-by: SpiritZhou * Update CHANGELOG.md Co-authored-by: Tom Kerkhove Signed-off-by: SpiritZhou * Update log Signed-off-by: SpiritZhou * add crd validation Signed-off-by: SpiritZhou * update Signed-off-by: SpiritZhou * Fix Signed-off-by: SpiritZhou * Update pkg/eventemitter/eventemitter.go Co-authored-by: Jorge Turrado Ferrero Signed-off-by: SpiritZhou * Update Signed-off-by: SpiritZhou * Fix Signed-off-by: SpiritZhou * Fix Signed-off-by: SpiritZhou * Update Signed-off-by: SpiritZhou * Update Signed-off-by: SpiritZhou --------- Signed-off-by: SpiritZhou Signed-off-by: Jorge Turrado Ferrero Co-authored-by: Tom Kerkhove Co-authored-by: Jorge Turrado Ferrero --- CHANGELOG.md | 2 +- .../eventing/v1alpha1/cloudevent_types.go | 12 +- .../v1alpha1/cloudeventsource_types.go | 16 +- .../v1alpha1/cloudeventsource_webhook.go | 97 +++++++ .../v1alpha1/cloudeventsource_webhook_test.go | 242 ++++++++++++++++++ .../v1alpha1/zz_generated.deepcopy.go | 28 +- cmd/webhooks/main.go | 10 +- .../eventing.keda.sh_cloudeventsources.yaml | 22 ++ config/webhooks/validation_webhooks.yaml | 24 ++ controllers/keda/scaledobject_controller.go | 9 +- pkg/eventemitter/cloudevent_http_handler.go | 2 +- .../cloudevent_http_handler_test.go | 12 +- pkg/eventemitter/eventdata/eventdata.go | 22 +- pkg/eventemitter/eventemitter.go | 55 +++- pkg/eventemitter/eventemitter_test.go | 
32 ++- pkg/eventemitter/eventfilter.go | 51 ++++ pkg/mock/mock_eventemitter/mock_interface.go | 2 +- .../cloudevent_source_test.go | 170 +++++++++++- 18 files changed, 754 insertions(+), 54 deletions(-) rename pkg/eventemitter/eventtypes.go => apis/eventing/v1alpha1/cloudevent_types.go (63%) create mode 100644 apis/eventing/v1alpha1/cloudeventsource_webhook.go create mode 100644 apis/eventing/v1alpha1/cloudeventsource_webhook_test.go create mode 100644 pkg/eventemitter/eventfilter.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 944b9f6109d..d9429608c3c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,7 +53,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ### New -- **General**: TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) +- **General**: Provide capability to filter CloudEvents ([#3533](https://github.com/kedacore/keda/issues/3533)) - **NATS Scaler**: Add TLS authentication ([#2296](https://github.com/kedacore/keda/issues/2296)) #### Experimental diff --git a/pkg/eventemitter/eventtypes.go b/apis/eventing/v1alpha1/cloudevent_types.go similarity index 63% rename from pkg/eventemitter/eventtypes.go rename to apis/eventing/v1alpha1/cloudevent_types.go index 3d00a96f758..89e14109882 100644 --- a/pkg/eventemitter/eventtypes.go +++ b/apis/eventing/v1alpha1/cloudevent_types.go @@ -14,12 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -package eventemitter +package v1alpha1 + +// CloudEventType contains the list of cloudevent types +// +kubebuilder:validation:Enum=keda.scaledobject.ready.v1;keda.scaledobject.failed.v1 +type CloudEventType string const ( // ScaledObjectReadyType is for event when a new ScaledObject is ready - ScaledObjectReadyType = "keda.scaledobject.ready.v1" + ScaledObjectReadyType CloudEventType = "keda.scaledobject.ready.v1" // ScaledObjectFailedType is for event when creating ScaledObject failed - ScaledObjectFailedType = "keda.scaledobject.failed.v1" + ScaledObjectFailedType CloudEventType = "keda.scaledobject.failed.v1" ) + +var AllEventTypes = []CloudEventType{ScaledObjectFailedType, ScaledObjectReadyType} diff --git a/apis/eventing/v1alpha1/cloudeventsource_types.go b/apis/eventing/v1alpha1/cloudeventsource_types.go index 2ede7dbcd10..8b81700eaad 100644 --- a/apis/eventing/v1alpha1/cloudeventsource_types.go +++ b/apis/eventing/v1alpha1/cloudeventsource_types.go @@ -51,6 +51,9 @@ type CloudEventSourceSpec struct { ClusterName string `json:"clusterName,omitempty"` Destination Destination `json:"destination"` + + // +optional + EventSubscription EventSubscription `json:"eventSubscription,omitempty"` } // CloudEventSourceStatus defines the observed state of CloudEventSource @@ -70,13 +73,22 @@ type CloudEventHTTP struct { URI string `json:"uri"` } +// EventSubscription defines filters for events +type EventSubscription struct { + // +optional + IncludedEventTypes []CloudEventType `json:"includedEventTypes,omitempty"` + + // +optional + ExcludedEventTypes []CloudEventType `json:"excludedEventTypes,omitempty"` +} + func init() { SchemeBuilder.Register(&CloudEventSource{}, &CloudEventSourceList{}) } // GenerateIdentifier returns identifier for the object in for "kind.namespace.name" -func (t *CloudEventSource) GenerateIdentifier() string { - return v1alpha1.GenerateIdentifier("CloudEventSource", t.Namespace, t.Name) +func (ces *CloudEventSource) GenerateIdentifier() string { + return v1alpha1.GenerateIdentifier("CloudEventSource", ces.Namespace, ces.Name) } // 
GetCloudEventSourceInitializedConditions returns CloudEventSource Conditions initialized to the default -> Status: Unknown diff --git a/apis/eventing/v1alpha1/cloudeventsource_webhook.go b/apis/eventing/v1alpha1/cloudeventsource_webhook.go new file mode 100644 index 00000000000..b520fc4f27f --- /dev/null +++ b/apis/eventing/v1alpha1/cloudeventsource_webhook.go @@ -0,0 +1,97 @@ +/* +Copyright 2024 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "encoding/json" + "fmt" + + "golang.org/x/exp/slices" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +var cloudeventsourcelog = logf.Log.WithName("cloudeventsource-validation-webhook") + +func (ces *CloudEventSource) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(ces). + Complete() +} + +// +kubebuilder:webhook:path=/validate-eventing-keda-sh-v1alpha1-cloudeventsource,mutating=false,failurePolicy=ignore,sideEffects=None,groups=eventing.keda.sh,resources=cloudeventsources,verbs=create;update,versions=v1alpha1,name=vcloudeventsource.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &CloudEventSource{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (ces *CloudEventSource) ValidateCreate() (admission.Warnings, error) { + val, _ := json.MarshalIndent(ces, "", " ") + cloudeventsourcelog.Info(fmt.Sprintf("validating cloudeventsource creation for %s", string(val))) + return validateSpec(&ces.Spec) +} + +func (ces *CloudEventSource) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + val, _ := json.MarshalIndent(ces, "", " ") + cloudeventsourcelog.V(1).Info(fmt.Sprintf("validating cloudeventsource update for %s", string(val))) + + oldCes := old.(*CloudEventSource) + if isCloudEventSourceRemovingFinalizer(ces.ObjectMeta, oldCes.ObjectMeta, ces.Spec, oldCes.Spec) { + cloudeventsourcelog.V(1).Info("finalizer removal, skipping validation") + return nil, nil + } + return validateSpec(&ces.Spec) +} + +func (ces *CloudEventSource) ValidateDelete() (admission.Warnings, error) { + return nil, nil +} + +func isCloudEventSourceRemovingFinalizer(om metav1.ObjectMeta, oldOm metav1.ObjectMeta, spec CloudEventSourceSpec, oldSpec CloudEventSourceSpec) bool { + cesSpec, _ := json.MarshalIndent(spec, "", " ") + oldCesSpec, _ := json.MarshalIndent(oldSpec, "", " ") + cesSpecString := string(cesSpec) + oldCesSpecString := string(oldCesSpec) + + return len(om.Finalizers) == 0 && len(oldOm.Finalizers) == 1 && cesSpecString == oldCesSpecString +} + +func validateSpec(spec *CloudEventSourceSpec) (admission.Warnings, error) { + if spec.EventSubscription.ExcludedEventTypes != nil && spec.EventSubscription.IncludedEventTypes != nil { + return nil, fmt.Errorf("setting included types and excluded 
types at the same time is not supported") + } + + if spec.EventSubscription.ExcludedEventTypes != nil { + for _, excludedEventType := range spec.EventSubscription.ExcludedEventTypes { + if !slices.Contains(AllEventTypes, excludedEventType) { + return nil, fmt.Errorf("excludedEventType: %s in cloudeventsource spec is not supported", excludedEventType) + } + } + } + + if spec.EventSubscription.IncludedEventTypes != nil { + for _, includedEventType := range spec.EventSubscription.IncludedEventTypes { + if !slices.Contains(AllEventTypes, includedEventType) { + return nil, fmt.Errorf("includedEventType: %s in cloudeventsource spec is not supported", includedEventType) + } + } + } + return nil, nil +} diff --git a/apis/eventing/v1alpha1/cloudeventsource_webhook_test.go b/apis/eventing/v1alpha1/cloudeventsource_webhook_test.go new file mode 100644 index 00000000000..425dba8a148 --- /dev/null +++ b/apis/eventing/v1alpha1/cloudeventsource_webhook_test.go @@ -0,0 +1,242 @@ +/* +Copyright 2024 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "path/filepath" + "strconv" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + admissionv1beta1 "k8s.io/api/admission/v1beta1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment +var ctx context.Context +var cancel context.CancelFunc + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Webhook Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.Background()) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: false, + WebhookInstallOptions: envtest.WebhookInstallOptions{ + Paths: []string{filepath.Join("..", "..", "..", "config", "webhooks")}, + }, + } + var err error + // cfg is defined in this file globally. 
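// Editor's aside — an illustrative sketch, not part of this patch: the
// validateSpec helper added above enforces two rules. A spec may set
// includedEventTypes or excludedEventTypes, but not both, and every listed
// type must appear in AllEventTypes. Assuming the v1alpha1 types shown
// earlier in this patch, its behavior looks like this:
//
//	valid := CloudEventSourceSpec{
//		EventSubscription: EventSubscription{
//			IncludedEventTypes: []CloudEventType{ScaledObjectReadyType},
//		},
//	}
//	_, err := validateSpec(&valid) // err == nil: ScaledObjectReadyType is supported
//
//	conflicting := CloudEventSourceSpec{
//		EventSubscription: EventSubscription{
//			IncludedEventTypes: []CloudEventType{ScaledObjectReadyType},
//			ExcludedEventTypes: []CloudEventType{ScaledObjectFailedType},
//		},
//	}
//	_, err = validateSpec(&conflicting)
//	// err: "setting included types and excluded types at the same time is not supported"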
+	done := make(chan interface{})
+	go func() {
+		defer GinkgoRecover()
+		cfg, err = testEnv.Start()
+		close(done)
+	}()
+	Eventually(done).WithTimeout(time.Minute).Should(BeClosed())
+	Expect(err).NotTo(HaveOccurred())
+	Expect(cfg).NotTo(BeNil())
+
+	scheme := runtime.NewScheme()
+	err = AddToScheme(scheme)
+	Expect(err).NotTo(HaveOccurred())
+
+	err = clientgoscheme.AddToScheme(scheme)
+	Expect(err).NotTo(HaveOccurred())
+
+	err = admissionv1beta1.AddToScheme(scheme)
+	Expect(err).NotTo(HaveOccurred())
+
+	//+kubebuilder:scaffold:scheme
+
+	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
+	Expect(err).NotTo(HaveOccurred())
+	Expect(k8sClient).NotTo(BeNil())
+
+	// start webhook server using Manager
+	webhookInstallOptions := &testEnv.WebhookInstallOptions
+	mgr, err := ctrl.NewManager(cfg, ctrl.Options{
+		Scheme: scheme,
+		WebhookServer: webhook.NewServer(webhook.Options{
+			Host:    webhookInstallOptions.LocalServingHost,
+			Port:    webhookInstallOptions.LocalServingPort,
+			CertDir: webhookInstallOptions.LocalServingCertDir,
+		}),
+		LeaderElection: false,
+		Metrics: server.Options{
+			BindAddress: "0",
+		},
+	})
+	Expect(err).NotTo(HaveOccurred())
+
+	err = (&CloudEventSource{}).SetupWebhookWithManager(mgr)
+	Expect(err).NotTo(HaveOccurred())
+
+	//+kubebuilder:scaffold:webhook
+
+	go func() {
+		defer GinkgoRecover()
+		err = mgr.Start(ctx)
+		Expect(err).NotTo(HaveOccurred())
+	}()
+
+	// wait for the webhook server to get ready
+	dialer := &net.Dialer{Timeout: time.Second}
+	addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort)
+	Eventually(func() error {
+		conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true})
+		if err != nil {
+			return err
+		}
+		conn.Close()
+		return nil
+	}).Should(Succeed())
+
+})
+
+var _ = It("validate cloudeventsource when event type is not supported", func() {
+	namespaceName := "nscloudeventnotsupport"
+	namespace := createNamespace(namespaceName)
+	err := k8sClient.Create(context.Background(), namespace)
+	Expect(err).ToNot(HaveOccurred())
+
+	spec := createCloudEventSourceSpecWithExcludeEventType("keda.scaledobject.ready.v1.test")
+	ces := createCloudEventSource("nsccesexcludenotsupport", namespaceName, spec)
+	Eventually(func() error {
+		return k8sClient.Create(context.Background(), ces)
+	}).Should(HaveOccurred())
+
+	spec = createCloudEventSourceSpecWithIncludeEventType("keda.scaledobject.ready.v1.test")
+	ces = createCloudEventSource("nsccesincludenotsupport", namespaceName, spec)
+	Eventually(func() error {
+		return k8sClient.Create(context.Background(), ces)
+	}).Should(HaveOccurred())
+})
+
+var _ = It("validate cloudeventsource when event type is supported", func() {
+	namespaceName := "cloudeventtestns"
+	namespace := createNamespace(namespaceName)
+	err := k8sClient.Create(context.Background(), namespace)
+	Expect(err).ToNot(HaveOccurred())
+
+	for k, eventType := range AllEventTypes {
+		spec := createCloudEventSourceSpecWithExcludeEventType(eventType)
+		ces := createCloudEventSource("cloudeventexclude"+strconv.Itoa(k), namespaceName, spec)
+		Eventually(func() error {
+			return k8sClient.Create(context.Background(), ces)
+		}).ShouldNot(HaveOccurred())
+	}
+
+	for k, eventType := range AllEventTypes {
+		spec := createCloudEventSourceSpecWithIncludeEventType(eventType)
+		ces := createCloudEventSource("cloudeventinclude"+strconv.Itoa(k), namespaceName, spec)
+		Eventually(func() error {
+			return k8sClient.Create(context.Background(), ces)
+		}).ShouldNot(HaveOccurred())
+	}
+})
+
+var _ = It("validate invalid cloudeventsource whose event type is in both excludedEventTypes and includedEventTypes", func() {
+	namespaceName := "cloudeventtestnsinvalid"
+	namespace := createNamespace(namespaceName)
+	err := k8sClient.Create(context.Background(), namespace)
+	Expect(err).ToNot(HaveOccurred())
+
+	spec := createInvalidCloudEventSourceSpec(ScaledObjectReadyType)
+	ces := createCloudEventSource("invalidcloudevent", namespaceName, spec)
+	Eventually(func() error {
+		return k8sClient.Create(context.Background(), ces)
+	}).Should(HaveOccurred())
+})
+
+// -------------------------------------------------------------------------- //
+// ---------------------------- HELPER FUNCTIONS ---------------------------- //
+// -------------------------------------------------------------------------- //
+
+func createNamespace(name string) *v1.Namespace {
+	return &v1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{Name: name},
+	}
+}
+
+func createCloudEventSourceSpecWithExcludeEventType(eventtype CloudEventType) CloudEventSourceSpec {
+	return CloudEventSourceSpec{
+		EventSubscription: EventSubscription{
+			ExcludedEventTypes: []CloudEventType{eventtype},
+		},
+	}
+}
+
+func createCloudEventSourceSpecWithIncludeEventType(eventtype CloudEventType) CloudEventSourceSpec {
+	return CloudEventSourceSpec{
+		EventSubscription: EventSubscription{
+			IncludedEventTypes: []CloudEventType{eventtype},
+		},
+	}
+}
+
+func createInvalidCloudEventSourceSpec(eventtype CloudEventType) CloudEventSourceSpec {
+	return CloudEventSourceSpec{
+		EventSubscription: EventSubscription{
+			ExcludedEventTypes: []CloudEventType{eventtype},
+			IncludedEventTypes: []CloudEventType{eventtype},
+		},
+	}
+}
+
+func createCloudEventSource(name string, namespace string, spec CloudEventSourceSpec) *CloudEventSource {
+	return &CloudEventSource{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "CloudEventSource",
+			APIVersion: "eventing.keda.sh",
+		},
+		Spec: spec,
+	}
+}
diff --git a/apis/eventing/v1alpha1/zz_generated.deepcopy.go b/apis/eventing/v1alpha1/zz_generated.deepcopy.go
index 57f0e41b882..a76e4c544df 100644
--- a/apis/eventing/v1alpha1/zz_generated.deepcopy.go
+++ b/apis/eventing/v1alpha1/zz_generated.deepcopy.go
@@ -22,7 +22,7 @@ package v1alpha1
 import (
 	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
-	runtime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime"
 )
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -103,6 +103,7 @@ func (in *CloudEventSourceList) DeepCopyObject() runtime.Object {
 func (in *CloudEventSourceSpec) DeepCopyInto(out *CloudEventSourceSpec) {
 	*out = *in
 	in.Destination.DeepCopyInto(&out.Destination)
+	in.EventSubscription.DeepCopyInto(&out.EventSubscription)
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudEventSourceSpec.
@@ -154,3 +155,28 @@ func (in *Destination) DeepCopy() *Destination {
 	in.DeepCopyInto(out)
 	return out
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventSubscription) DeepCopyInto(out *EventSubscription) { + *out = *in + if in.IncludedEventTypes != nil { + in, out := &in.IncludedEventTypes, &out.IncludedEventTypes + *out = make([]CloudEventType, len(*in)) + copy(*out, *in) + } + if in.ExcludedEventTypes != nil { + in, out := &in.ExcludedEventTypes, &out.ExcludedEventTypes + *out = make([]CloudEventType, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSubscription. +func (in *EventSubscription) DeepCopy() *EventSubscription { + if in == nil { + return nil + } + out := new(EventSubscription) + in.DeepCopyInto(out) + return out +} diff --git a/cmd/webhooks/main.go b/cmd/webhooks/main.go index 4f03ba99b63..d9832bdd1b7 100644 --- a/cmd/webhooks/main.go +++ b/cmd/webhooks/main.go @@ -34,6 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" + eventingv1alpha1 "github.com/kedacore/keda/v2/apis/eventing/v1alpha1" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" "github.com/kedacore/keda/v2/pkg/k8s" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -49,6 +50,7 @@ func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(kedav1alpha1.AddToScheme(scheme)) + utilruntime.Must(eventingv1alpha1.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } @@ -138,6 +140,10 @@ func setupWebhook(mgr manager.Manager) { setupLog.Error(err, "unable to create webhook", "webhook", "ScaledObject") os.Exit(1) } + if err := (&kedav1alpha1.ScaledJob{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "ScaledJob") + os.Exit(1) + } if err := (&kedav1alpha1.TriggerAuthentication{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "TriggerAuthentication") os.Exit(1) @@ -146,8 +152,8 @@ func setupWebhook(mgr manager.Manager) { setupLog.Error(err, "unable to create webhook", "webhook", "ClusterTriggerAuthentication") os.Exit(1) } - if err := (&kedav1alpha1.ScaledJob{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "ScaledJob") + if err := (&eventingv1alpha1.CloudEventSource{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "CloudEventSource") os.Exit(1) } } diff --git a/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml b/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml index 2225f2b6ec8..8fd3c5417b5 100644 --- a/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml +++ b/config/crd/bases/eventing.keda.sh_cloudeventsources.yaml @@ -57,6 +57,28 @@ spec: - uri type: object type: object + eventSubscription: + description: EventSubscription defines filters for events + properties: + excludedEventTypes: + items: + description: CloudEventType contains the list of cloudevent + types + enum: + - keda.scaledobject.ready.v1 + - keda.scaledobject.failed.v1 + type: string + type: array + includedEventTypes: + items: + description: CloudEventType contains the list of cloudevent + types + enum: + - keda.scaledobject.ready.v1 + - keda.scaledobject.failed.v1 + type: string + type: array + type: object required: - destination type: object diff --git a/config/webhooks/validation_webhooks.yaml b/config/webhooks/validation_webhooks.yaml index 0e35590bc11..14ff71baef8 100644 --- a/config/webhooks/validation_webhooks.yaml +++ b/config/webhooks/validation_webhooks.yaml 
@@ -105,3 +105,27 @@ webhooks: - clustertriggerauthentications sideEffects: None timeoutSeconds: 10 +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: keda-admission-webhooks + namespace: keda + path: /validate-eventing-keda-sh-v1alpha1-cloudeventsource + failurePolicy: Ignore + matchPolicy: Equivalent + name: vcloudeventsource.kb.io + namespaceSelector: {} + objectSelector: {} + rules: + - apiGroups: + - eventing.keda.sh + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - cloudeventsources + sideEffects: None + timeoutSeconds: 10 diff --git a/controllers/keda/scaledobject_controller.go b/controllers/keda/scaledobject_controller.go index f634738d3c3..27625f6f10a 100755 --- a/controllers/keda/scaledobject_controller.go +++ b/controllers/keda/scaledobject_controller.go @@ -42,6 +42,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" + eventingv1alpha1 "github.com/kedacore/keda/v2/apis/eventing/v1alpha1" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" kedacontrollerutil "github.com/kedacore/keda/v2/controllers/keda/util" "github.com/kedacore/keda/v2/pkg/common/message" @@ -183,7 +184,7 @@ func (r *ScaledObjectReconciler) Reconcile(ctx context.Context, req ctrl.Request if !scaledObject.Status.Conditions.AreInitialized() { conditions := kedav1alpha1.GetInitializedConditions() if err := kedastatus.SetStatusConditions(ctx, r.Client, reqLogger, scaledObject, conditions); err != nil { - r.EventEmitter.Emit(scaledObject, req.NamespacedName, corev1.EventTypeWarning, eventemitter.ScaledObjectFailedType, eventreason.ScaledObjectUpdateFailed, err.Error()) + r.EventEmitter.Emit(scaledObject, req.NamespacedName, corev1.EventTypeWarning, eventingv1alpha1.ScaledObjectFailedType, eventreason.ScaledObjectUpdateFailed, err.Error()) return ctrl.Result{}, err } } @@ -195,18 +196,18 @@ func (r *ScaledObjectReconciler) Reconcile(ctx context.Context, req ctrl.Request reqLogger.Error(err, msg) conditions.SetReadyCondition(metav1.ConditionFalse, "ScaledObjectCheckFailed", msg) conditions.SetActiveCondition(metav1.ConditionUnknown, "UnknownState", "ScaledObject check failed") - r.EventEmitter.Emit(scaledObject, req.NamespacedName, corev1.EventTypeWarning, eventemitter.ScaledObjectFailedType, eventreason.ScaledObjectCheckFailed, msg) + r.EventEmitter.Emit(scaledObject, req.NamespacedName, corev1.EventTypeWarning, eventingv1alpha1.ScaledObjectFailedType, eventreason.ScaledObjectCheckFailed, msg) } else { wasReady := conditions.GetReadyCondition() if wasReady.IsFalse() || wasReady.IsUnknown() { - r.EventEmitter.Emit(scaledObject, req.NamespacedName, corev1.EventTypeNormal, eventemitter.ScaledObjectReadyType, eventreason.ScaledObjectReady, message.ScalerReadyMsg) + r.EventEmitter.Emit(scaledObject, req.NamespacedName, corev1.EventTypeNormal, eventingv1alpha1.ScaledObjectReadyType, eventreason.ScaledObjectReady, message.ScalerReadyMsg) } reqLogger.V(1).Info(msg) conditions.SetReadyCondition(metav1.ConditionTrue, kedav1alpha1.ScaledObjectConditionReadySuccessReason, msg) } if err := kedastatus.SetStatusConditions(ctx, r.Client, reqLogger, scaledObject, &conditions); err != nil { - r.EventEmitter.Emit(scaledObject, req.NamespacedName, corev1.EventTypeWarning, eventemitter.ScaledObjectFailedType, eventreason.ScaledObjectUpdateFailed, err.Error()) + r.EventEmitter.Emit(scaledObject, req.NamespacedName, corev1.EventTypeWarning, eventingv1alpha1.ScaledObjectFailedType, eventreason.ScaledObjectUpdateFailed, err.Error()) 
return ctrl.Result{}, err } diff --git a/pkg/eventemitter/cloudevent_http_handler.go b/pkg/eventemitter/cloudevent_http_handler.go index e02e64861db..fea8b943b7e 100644 --- a/pkg/eventemitter/cloudevent_http_handler.go +++ b/pkg/eventemitter/cloudevent_http_handler.go @@ -93,7 +93,7 @@ func (c *CloudEventHTTPHandler) EmitEvent(eventData eventdata.EventData, failure event := cloudevents.NewEvent() event.SetSource(source) event.SetSubject(subject) - event.SetType(eventData.EventType) + event.SetType(string(eventData.CloudEventType)) if err := event.SetData(cloudevents.ApplicationJSON, EmitData{Reason: eventData.Reason, Message: eventData.Message}); err != nil { c.logger.Error(err, "Failed to set data to CloudEvents receiver") diff --git a/pkg/eventemitter/cloudevent_http_handler_test.go b/pkg/eventemitter/cloudevent_http_handler_test.go index b9ce9c250c2..28fc91aa098 100644 --- a/pkg/eventemitter/cloudevent_http_handler_test.go +++ b/pkg/eventemitter/cloudevent_http_handler_test.go @@ -51,12 +51,12 @@ var testErrCloudeventHTTPHandlerTestData = []parseCloudeventHTTPHandlerTestData{ } var testErrEventData = eventdata.EventData{ - Namespace: "aaa", - ObjectName: "bbb", - EventType: "ccc", - Reason: "ddd", - Message: "eee", - Time: time.Now().UTC(), + Namespace: "aaa", + ObjectName: "bbb", + CloudEventType: "ccc", + Reason: "ddd", + Message: "eee", + Time: time.Now().UTC(), } func TestCorrectCloudeventHTTPHandler(t *testing.T) { diff --git a/pkg/eventemitter/eventdata/eventdata.go b/pkg/eventemitter/eventdata/eventdata.go index e2a571d2035..3536f79fc0e 100644 --- a/pkg/eventemitter/eventdata/eventdata.go +++ b/pkg/eventemitter/eventdata/eventdata.go @@ -18,18 +18,20 @@ package eventdata import ( "time" + + eventingv1alpha1 "github.com/kedacore/keda/v2/apis/eventing/v1alpha1" ) // EventData will save all event info and handler info for retry. 
type EventData struct { - Namespace string - ObjectName string - ObjectType string - EventType string - Reason string - Message string - Time time.Time - HandlerKey string - RetryTimes int - Err error + Namespace string + ObjectName string + ObjectType string + CloudEventType eventingv1alpha1.CloudEventType + Reason string + Message string + Time time.Time + HandlerKey string + RetryTimes int + Err error } diff --git a/pkg/eventemitter/eventemitter.go b/pkg/eventemitter/eventemitter.go index 90ba7153383..7f473de74a3 100644 --- a/pkg/eventemitter/eventemitter.go +++ b/pkg/eventemitter/eventemitter.go @@ -59,7 +59,9 @@ type EventEmitter struct { recorder record.EventRecorder clusterName string eventHandlersCache map[string]EventDataHandler + eventFilterCache map[string]*EventFilter eventHandlersCacheLock *sync.RWMutex + eventFilterCacheLock *sync.RWMutex eventLoopContexts *sync.Map cloudEventProcessingChan chan eventdata.EventData } @@ -68,7 +70,7 @@ type EventEmitter struct { type EventHandler interface { DeleteCloudEventSource(cloudEventSource *eventingv1alpha1.CloudEventSource) error HandleCloudEventSource(ctx context.Context, cloudEventSource *eventingv1alpha1.CloudEventSource) error - Emit(object runtime.Object, namesapce types.NamespacedName, eventType string, cloudeventType string, reason string, message string) + Emit(object runtime.Object, namesapce types.NamespacedName, eventType string, cloudeventType eventingv1alpha1.CloudEventType, reason string, message string) } // EventDataHandler defines the behavior for different event handlers @@ -97,7 +99,9 @@ func NewEventEmitter(client client.Client, recorder record.EventRecorder, cluste recorder: recorder, clusterName: clusterName, eventHandlersCache: map[string]EventDataHandler{}, + eventFilterCache: map[string]*EventFilter{}, eventHandlersCacheLock: &sync.RWMutex{}, + eventFilterCacheLock: &sync.RWMutex{}, eventLoopContexts: &sync.Map{}, cloudEventProcessingChan: make(chan eventdata.EventData, maxChannelBuffer), } @@ -165,7 +169,9 @@ func (e *EventEmitter) DeleteCloudEventSource(cloudEventSource *eventingv1alpha1 // use in the loop. 
func (e *EventEmitter) createEventHandlers(ctx context.Context, cloudEventSource *eventingv1alpha1.CloudEventSource) { e.eventHandlersCacheLock.Lock() + e.eventFilterCacheLock.Lock() defer e.eventHandlersCacheLock.Unlock() + defer e.eventFilterCacheLock.Unlock() key := cloudEventSource.GenerateIdentifier() @@ -188,12 +194,17 @@ func (e *EventEmitter) createEventHandlers(ctx context.Context, cloudEventSource } e.eventHandlersCache[eventHandlerKey] = eventHandler } + + // Create EventFilter from CloudEventSource + e.eventFilterCache[key] = NewEventFilter(cloudEventSource.Spec.EventSubscription.IncludedEventTypes, cloudEventSource.Spec.EventSubscription.ExcludedEventTypes) } // clearEventHandlersCache will clear all event handlers that created by the passing CloudEventSource func (e *EventEmitter) clearEventHandlersCache(cloudEventSource *eventingv1alpha1.CloudEventSource) { e.eventHandlersCacheLock.Lock() defer e.eventHandlersCacheLock.Unlock() + e.eventFilterCacheLock.Lock() + defer e.eventFilterCacheLock.Unlock() key := cloudEventSource.GenerateIdentifier() @@ -205,6 +216,8 @@ func (e *EventEmitter) clearEventHandlersCache(cloudEventSource *eventingv1alpha delete(e.eventHandlersCache, key) } } + + delete(e.eventFilterCache, key) } // checkIfEventHandlersExist will check if the event handlers that were created by passing CloudEventSource exist @@ -274,7 +287,7 @@ func (e *EventEmitter) checkEventHandlers(ctx context.Context, cloudEventSource } // Emit is emitting event to both local kubernetes and custom CloudEventSource handler. After emit event to local kubernetes, event will inqueue and waitng for handler's consuming. -func (e *EventEmitter) Emit(object runtime.Object, namesapce types.NamespacedName, eventType, cloudeventType, reason, message string) { +func (e *EventEmitter) Emit(object runtime.Object, namesapce types.NamespacedName, eventType string, cloudeventType eventingv1alpha1.CloudEventType, reason, message string) { e.recorder.Event(object, eventType, reason, message) e.eventHandlersCacheLock.RLock() @@ -286,13 +299,13 @@ func (e *EventEmitter) Emit(object runtime.Object, namesapce types.NamespacedNam objectName, _ := meta.NewAccessor().Name(object) objectType, _ := meta.NewAccessor().Kind(object) eventData := eventdata.EventData{ - Namespace: namesapce.Namespace, - EventType: cloudeventType, - ObjectName: strings.ToLower(objectName), - ObjectType: strings.ToLower(objectType), - Reason: reason, - Message: message, - Time: time.Now().UTC(), + Namespace: namesapce.Namespace, + CloudEventType: cloudeventType, + ObjectName: strings.ToLower(objectName), + ObjectType: strings.ToLower(objectType), + Reason: reason, + Message: message, + Time: time.Now().UTC(), } go e.enqueueEventData(eventData) } @@ -324,6 +337,19 @@ func (e *EventEmitter) emitEventByHandler(eventData eventdata.EventData) { if eventData.HandlerKey == "" { for key, handler := range e.eventHandlersCache { + e.eventFilterCacheLock.RLock() + defer e.eventFilterCacheLock.RUnlock() + // Filter Event + identifierKey := getPrefixIdentifierFromKey(key) + + if e.eventFilterCache[identifierKey] != nil { + isFiltered := e.eventFilterCache[identifierKey].FilterEvent(eventData.CloudEventType) + if isFiltered { + e.log.V(1).Info("Event is filtered", "cloudeventType", eventData.CloudEventType, "event identifier", identifierKey) + return + } + } + eventData.HandlerKey = key if handler.GetActiveStatus() == metav1.ConditionTrue { go handler.EmitEvent(eventData, e.emitErrorHandle) @@ -400,6 +426,16 @@ func 
newEventHandlerKey(kindNamespaceName string, handlerType string) string { /
 	return fmt.Sprintf("%s.%s", kindNamespaceName, handlerType)
 }
 
+// getPrefixIdentifierFromKey will return the prefix identifier from the handler key. Handler key is generated in the format "CloudEventSource.Namespace.Name.HandlerType" and the prefix identifier is "CloudEventSource.Namespace.Name"
+func getPrefixIdentifierFromKey(handlerKey string) string {
+	keys := strings.Split(handlerKey, ".")
+	if len(keys) >= 3 {
+		return keys[0] + "." + keys[1] + "." + keys[2]
+	}
+	return ""
+}
+
+// getHandlerTypeFromKey will return the handler type from the handler key. Handler key is generated in the format "CloudEventSource.Namespace.Name.HandlerType" and the handler type is "HandlerType"
 func getHandlerTypeFromKey(handlerKey string) string {
 	keys := strings.Split(handlerKey, ".")
 	if len(keys) >= 4 {
@@ -408,6 +444,7 @@ func getHandlerTypeFromKey(handlerKey string) string {
 	return ""
 }
 
+// getSourceNameFromKey will return the source name from the handler key. Handler key is generated in the format "CloudEventSource.Namespace.Name.HandlerType" and the source name is "Name"
 func getSourceNameFromKey(handlerKey string) string {
 	keys := strings.Split(handlerKey, ".")
 	if len(keys) >= 4 {
diff --git a/pkg/eventemitter/eventemitter_test.go b/pkg/eventemitter/eventemitter_test.go
index 7e93466a6d4..a8ad2ba1986 100644
--- a/pkg/eventemitter/eventemitter_test.go
+++ b/pkg/eventemitter/eventemitter_test.go
@@ -66,23 +66,27 @@ func TestEventHandler_FailedEmitEvent(t *testing.T) {
 	key := newEventHandlerKey(cloudEventSource.GenerateIdentifier(), cloudEventHandlerTypeHTTP)
 	caches[key] = eventHandler
 
+	filtercaches := map[string]*EventFilter{}
+
 	eventEmitter := EventEmitter{
 		client:                   mockClient,
 		recorder:                 recorder,
 		clusterName:              "cluster-name",
 		eventHandlersCache:       caches,
 		eventHandlersCacheLock:   &sync.RWMutex{},
+		eventFilterCache:         filtercaches,
+		eventFilterCacheLock:     &sync.RWMutex{},
 		eventLoopContexts:        &sync.Map{},
 		cloudEventProcessingChan: make(chan eventdata.EventData, 1),
 	}
 
 	eventData := eventdata.EventData{
-		Namespace:  "aaa",
-		ObjectName: "bbb",
-		EventType:  "ccc",
-		Reason:     "ddd",
-		Message:    "eee",
-		Time:       time.Now().UTC(),
+		Namespace:      "aaa",
+		ObjectName:     "bbb",
+		CloudEventType: "ccc",
+		Reason:         "ddd",
+		Message:        "eee",
+		Time:           time.Now().UTC(),
 	}
 
 	mockClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
@@ -128,23 +132,27 @@ func TestEventHandler_DirectCall(t *testing.T) {
 	key := newEventHandlerKey(cloudEventSource.GenerateIdentifier(), cloudEventHandlerTypeHTTP)
 	caches[key] = eventHandler
 
+	filtercaches := map[string]*EventFilter{}
+
 	eventEmitter := EventEmitter{
 		client:                   mockClient,
 		recorder:                 recorder,
 		clusterName:              "cluster-name",
 		eventHandlersCache:       caches,
 		eventHandlersCacheLock:   &sync.RWMutex{},
+		eventFilterCache:         filtercaches,
+		eventFilterCacheLock:     &sync.RWMutex{},
 		eventLoopContexts:        &sync.Map{},
 		cloudEventProcessingChan: make(chan eventdata.EventData, 1),
 	}
 
 	eventData := eventdata.EventData{
-		Namespace:  "aaa",
-		ObjectName: "bbb",
-		EventType:  "ccc",
-		Reason:     "ddd",
-		Message:    "eee",
-		Time:       time.Now().UTC(),
+		Namespace:      "aaa",
+		ObjectName:     "bbb",
+		CloudEventType: "ccc",
+		Reason:         "ddd",
+		Message:        "eee",
+		Time:           time.Now().UTC(),
 	}
 
 	mockClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
diff --git a/pkg/eventemitter/eventfilter.go b/pkg/eventemitter/eventfilter.go
new file mode 100644
index 00000000000..11af3af1ac8
--- /dev/null
+++ b/pkg/eventemitter/eventfilter.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2024 The KEDA Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package eventemitter
+
+import (
+	"golang.org/x/exp/slices"
+
+	eventingv1alpha1 "github.com/kedacore/keda/v2/apis/eventing/v1alpha1"
+)
+
+// EventFilter holds the included and excluded event types that decide which events are emitted
+type EventFilter struct {
+	IncludedEventTypes []eventingv1alpha1.CloudEventType
+
+	ExcludedEventTypes []eventingv1alpha1.CloudEventType
+}
+
+// NewEventFilter creates a new EventFilter
+func NewEventFilter(includedEventTypes []eventingv1alpha1.CloudEventType, excludedEventTypes []eventingv1alpha1.CloudEventType) *EventFilter {
+	return &EventFilter{
+		IncludedEventTypes: includedEventTypes,
+		ExcludedEventTypes: excludedEventTypes,
+	}
+}
+
+// FilterEvent returns true if the event is filtered and should not be handled
+func (e *EventFilter) FilterEvent(eventType eventingv1alpha1.CloudEventType) bool {
+	if len(e.IncludedEventTypes) > 0 {
+		return !slices.Contains(e.IncludedEventTypes, eventType)
+	}
+
+	if len(e.ExcludedEventTypes) > 0 {
+		return slices.Contains(e.ExcludedEventTypes, eventType)
+	}
+
+	return false
+}
diff --git a/pkg/mock/mock_eventemitter/mock_interface.go b/pkg/mock/mock_eventemitter/mock_interface.go
index 63bc19082c4..d3346ea50bf 100644
--- a/pkg/mock/mock_eventemitter/mock_interface.go
+++ b/pkg/mock/mock_eventemitter/mock_interface.go
@@ -59,7 +59,7 @@ func (mr *MockEventHandlerMockRecorder) DeleteCloudEventSource(cloudEventSource
 }
 
 // Emit mocks base method.
-func (m *MockEventHandler) Emit(object runtime.Object, namesapce types.NamespacedName, eventType, cloudeventType, reason, message string) { +func (m *MockEventHandler) Emit(object runtime.Object, namesapce types.NamespacedName, eventType string, cloudeventType v1alpha1.CloudEventType, reason, message string) { m.ctrl.T.Helper() m.ctrl.Call(m, "Emit", object, namesapce, eventType, cloudeventType, reason, message) } diff --git a/tests/internals/cloudevent_source/cloudevent_source_test.go b/tests/internals/cloudevent_source/cloudevent_source_test.go index 01080cc4cd0..e56579207a3 100644 --- a/tests/internals/cloudevent_source/cloudevent_source_test.go +++ b/tests/internals/cloudevent_source/cloudevent_source_test.go @@ -29,12 +29,15 @@ var ( scaledObjectName = fmt.Sprintf("%s-so", testName) clientName = fmt.Sprintf("%s-client", testName) cloudeventSourceName = fmt.Sprintf("%s-ce", testName) + cloudeventSourceErrName = fmt.Sprintf("%s-ce-err", testName) + cloudeventSourceErrName2 = fmt.Sprintf("%s-ce-err2", testName) cloudEventHTTPReceiverName = fmt.Sprintf("%s-cloudevent-http-receiver", testName) cloudEventHTTPServiceName = fmt.Sprintf("%s-cloudevent-http-service", testName) cloudEventHTTPServiceURL = fmt.Sprintf("http://%s.%s.svc.cluster.local:8899", cloudEventHTTPServiceName, namespace) clusterName = "test-cluster" expectedSubject = fmt.Sprintf("/%s/%s/scaledobject/%s", clusterName, namespace, scaledObjectName) expectedSource = fmt.Sprintf("/%s/keda/keda", clusterName) + lastCloudEventTime = time.Now() ) type templateData struct { @@ -42,6 +45,8 @@ type templateData struct { ScaledObject string ClientName string CloudEventSourceName string + CloudeventSourceErrName string + CloudeventSourceErrName2 string CloudEventHTTPReceiverName string CloudEventHTTPServiceName string CloudEventHTTPServiceURL string @@ -62,6 +67,38 @@ const ( uri: {{.CloudEventHTTPServiceURL}} ` + cloudEventSourceWithExcludeTemplate = ` + apiVersion: eventing.keda.sh/v1alpha1 + kind: CloudEventSource + metadata: + name: {{.CloudEventSourceName}} + namespace: {{.TestNamespace}} + spec: + clusterName: {{.ClusterName}} + destination: + http: + uri: {{.CloudEventHTTPServiceURL}} + eventSubscription: + excludedEventTypes: + - keda.scaledobject.failed.v1 + ` + + cloudEventSourceWithIncludeTemplate = ` + apiVersion: eventing.keda.sh/v1alpha1 + kind: CloudEventSource + metadata: + name: {{.CloudEventSourceName}} + namespace: {{.TestNamespace}} + spec: + clusterName: {{.ClusterName}} + destination: + http: + uri: {{.CloudEventHTTPServiceURL}} + eventSubscription: + includedEventTypes: + - keda.scaledobject.failed.v1 + ` + cloudEventHTTPServiceTemplate = ` apiVersion: v1 kind: Service @@ -139,6 +176,40 @@ spec: - sh - -c - "exec tail -f /dev/null"` + + cloudEventSourceWithErrTypeTemplate = ` + apiVersion: eventing.keda.sh/v1alpha1 + kind: CloudEventSource + metadata: + name: {{.CloudeventSourceErrName}} + namespace: {{.TestNamespace}} + spec: + clusterName: {{.ClusterName}} + destination: + http: + uri: {{.CloudEventHTTPServiceURL}} + eventSubscription: + includedEventTypes: + - keda.scaledobject.failed.v2 + ` + + cloudEventSourceWithErrTypeTemplate2 = ` + apiVersion: eventing.keda.sh/v1alpha1 + kind: CloudEventSource + metadata: + name: {{.CloudeventSourceErrName2}} + namespace: {{.TestNamespace}} + spec: + clusterName: {{.ClusterName}} + destination: + http: + uri: {{.CloudEventHTTPServiceURL}} + eventSubscription: + includedEventTypes: + - keda.scaledobject.failed.v1 + excludedEventTypes: + - keda.scaledobject.failed.v1 + ` ) 
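// Editor's aside — an illustrative sketch, not part of this patch: the
// include/exclude templates above map onto the EventFilter added earlier in
// this patch (pkg/eventemitter/eventfilter.go). FilterEvent returns true when
// an event should be dropped, and a non-empty include list takes precedence
// over the exclude list. Assuming access to the eventemitter and
// eventing/v1alpha1 packages:
//
//	f := eventemitter.NewEventFilter(
//		[]eventingv1alpha1.CloudEventType{eventingv1alpha1.ScaledObjectFailedType}, // included
//		nil,                                                                        // excluded
//	)
//	f.FilterEvent(eventingv1alpha1.ScaledObjectFailedType) // false => emitted
//	f.FilterEvent(eventingv1alpha1.ScaledObjectReadyType)  // true  => dropped
//
//	g := eventemitter.NewEventFilter(nil,
//		[]eventingv1alpha1.CloudEventType{eventingv1alpha1.ScaledObjectFailedType})
//	g.FilterEvent(eventingv1alpha1.ScaledObjectFailedType) // true  => dropped
//	g.FilterEvent(eventingv1alpha1.ScaledObjectReadyType)  // false => emitted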
func TestScaledObjectGeneral(t *testing.T) {
@@ -149,10 +220,12 @@ func TestScaledObjectGeneral(t *testing.T) {
 	data, templates := getTemplateData()
 	CreateKubernetesResources(t, kc, namespace, data, templates)
 
-	time.Sleep(15 * time.Second)
 	assert.True(t, WaitForAllPodRunningInNamespace(t, kc, namespace, 5, 20), "all pods should be running")
 
 	testErrEventSourceEmitValue(t, kc, data)
+	testErrEventSourceExcludeValue(t, kc, data)
+	testErrEventSourceIncludeValue(t, kc, data)
+	testErrEventSourceCreation(t, kc, data)
 
 	DeleteKubernetesResources(t, namespace, data, templates)
 }
@@ -163,7 +236,9 @@ func testErrEventSourceEmitValue(t *testing.T, _ *kubernetes.Clientset, data tem
 	KubectlApplyWithTemplate(t, data, "scaledObjectErrTemplate", scaledObjectErrTemplate)
 
 	// wait 15 seconds to ensure event propagation
-	time.Sleep(15 * time.Second)
+	time.Sleep(5 * time.Second)
+	KubectlDeleteWithTemplate(t, data, "scaledObjectErrTemplate", scaledObjectErrTemplate)
+	time.Sleep(10 * time.Second)
 
 	out, outErr, err := ExecCommandOnSpecificPod(t, clientName, namespace, fmt.Sprintf("curl -X GET %s/getCloudEvent/%s", cloudEventHTTPServiceURL, "ScaledObjectCheckFailed"))
 	assert.NotEmpty(t, out)
@@ -188,9 +263,98 @@ func testErrEventSourceEmitValue(t *testing.T, _ *kubernetes.Clientset, data tem
 			assert.Equal(t, cloudEvent.Type(), "keda.scaledobject.failed.v1")
 			assert.Equal(t, cloudEvent.Source(), expectedSource)
 			assert.Equal(t, cloudEvent.DataContentType(), "application/json")
+
+			if lastCloudEventTime.Before(cloudEvent.Time()) {
+				lastCloudEventTime = cloudEvent.Time()
+			}
+		}
+	}
+	assert.NotEmpty(t, foundEvents)
+}
+
+// tests that excluded error events are not emitted
+func testErrEventSourceExcludeValue(t *testing.T, _ *kubernetes.Clientset, data templateData) {
+	t.Log("--- test emitting eventsource about scaledobject err with exclude filter ---")
+
+	KubectlDeleteWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate)
+	KubectlApplyWithTemplate(t, data, "cloudEventSourceWithExcludeTemplate", cloudEventSourceWithExcludeTemplate)
+	KubectlApplyWithTemplate(t, data, "scaledObjectErrTemplate", scaledObjectErrTemplate)
+
+	// wait 15 seconds to ensure event propagation
+	time.Sleep(15 * time.Second)
+
+	out, outErr, err := ExecCommandOnSpecificPod(t, clientName, namespace, fmt.Sprintf("curl -X GET %s/getCloudEvent/%s", cloudEventHTTPServiceURL, "ScaledObjectCheckFailed"))
+	assert.NotEmpty(t, out)
+	assert.Empty(t, outErr)
+	assert.NoError(t, err, "don't expect error requesting")
+
+	cloudEvents := []cloudevents.Event{}
+	err = json.Unmarshal([]byte(out), &cloudEvents)
+
+	assert.NoError(t, err, "don't expect error unmarshaling the cloudEvents")
+
+	for _, cloudEvent := range cloudEvents {
+		assert.Condition(t, func() bool {
+			if cloudEvent.Subject() == expectedSubject &&
+				cloudEvent.Time().After(lastCloudEventTime) &&
+				cloudEvent.Type() == "keda.scaledobject.failed.v1" {
+				return false
+			}
+			return true
+		}, "should not get filtered event")
+	}
+
+	KubectlDeleteWithTemplate(t, data, "cloudEventSourceWithExcludeTemplate", cloudEventSourceWithExcludeTemplate)
+	KubectlApplyWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate)
+}
+
+// tests that included error events are emitted
+func testErrEventSourceIncludeValue(t *testing.T, _ *kubernetes.Clientset, data templateData) {
+	t.Log("--- test emitting eventsource about scaledobject err with include filter ---")
+
+	KubectlDeleteWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate)
+	KubectlApplyWithTemplate(t, data, "cloudEventSourceWithIncludeTemplate", cloudEventSourceWithIncludeTemplate)
+	KubectlApplyWithTemplate(t, data, "scaledObjectErrTemplate", scaledObjectErrTemplate)
+
+	// wait 15 seconds to ensure event propagation
+	time.Sleep(15 * time.Second)
+
+	out, outErr, err := ExecCommandOnSpecificPod(t, clientName, namespace, fmt.Sprintf("curl -X GET %s/getCloudEvent/%s", cloudEventHTTPServiceURL, "ScaledObjectCheckFailed"))
+	assert.NotEmpty(t, out)
+	assert.Empty(t, outErr)
+	assert.NoError(t, err, "don't expect error requesting")
+
+	cloudEvents := []cloudevents.Event{}
+	err = json.Unmarshal([]byte(out), &cloudEvents)
+
+	assert.NoError(t, err, "don't expect error unmarshaling the cloudEvents")
+
+	foundEvents := []cloudevents.Event{}
+	for _, cloudEvent := range cloudEvents {
+		if cloudEvent.Subject() == expectedSubject &&
+			cloudEvent.Time().After(lastCloudEventTime) &&
+			cloudEvent.Type() == "keda.scaledobject.failed.v1" {
+			foundEvents = append(foundEvents, cloudEvent)
+		}
+	}
+	assert.NotEmpty(t, foundEvents)
+	KubectlDeleteWithTemplate(t, data, "cloudEventSourceWithIncludeTemplate", cloudEventSourceWithIncludeTemplate)
+	KubectlApplyWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate)
+}
+
+// tests invalid event types at creation
+func testErrEventSourceCreation(t *testing.T, _ *kubernetes.Clientset, data templateData) {
+	t.Log("--- test creating cloudeventsource with invalid event types ---")
+
+	KubectlDeleteWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate)
+
+	err := KubectlApplyWithErrors(t, data, "cloudEventSourceWithErrTypeTemplate", cloudEventSourceWithErrTypeTemplate)
+	assert.ErrorContains(t, err, `The CloudEventSource "eventsource-test-ce-err" is invalid:`)
+
+	err = KubectlApplyWithErrors(t, data, "cloudEventSourceWithErrTypeTemplate2", cloudEventSourceWithErrTypeTemplate2)
+	assert.ErrorContains(t, err, `setting included types and excluded types at the same time is not supported`)
+
+	KubectlApplyWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate)
+}
 
 // help function to load template data
@@ -200,6 +364,8 @@ func getTemplateData() (templateData, []Template) {
 		ScaledObject:               scaledObjectName,
 		ClientName:                 clientName,
 		CloudEventSourceName:       cloudeventSourceName,
+		CloudeventSourceErrName:    cloudeventSourceErrName,
+		CloudeventSourceErrName2:   cloudeventSourceErrName2,
 		CloudEventHTTPReceiverName: cloudEventHTTPReceiverName,
 		CloudEventHTTPServiceName:  cloudEventHTTPServiceName,
 		CloudEventHTTPServiceURL:   cloudEventHTTPServiceURL,

From adfe8677fa0298794d5de191295f492a32f7027a Mon Sep 17 00:00:00 2001
From: aliaqel-stripe <120822631+aliaqel-stripe@users.noreply.github.com>
Date: Thu, 11 Apr 2024 05:31:47 -0700
Subject: [PATCH 08/10] Revert "Add GRPC Healthchecks (#5581)" (#5681)

Signed-off-by: Ali Aqel
---
 CHANGELOG.md                                  |   1 -
 cmd/operator/main.go                          |   2 +-
 pkg/metricsservice/client.go                  |  28 ++-
 pkg/metricsservice/server.go                  |  44 ++---
 .../google.golang.org/grpc/health/client.go   | 117 -------------
 .../google.golang.org/grpc/health/logging.go  |  23 ---
 .../google.golang.org/grpc/health/server.go   | 163 ------------------
 vendor/modules.txt                            |   1 -
 8 files changed, 25 insertions(+), 354 deletions(-)
 delete mode 100644 vendor/google.golang.org/grpc/health/client.go
 delete mode 100644 vendor/google.golang.org/grpc/health/logging.go
 delete mode 100644 vendor/google.golang.org/grpc/health/server.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d9429608c3c..bd1de3e38b0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -67,7 
+67,6 @@ Here is an overview of all new **experimental** features: - **General**: Add active trigger name in ScaledObject's scale out event ([#5577](https://github.com/kedacore/keda/issues/5577)) - **General**: Add command-line flag in Adapter to allow override of gRPC Authority Header ([#5449](https://github.com/kedacore/keda/issues/5449)) - **General**: Add GRPC Client and Server metrics ([#5502](https://github.com/kedacore/keda/issues/5502)) -- **General**: Add GRPC Healthchecks ([#5590](https://github.com/kedacore/keda/issues/5590)) - **General**: Add OPENTELEMETRY flag in e2e test YAML ([#5375](https://github.com/kedacore/keda/issues/5375)) - **General**: Add support for cross tenant/cloud authentication when using Azure Workload Identity for TriggerAuthentication ([#5441](https://github.com/kedacore/keda/issues/5441)) - **General**: Support csv-format for WATCH_NAMESPACE env var ([#5670](https://github.com/kedacore/keda/issues/5670)) diff --git a/cmd/operator/main.go b/cmd/operator/main.go index db04089e0e3..945881595fb 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -300,7 +300,7 @@ func main() { close(certReady) } - grpcServer := metricsservice.NewGrpcServer(&scaledHandler, metricsServiceAddr, certDir, certReady, mgr.Elected()) + grpcServer := metricsservice.NewGrpcServer(&scaledHandler, metricsServiceAddr, certDir, certReady) if err := mgr.Add(&grpcServer); err != nil { setupLog.Error(err, "unable to set up Metrics Service gRPC server") os.Exit(1) diff --git a/pkg/metricsservice/client.go b/pkg/metricsservice/client.go index 7c01c77cebc..be7f408fb38 100644 --- a/pkg/metricsservice/client.go +++ b/pkg/metricsservice/client.go @@ -38,23 +38,17 @@ type GrpcClient struct { } func NewGrpcClient(url, certDir, authority string, clientMetrics *grpcprom.ClientMetrics) (*GrpcClient, error) { - defaultConfig := fmt.Sprintf(`{ - "methodConfig": [{ - "timeout": "3s", - "waitForReady": true, - "retryPolicy": { - "InitialBackoff": ".25s", - "MaxBackoff": "2.0s", - "BackoffMultiplier": 2, - "RetryableStatusCodes": [ "UNAVAILABLE" ] - } - }], - "loadBalancingPolicy": "round_robin", - "healthCheckConfig": { - "serviceName": "%s" - } - }`, - api.MetricsService_ServiceDesc.ServiceName) + defaultConfig := `{ + "methodConfig": [{ + "timeout": "3s", + "waitForReady": true, + "retryPolicy": { + "InitialBackoff": ".25s", + "MaxBackoff": "2.0s", + "BackoffMultiplier": 2, + "RetryableStatusCodes": [ "UNAVAILABLE" ] + } + }]}` creds, err := utils.LoadGrpcTLSCredentials(certDir, false) if err != nil { diff --git a/pkg/metricsservice/server.go b/pkg/metricsservice/server.go index 1c9a948b857..73b456e9545 100644 --- a/pkg/metricsservice/server.go +++ b/pkg/metricsservice/server.go @@ -22,8 +22,6 @@ import ( "net" "google.golang.org/grpc" - "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" "k8s.io/metrics/pkg/apis/external_metrics/v1beta1" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -37,11 +35,9 @@ var log = logf.Log.WithName("grpc_server") type GrpcServer struct { server *grpc.Server - healthServer *health.Server address string certDir string certsReady chan struct{} - elected <-chan struct{} scalerHandler *scaling.ScaleHandler api.UnimplementedMetricsServiceServer } @@ -65,13 +61,12 @@ func (s *GrpcServer) GetMetrics(ctx context.Context, in *api.ScaledObjectRef) (* } // NewGrpcServer creates a new instance of GrpcServer -func NewGrpcServer(scaleHandler *scaling.ScaleHandler, address, certDir string, certsReady chan struct{}, elected <-chan struct{}) 
GrpcServer { +func NewGrpcServer(scaleHandler *scaling.ScaleHandler, address, certDir string, certsReady chan struct{}) GrpcServer { return GrpcServer{ address: address, scalerHandler: scaleHandler, certDir: certDir, certsReady: certsReady, - elected: elected, } } @@ -88,8 +83,8 @@ func (s *GrpcServer) startServer() error { return nil } -// StartGrpcServer starts the grpc server in non-serving mode and when the controller is elected leader -// sets the status of the server to Serving. +// Start starts a new gRPC Metrics Service, this implements Runnable interface +// of controller-runtime Manager, so we can use mgr.Add() to start this component. func (s *GrpcServer) Start(ctx context.Context) error { <-s.certsReady if s.server == nil { @@ -112,43 +107,30 @@ func (s *GrpcServer) Start(ctx context.Context) error { s.server = grpc.NewServer(grpcServerOpts...) api.RegisterMetricsServiceServer(s.server, s) - - s.healthServer = health.NewServer() - s.healthServer.SetServingStatus(api.MetricsService_ServiceDesc.ServiceName, grpc_health_v1.HealthCheckResponse_NOT_SERVING) - grpc_health_v1.RegisterHealthServer(s.server, s.healthServer) } errChan := make(chan error) go func() { log.Info("Starting Metrics Service gRPC Server", "address", s.address) - if err := s.startServer(); err != nil && err != grpc.ErrServerStopped { + if err := s.startServer(); err != nil { err := fmt.Errorf("unable to start Metrics Service gRPC server on address %s, error: %w", s.address, err) log.Error(err, "error starting Metrics Service gRPC server") errChan <- err } }() - for { - select { - case err := <-errChan: - return err - case <-ctx.Done(): - log.Info("Shutting down gRPC server") - s.healthServer.SetServingStatus(api.MetricsService_ServiceDesc.ServiceName, grpc_health_v1.HealthCheckResponse_NOT_SERVING) - s.server.GracefulStop() - return nil - case <-s.elected: - // clear the channel now that we are leader-elected - s.elected = nil - log.Info("Setting gRPC server status to Serving") - s.healthServer.SetServingStatus(api.MetricsService_ServiceDesc.ServiceName, grpc_health_v1.HealthCheckResponse_SERVING) - } + select { + case err := <-errChan: + return err + case <-ctx.Done(): + return nil } } -// We don't want to wait until LeaderElection to start the GRPC server, but we want to switch to Serving state once we are elected. -// Hence, here, we say we don't need leader election here and above we listen to the Elected channel from the manager to set the server to Serving +// NeedLeaderElection is needed to implement LeaderElectionRunnable interface +// of controller-runtime. This assures that the component is started/stoped +// when this particular instance is selected/deselected as a leader. func (s *GrpcServer) NeedLeaderElection() bool { - return false + return true } diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go deleted file mode 100644 index 740745c45f6..00000000000 --- a/vendor/google.golang.org/grpc/health/client.go +++ /dev/null @@ -1,117 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package health - -import ( - "context" - "fmt" - "io" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/status" -) - -var ( - backoffStrategy = backoff.DefaultExponential - backoffFunc = func(ctx context.Context, retries int) bool { - d := backoffStrategy.Backoff(retries) - timer := time.NewTimer(d) - select { - case <-timer.C: - return true - case <-ctx.Done(): - timer.Stop() - return false - } - } -) - -func init() { - internal.HealthCheckFunc = clientHealthCheck -} - -const healthCheckMethod = "/grpc.health.v1.Health/Watch" - -// This function implements the protocol defined at: -// https://github.com/grpc/grpc/blob/master/doc/health-checking.md -func clientHealthCheck(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), service string) error { - tryCnt := 0 - -retryConnection: - for { - // Backs off if the connection has failed in some way without receiving a message in the previous retry. - if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) { - return nil - } - tryCnt++ - - if ctx.Err() != nil { - return nil - } - setConnectivityState(connectivity.Connecting, nil) - rawS, err := newStream(healthCheckMethod) - if err != nil { - continue retryConnection - } - - s, ok := rawS.(grpc.ClientStream) - // Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes. - if !ok { - setConnectivityState(connectivity.Ready, nil) - return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS) - } - - if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF { - // Stream should have been closed, so we can safely continue to create a new stream. - continue retryConnection - } - s.CloseSend() - - resp := new(healthpb.HealthCheckResponse) - for { - err = s.RecvMsg(resp) - - // Reports healthy for the LBing purposes if health check is not implemented in the server. - if status.Code(err) == codes.Unimplemented { - setConnectivityState(connectivity.Ready, nil) - return err - } - - // Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED. - if err != nil { - setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but received health check RPC error: %v", err)) - continue retryConnection - } - - // As a message has been received, removes the need for backoff for the next retry by resetting the try count. - tryCnt = 0 - if resp.Status == healthpb.HealthCheckResponse_SERVING { - setConnectivityState(connectivity.Ready, nil) - } else { - setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but health check failed. status=%s", resp.Status)) - } - } - } -} diff --git a/vendor/google.golang.org/grpc/health/logging.go b/vendor/google.golang.org/grpc/health/logging.go deleted file mode 100644 index 83c6acf55ef..00000000000 --- a/vendor/google.golang.org/grpc/health/logging.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package health - -import "google.golang.org/grpc/grpclog" - -var logger = grpclog.Component("health_service") diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go deleted file mode 100644 index cce6312d77f..00000000000 --- a/vendor/google.golang.org/grpc/health/server.go +++ /dev/null @@ -1,163 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package health provides a service that exposes server's health and it must be -// imported to enable support for client-side health checks. -package health - -import ( - "context" - "sync" - - "google.golang.org/grpc/codes" - healthgrpc "google.golang.org/grpc/health/grpc_health_v1" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/status" -) - -// Server implements `service Health`. -type Server struct { - healthgrpc.UnimplementedHealthServer - mu sync.RWMutex - // If shutdown is true, it's expected all serving status is NOT_SERVING, and - // will stay in NOT_SERVING. - shutdown bool - // statusMap stores the serving status of the services this Server monitors. - statusMap map[string]healthpb.HealthCheckResponse_ServingStatus - updates map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus -} - -// NewServer returns a new Server. -func NewServer() *Server { - return &Server{ - statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING}, - updates: make(map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus), - } -} - -// Check implements `service Health`. -func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { - s.mu.RLock() - defer s.mu.RUnlock() - if servingStatus, ok := s.statusMap[in.Service]; ok { - return &healthpb.HealthCheckResponse{ - Status: servingStatus, - }, nil - } - return nil, status.Error(codes.NotFound, "unknown service") -} - -// Watch implements `service Health`. -func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { - service := in.Service - // update channel is used for getting service status updates. - update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1) - s.mu.Lock() - // Puts the initial status to the channel. 
- if servingStatus, ok := s.statusMap[service]; ok { - update <- servingStatus - } else { - update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN - } - - // Registers the update channel to the correct place in the updates map. - if _, ok := s.updates[service]; !ok { - s.updates[service] = make(map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus) - } - s.updates[service][stream] = update - defer func() { - s.mu.Lock() - delete(s.updates[service], stream) - s.mu.Unlock() - }() - s.mu.Unlock() - - var lastSentStatus healthpb.HealthCheckResponse_ServingStatus = -1 - for { - select { - // Status updated. Sends the up-to-date status to the client. - case servingStatus := <-update: - if lastSentStatus == servingStatus { - continue - } - lastSentStatus = servingStatus - err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus}) - if err != nil { - return status.Error(codes.Canceled, "Stream has ended.") - } - // Context done. Removes the update channel from the updates map. - case <-stream.Context().Done(): - return status.Error(codes.Canceled, "Stream has ended.") - } - } -} - -// SetServingStatus is called when need to reset the serving status of a service -// or insert a new service entry into the statusMap. -func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { - s.mu.Lock() - defer s.mu.Unlock() - if s.shutdown { - logger.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus) - return - } - - s.setServingStatusLocked(service, servingStatus) -} - -func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { - s.statusMap[service] = servingStatus - for _, update := range s.updates[service] { - // Clears previous updates, that are not sent to the client, from the channel. - // This can happen if the client is not reading and the server gets flow control limited. - select { - case <-update: - default: - } - // Puts the most recent update to the channel. - update <- servingStatus - } -} - -// Shutdown sets all serving status to NOT_SERVING, and configures the server to -// ignore all future status changes. -// -// This changes serving status for all services. To set status for a particular -// services, call SetServingStatus(). -func (s *Server) Shutdown() { - s.mu.Lock() - defer s.mu.Unlock() - s.shutdown = true - for service := range s.statusMap { - s.setServingStatusLocked(service, healthpb.HealthCheckResponse_NOT_SERVING) - } -} - -// Resume sets all serving status to SERVING, and configures the server to -// accept all future status changes. -// -// This changes serving status for all services. To set status for a particular -// services, call SetServingStatus(). 
-func (s *Server) Resume() {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	s.shutdown = false
-	for service := range s.statusMap {
-		s.setServingStatusLocked(service, healthpb.HealthCheckResponse_SERVING)
-	}
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index cf666a56679..81b3b829adc 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1760,7 +1760,6 @@ google.golang.org/grpc/encoding
 google.golang.org/grpc/encoding/gzip
 google.golang.org/grpc/encoding/proto
 google.golang.org/grpc/grpclog
-google.golang.org/grpc/health
 google.golang.org/grpc/health/grpc_health_v1
 google.golang.org/grpc/internal
 google.golang.org/grpc/internal/backoff

From 80806a73218e7d128bd25945f573c2a91316d1d3 Mon Sep 17 00:00:00 2001
From: xrwang <68765051+xrwang8@users.noreply.github.com>
Date: Fri, 12 Apr 2024 18:28:25 +0800
Subject: [PATCH 09/10] add `InitialCooldownPeriod` for ScaledObjects (#5478)

Signed-off-by: xrwang <68765051+xrwang8@users.noreply.github.com>
Signed-off-by: wangxingrui
Co-authored-by: Jorge Turrado Ferrero
Co-authored-by: Zbynek Roubalik
---
 CHANGELOG.md                                |   3 +
 apis/keda/v1alpha1/scaledobject_types.go    |   2 +
 config/crd/bases/keda.sh_scaledobjects.yaml |   3 +
 pkg/scaling/executor/scale_scaledobjects.go |  12 +-
 .../initial_delay_cooldownperiod_test.go    | 120 ++++++++++++++++++
 5 files changed, 137 insertions(+), 3 deletions(-)
 create mode 100644 tests/internals/initial_delay_cooldownperiod/initial_delay_cooldownperiod_test.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index bd1de3e38b0..f62ba0515b5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -55,6 +55,9 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio
 - **General**: Provide capability to filter CloudEvents ([#3533](https://github.com/kedacore/keda/issues/3533))
 - **NATS Scaler**: Add TLS authentication ([#2296](https://github.com/kedacore/keda/issues/2296))
+- **ScaledObject**: Ability to specify `initialCooldownPeriod` ([#5008](https://github.com/kedacore/keda/issues/5008))
+
+
 #### Experimental
diff --git a/apis/keda/v1alpha1/scaledobject_types.go b/apis/keda/v1alpha1/scaledobject_types.go
index 70853774cd4..a806c3bd25c 100644
--- a/apis/keda/v1alpha1/scaledobject_types.go
+++ b/apis/keda/v1alpha1/scaledobject_types.go
@@ -100,6 +100,8 @@ type ScaledObjectSpec struct {
 	Triggers []ScaleTriggers `json:"triggers"`
 	// +optional
 	Fallback *Fallback `json:"fallback,omitempty"`
+	// +optional
+	InitialCooldownPeriod int32 `json:"initialCooldownPeriod,omitempty"`
 }

 // Fallback is the spec for fallback options
diff --git a/config/crd/bases/keda.sh_scaledobjects.yaml b/config/crd/bases/keda.sh_scaledobjects.yaml
index f103979c8b0..8f03de32b18 100644
--- a/config/crd/bases/keda.sh_scaledobjects.yaml
+++ b/config/crd/bases/keda.sh_scaledobjects.yaml
@@ -238,6 +238,9 @@ spec:
               idleReplicaCount:
                 format: int32
                 type: integer
+              initialCooldownPeriod:
+                format: int32
+                type: integer
               maxReplicaCount:
                 format: int32
                 type: integer
diff --git a/pkg/scaling/executor/scale_scaledobjects.go b/pkg/scaling/executor/scale_scaledobjects.go
index 0fb37f89136..f7f880afcde 100644
--- a/pkg/scaling/executor/scale_scaledobjects.go
+++ b/pkg/scaling/executor/scale_scaledobjects.go
@@ -253,12 +253,18 @@ func (e *scaleExecutor) scaleToZeroOrIdle(ctx context.Context, logger logr.Logge
 		cooldownPeriod = time.Second * time.Duration(defaultCooldownPeriod)
 	}

+	initialCooldownPeriod := time.Second * time.Duration(scaledObject.Spec.InitialCooldownPeriod)
+
+	// If the ScaledObject was just created, CreationTimestamp is zero; set it to now.
+	if scaledObject.ObjectMeta.CreationTimestamp.IsZero() {
+		scaledObject.ObjectMeta.CreationTimestamp = metav1.NewTime(time.Now())
+	}
+
 	// LastActiveTime can be nil if the ScaleTarget was scaled outside of KEDA.
 	// In this case we will ignore the cooldown period and scale it down
-	if scaledObject.Status.LastActiveTime == nil ||
-		scaledObject.Status.LastActiveTime.Add(cooldownPeriod).Before(time.Now()) {
+	if (scaledObject.Status.LastActiveTime == nil && scaledObject.ObjectMeta.CreationTimestamp.Add(initialCooldownPeriod).Before(time.Now())) ||
+		(scaledObject.Status.LastActiveTime != nil &&
+			scaledObject.Status.LastActiveTime.Add(cooldownPeriod).Before(time.Now())) {
 		// or last time a trigger was active was > cooldown period, so scale in.
-
 		idleValue, scaleToReplicas := getIdleOrMinimumReplicaCount(scaledObject)

 		currentReplicas, err := e.updateScaleOnScaleTarget(ctx, scaledObject, scale, scaleToReplicas)
diff --git a/tests/internals/initial_delay_cooldownperiod/initial_delay_cooldownperiod_test.go b/tests/internals/initial_delay_cooldownperiod/initial_delay_cooldownperiod_test.go
new file mode 100644
index 00000000000..bfc7f307b95
--- /dev/null
+++ b/tests/internals/initial_delay_cooldownperiod/initial_delay_cooldownperiod_test.go
@@ -0,0 +1,120 @@
+//go:build e2e
+// +build e2e
+
+package initial_delay_cooldownperiod_test
+
+import (
+	"fmt"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+
+	. "github.com/kedacore/keda/v2/tests/helper"
+)
+
+const (
+	testName = "initial-delay-cooldownperiod-test"
+)
+
+var (
+	testNamespace    = fmt.Sprintf("%s-ns", testName)
+	deploymentName   = fmt.Sprintf("%s-deployment", testName)
+	scaledObjectName = fmt.Sprintf("%s-so", testName)
+
+	now   = time.Now().Local()
+	start = (now.Minute() + 10) % 60
+	end   = (start + 1) % 60
+)
+
+type templateData struct {
+	TestNamespace    string
+	DeploymentName   string
+	ScaledObjectName string
+	StartMin         string
+	EndMin           string
+}
+
+const (
+	deploymentTemplate = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{.DeploymentName}}
+  namespace: {{.TestNamespace}}
+  labels:
+    deploy: {{.DeploymentName}}
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      pod: {{.DeploymentName}}
+  template:
+    metadata:
+      labels:
+        pod: {{.DeploymentName}}
+    spec:
+      containers:
+        - name: nginx
+          image: 'nginxinc/nginx-unprivileged'
+`
+
+	scaledObjectTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+  name: {{.ScaledObjectName}}
+  namespace: {{.TestNamespace}}
+spec:
+  scaleTargetRef:
+    name: {{.DeploymentName}}
+  cooldownPeriod: 5
+  minReplicaCount: 0
+  initialCooldownPeriod: 120
+  advanced:
+    horizontalPodAutoscalerConfig:
+      behavior:
+        scaleDown:
+          stabilizationWindowSeconds: 15
+  triggers:
+    - type: cron
+      metadata:
+        timezone: Etc/UTC
+        start: {{.StartMin}} * * * *
+        end: {{.EndMin}} * * * *
+        desiredReplicas: '0'
+`
+)
+
+func TestScaler(t *testing.T) {
+	// setup
+	t.Log("--- setting up ---")
+
+	// Create kubernetes resources
+	kc := GetKubernetesClient(t)
+	data, templates := getTemplateData()
+
+	CreateKubernetesResources(t, kc, testNamespace, data, templates)
+
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1),
+		"replica count should be %d after 1 minute", 1)
+
+	t.Log("--- Waiting for some time to ensure deployment replica count doesn't change ---")
+	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 1, 90)
+
+	t.Log("--- scale to 0 replicas ---")
+	// Assert that the workload is scaled to zero after the initial cooldown period.
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 120, 1),
+		"replica count should be %d after 2 minutes", 0)
+
+	DeleteKubernetesResources(t, testNamespace, data, templates)
+}
+func getTemplateData() (templateData, []Template) {
+	return templateData{
+			TestNamespace:    testNamespace,
+			DeploymentName:   deploymentName,
+			ScaledObjectName: scaledObjectName,
+			StartMin:         strconv.Itoa(start),
+			EndMin:           strconv.Itoa(end),
+		}, []Template{
+			{Name: "deploymentTemplate", Config: deploymentTemplate},
+			{Name: "scaledObjectTemplate", Config: scaledObjectTemplate},
+		}
+}
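A note on the behavior this patch introduces: `initialCooldownPeriod` only gates the very first scale-in after a ScaledObject is created (measured against CreationTimestamp, while LastActiveTime is still nil); once a trigger has been active, the regular `cooldownPeriod` (measured against LastActiveTime) takes over. A minimal usage sketch follows — the resource names and the cron trigger values are illustrative placeholders, not taken from this patch:

# Hypothetical manifest (names are placeholders): a ScaledObject that may
# scale to zero, but never within the first 10 minutes after creation.
apiVersion: keda.sh/v1alpha1
kind: ScaledObject
metadata:
  name: example-so                 # placeholder name
  namespace: default
spec:
  scaleTargetRef:
    name: example-deployment      # placeholder workload
  minReplicaCount: 0
  cooldownPeriod: 300             # regular cooldown, measured from LastActiveTime
  initialCooldownPeriod: 600      # first scale-in gate, measured from CreationTimestamp
  triggers:
    - type: cron                  # same trigger type the e2e test above uses
      metadata:
        timezone: Etc/UTC
        start: 0 8 * * *
        end: 0 17 * * *
        desiredReplicas: '1'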
From bcaf5c07e785e3e58e3be4e3707b518fdef6acde Mon Sep 17 00:00:00 2001
From: June Han
Date: Mon, 15 Apr 2024 21:53:28 +0900
Subject: [PATCH 10/10] Add annotation on hpa to disable ownership validation
 (#5536)

Signed-off-by: June Han
---
 CHANGELOG.md                                    |  1 +
 apis/keda/v1alpha1/scaledobject_types.go        |  1 +
 apis/keda/v1alpha1/scaledobject_webhook.go      |  3 +++
 .../v1alpha1/scaledobject_webhook_test.go       | 20 +++++++++++++++++++
 4 files changed, 25 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f62ba0515b5..bc7219db7d4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -72,6 +72,7 @@ Here is an overview of all new **experimental** features:
 - **General**: Add GRPC Client and Server metrics ([#5502](https://github.com/kedacore/keda/issues/5502))
 - **General**: Add OPENTELEMETRY flag in e2e test YAML ([#5375](https://github.com/kedacore/keda/issues/5375))
 - **General**: Add support for cross tenant/cloud authentication when using Azure Workload Identity for TriggerAuthentication ([#5441](https://github.com/kedacore/keda/issues/5441))
+- **General**: Add `validations.keda.sh/hpa-ownership` annotation to HPA to disable ownership validation ([#5516](https://github.com/kedacore/keda/issues/5516))
 - **General**: Support csv-format for WATCH_NAMESPACE env var ([#5670](https://github.com/kedacore/keda/issues/5670))
 - **Azure Event Hub Scaler**: Remove usage of checkpoint offsets to account for SDK checkpointing implementation changes ([#5574](https://github.com/kedacore/keda/issues/5574))
 - **GCP Stackdriver Scaler**: Add missing parameters 'rate' and 'count' for GCP Stackdriver Scaler alignment ([#5633](https://github.com/kedacore/keda/issues/5633))
diff --git a/apis/keda/v1alpha1/scaledobject_types.go b/apis/keda/v1alpha1/scaledobject_types.go
index a806c3bd25c..bbe5c429185 100644
--- a/apis/keda/v1alpha1/scaledobject_types.go
+++ b/apis/keda/v1alpha1/scaledobject_types.go
@@ -53,6 +53,7 @@ type ScaledObject struct {
 const ScaledObjectOwnerAnnotation = "scaledobject.keda.sh/name"
 const ScaledObjectTransferHpaOwnershipAnnotation = "scaledobject.keda.sh/transfer-hpa-ownership"
+const ValidationsHpaOwnershipAnnotation = "validations.keda.sh/hpa-ownership"

 const PausedReplicasAnnotation = "autoscaling.keda.sh/paused-replicas"
 const PausedAnnotation = "autoscaling.keda.sh/paused"
diff --git a/apis/keda/v1alpha1/scaledobject_webhook.go b/apis/keda/v1alpha1/scaledobject_webhook.go
index 4c8253f640c..460d5225095 100644
--- a/apis/keda/v1alpha1/scaledobject_webhook.go
+++ b/apis/keda/v1alpha1/scaledobject_webhook.go
@@ -211,6 +211,9 @@ func verifyHpas(incomingSo *ScaledObject, action string, _ bool) error {
 	}

 	for _, hpa := range hpaList.Items {
+		if hpa.ObjectMeta.Annotations[ValidationsHpaOwnershipAnnotation] == "false" {
+			continue
+		}
 		val, _ := json.MarshalIndent(hpa, "", "  ")
 		scaledobjectlog.V(1).Info(fmt.Sprintf("checking hpa %s: %v",
			hpa.Name, string(val)))
diff --git a/apis/keda/v1alpha1/scaledobject_webhook_test.go b/apis/keda/v1alpha1/scaledobject_webhook_test.go
index be9640f319f..dbe898ee1f5 100644
--- a/apis/keda/v1alpha1/scaledobject_webhook_test.go
+++ b/apis/keda/v1alpha1/scaledobject_webhook_test.go
@@ -177,6 +177,26 @@ var _ = It("shouldn't validate the so creation when there is another unmanaged h
 	}).ShouldNot(HaveOccurred())
 })

+var _ = It("shouldn't validate the so creation when hpa has shared-ownership unactivated", func() {
+
+	hpaName := "test-hpa-disabled-validation-by-hpa-ownership"
+	namespaceName := "hpa-ownership"
+	namespace := createNamespace(namespaceName)
+	hpa := createHpa(hpaName, namespaceName, workloadName, "apps/v1", "Deployment", nil)
+	hpa.ObjectMeta.Annotations = map[string]string{ValidationsHpaOwnershipAnnotation: "false"}
+	so := createScaledObject(soName, namespaceName, workloadName, "apps/v1", "Deployment", false, map[string]string{ScaledObjectTransferHpaOwnershipAnnotation: "false"}, hpaName)
+
+	err := k8sClient.Create(context.Background(), namespace)
+	Expect(err).ToNot(HaveOccurred())
+
+	err = k8sClient.Create(context.Background(), hpa)
+	Expect(err).ToNot(HaveOccurred())
+
+	Eventually(func() error {
+		return k8sClient.Create(context.Background(), so)
+	}).ShouldNot(HaveOccurred())
+})
+
 var _ = It("shouldn't validate the so creation when there is another so", func() {

 	so2Name := "test-so2"
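For context on what the annotation gate above unlocks: `verifyHpas` now skips any HPA carrying `validations.keda.sh/hpa-ownership: "false"`, so a ScaledObject targeting the same workload passes webhook validation instead of being rejected as a conflict. A sketch of such an opted-out HPA — resource names are illustrative placeholders, not from this patch:

# Hypothetical HPA (names are placeholders) that opts out of KEDA's
# ownership validation via the annotation introduced in this patch.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: example-hpa
  namespace: default
  annotations:
    validations.keda.sh/hpa-ownership: "false"   # the webhook skips this HPA
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: example-deployment   # same workload a ScaledObject may also target
  minReplicas: 1
  maxReplicas: 10
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 80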