Commit

Merge branch 'main' into clean_metrics
Jorge Turrado Ferrero authored Nov 10, 2021
2 parents 8ffaa4e + cc95674 commit f75ebaf
Showing 33 changed files with 991 additions and 517 deletions.
1 change: 0 additions & 1 deletion .github/workflows/pr-e2e.yml
@@ -2,7 +2,6 @@ name: pr-e2e-tests
on:
issue_comment:
types: [created]

jobs:
check:
runs-on: ubuntu-latest
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -28,9 +28,11 @@
- Add Cassandra Scaler ([#2211](https://github.com/kedacore/keda/pull/2211))
- Improve Redis Scaler, upgrade library, add username and Sentinel support ([#2181](https://github.com/kedacore/keda/pull/2181))
- Add GCP identity authentication when using Pubsub Scaler ([#2225](https://github.com/kedacore/keda/pull/2225))
- Add ScalersCache to reuse scalers unless they need changing ([#2187](https://github.com/kedacore/keda/pull/2187))

### Improvements

- Improve context handling in appropriate functionality in which we instantiate scalers ([#2267](https://github.com/kedacore/keda/pull/2267))
- Improve validation in Cron scaler in case start & end input is the same ([#2032](https://github.com/kedacore/keda/pull/2032))
- Improve the cron validation in Cron Scaler ([#2038](https://github.com/kedacore/keda/pull/2038))
- Add Bearer auth for Metrics API scaler ([#2028](https://github.com/kedacore/keda/pull/2028))
@@ -53,6 +55,7 @@
- Improve Cloudwatch Scaler metric exporting logic ([#2243](https://github.com/kedacore/keda/pull/2243))
- Refactor AWS-related scalers to reuse the AWS clients instead of creating a new one for every GetMetrics call ([#2255](https://github.com/kedacore/keda/pull/2255))
- Cleanup metric names inside scalers ([#2260](https://github.com/kedacore/keda/pull/2260))
- Validate values length in Prometheus query response ([#2264](https://github.com/kedacore/keda/pull/2264))

### Breaking Changes

2 changes: 1 addition & 1 deletion CREATE-NEW-SCALER.md
@@ -93,7 +93,7 @@ The constructor should have the following parameters:
## Lifecycle of a scaler
The scaler is created and closed every time KEDA or HPA wants to call `GetMetrics`, and every time a new ScaledObject is created or updated that has a trigger for that scaler. Thus, a developer of a scaler should not assume that the scaler will maintain any state between these calls.
Scalers are created and cached until the ScaledObject is modified, or `.IsActive()`/`GetMetrics()` result in an error. The cached scaler is then invalidated and a new scaler is created. `Close()` is called on all scalers when disposed.
## Note
The scaler code is embedded into the two separate binaries comprising KEDA: the operator and the custom metrics server component. The metrics server must occasionally be rebuilt, published, and deployed to Kubernetes for it to have the same code as your operator.
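
The lifecycle described above means a scaler must survive being created, queried, and closed at arbitrary points, with no state carried between calls. The stub below is a minimal illustrative sketch of that contract only; the interface is a simplified stand-in (string metric specs, float64 values) rather than KEDA's actual `scalers.Scaler` definition, and `queueScaler` is a hypothetical example.

```go
package main

import (
	"context"
	"fmt"
)

// Simplified stand-in for KEDA's Scaler interface. The real interface in
// pkg/scalers returns Kubernetes metric types; these signatures are
// illustrative approximations only.
type Scaler interface {
	GetMetricSpecForScaling(ctx context.Context) []string
	GetMetrics(ctx context.Context, metricName string) (float64, error)
	IsActive(ctx context.Context) (bool, error)
	Close(ctx context.Context) error
}

// queueScaler is a hypothetical scaler that reports a queue length.
type queueScaler struct {
	endpoint string // resolved from trigger metadata at construction time
}

func (s *queueScaler) GetMetricSpecForScaling(ctx context.Context) []string {
	return []string{"s0-queue-length"}
}

func (s *queueScaler) GetMetrics(ctx context.Context, metricName string) (float64, error) {
	// A real scaler would query s.endpoint here. It must not depend on state
	// accumulated across calls: the cached instance can be invalidated and
	// rebuilt between any two invocations.
	return 42, nil
}

func (s *queueScaler) IsActive(ctx context.Context) (bool, error) {
	v, err := s.GetMetrics(ctx, "s0-queue-length")
	return v > 0, err
}

func (s *queueScaler) Close(ctx context.Context) error {
	// Release connections; called whenever the cached instance is disposed.
	return nil
}

func main() {
	ctx := context.Background()
	var s Scaler = &queueScaler{endpoint: "amqp://example.invalid"}
	defer s.Close(ctx)
	fmt.Println(s.GetMetricSpecForScaling(ctx))
}
```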
55 changes: 45 additions & 10 deletions adapter/main.go
@@ -17,16 +17,18 @@ limitations under the License.
package main

import (
"context"
"flag"
"fmt"
"os"
"runtime"
"strconv"
"time"

"github.com/go-logr/logr"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
k8sruntime "k8s.io/apimachinery/pkg/runtime"
openapinamer "k8s.io/apiserver/pkg/endpoints/openapi"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/client-go/kubernetes/scheme"
@@ -36,12 +38,14 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/controller"

basecmd "sigs.k8s.io/custom-metrics-apiserver/pkg/cmd"
"sigs.k8s.io/custom-metrics-apiserver/pkg/provider"

generatedopenapi "github.com/kedacore/keda/v2/adapter/generated/openapi"
kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
kedacontrollers "github.com/kedacore/keda/v2/controllers/keda"
prommetrics "github.com/kedacore/keda/v2/pkg/metrics"
kedaprovider "github.com/kedacore/keda/v2/pkg/provider"
"github.com/kedacore/keda/v2/pkg/scaling"
@@ -65,7 +69,7 @@
adapterClientRequestBurst int
)

func (a *Adapter) makeProvider(globalHTTPTimeout time.Duration) (provider.MetricsProvider, error) {
func (a *Adapter) makeProvider(ctx context.Context, globalHTTPTimeout time.Duration) (provider.MetricsProvider, <-chan struct{}, error) {
// Get a config to talk to the apiserver
cfg, err := config.GetConfig()
if cfg != nil {
@@ -75,25 +79,25 @@ func (a *Adapter) makeProvider(globalHTTPTimeout time.Duration) (provider.Metric

if err != nil {
logger.Error(err, "failed to get the config")
return nil, fmt.Errorf("failed to get the config (%s)", err)
return nil, nil, fmt.Errorf("failed to get the config (%s)", err)
}

scheme := scheme.Scheme
if err := appsv1.SchemeBuilder.AddToScheme(scheme); err != nil {
logger.Error(err, "failed to add apps/v1 scheme to runtime scheme")
return nil, fmt.Errorf("failed to add apps/v1 scheme to runtime scheme (%s)", err)
return nil, nil, fmt.Errorf("failed to add apps/v1 scheme to runtime scheme (%s)", err)
}
if err := kedav1alpha1.SchemeBuilder.AddToScheme(scheme); err != nil {
logger.Error(err, "failed to add keda scheme to runtime scheme")
return nil, fmt.Errorf("failed to add keda scheme to runtime scheme (%s)", err)
return nil, nil, fmt.Errorf("failed to add keda scheme to runtime scheme (%s)", err)
}

kubeclient, err := client.New(cfg, client.Options{
Scheme: scheme,
})
if err != nil {
logger.Error(err, "unable to construct new client")
return nil, fmt.Errorf("unable to construct new client (%s)", err)
return nil, nil, fmt.Errorf("unable to construct new client (%s)", err)
}

broadcaster := record.NewBroadcaster()
@@ -103,13 +107,43 @@
namespace, err := getWatchNamespace()
if err != nil {
logger.Error(err, "failed to get watch namespace")
return nil, fmt.Errorf("failed to get watch namespace (%s)", err)
return nil, nil, fmt.Errorf("failed to get watch namespace (%s)", err)
}

prometheusServer := &prommetrics.PrometheusMetricServer{}
go func() { prometheusServer.NewServer(fmt.Sprintf(":%v", prometheusMetricsPort), prometheusMetricsPath) }()
stopCh := make(chan struct{})
if err := runScaledObjectController(ctx, scheme, namespace, handler, logger, stopCh); err != nil {
return nil, nil, err
}

return kedaprovider.NewProvider(ctx, logger, handler, kubeclient, namespace), stopCh, nil
}

func runScaledObjectController(ctx context.Context, scheme *k8sruntime.Scheme, namespace string, scaleHandler scaling.ScaleHandler, logger logr.Logger, stopCh chan<- struct{}) error {
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
Namespace: namespace,
})
if err != nil {
return err
}

if err := (&kedacontrollers.MetricsScaledObjectReconciler{
ScaleHandler: scaleHandler,
}).SetupWithManager(mgr, controller.Options{}); err != nil {
return err
}

go func() {
if err := mgr.Start(ctx); err != nil {
logger.Error(err, "controller-runtime encountered an error")
stopCh <- struct{}{}
close(stopCh)
}
}()

return kedaprovider.NewProvider(logger, handler, kubeclient, namespace), nil
return nil
}

func printVersion() {
@@ -130,6 +164,7 @@ func getWatchNamespace() (string, error) {
}

func main() {
ctx := ctrl.SetupSignalHandler()
var err error
defer func() {
if err != nil {
@@ -171,15 +206,15 @@ func main() {
return
}

kedaProvider, err := cmd.makeProvider(time.Duration(globalHTTPTimeoutMS) * time.Millisecond)
kedaProvider, stopCh, err := cmd.makeProvider(ctx, time.Duration(globalHTTPTimeoutMS)*time.Millisecond)
if err != nil {
logger.Error(err, "making provider")
return
}
cmd.WithExternalMetrics(kedaProvider)

logger.Info(cmd.Message)
if err = cmd.Run(wait.NeverStop); err != nil {
if err = cmd.Run(stopCh); err != nil {
return
}
}
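
The upshot of the adapter/main.go change is that the metrics adapter now runs a small controller-runtime manager next to the custom-metrics API server, and only signals the server's stop channel if that manager fails. The program below is a self-contained sketch of that wiring pattern under assumed placeholder names (`runManager`, `runAPIServer`); it is not KEDA's code, just the goroutine-plus-stop-channel idiom the diff uses.

```go
package main

import (
	"context"
	"errors"
	"log"
	"time"
)

// runManager stands in for mgr.Start(ctx): it blocks until the context is
// cancelled or the embedded controller fails.
func runManager(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return nil
	case <-time.After(2 * time.Second):
		return errors.New("manager crashed") // simulated failure
	}
}

// runAPIServer stands in for cmd.Run(stopCh): it serves until stopCh is closed.
func runAPIServer(stopCh <-chan struct{}) error {
	<-stopCh
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The stop channel is only signalled when the embedded controller dies;
	// a healthy controller lets the API server run indefinitely.
	stopCh := make(chan struct{})
	go func() {
		if err := runManager(ctx); err != nil {
			log.Println("controller-runtime encountered an error:", err)
			close(stopCh)
		}
	}()

	if err := runAPIServer(stopCh); err != nil {
		log.Fatal(err)
	}
	log.Println("metrics API server stopped because the controller failed")
}
```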
4 changes: 3 additions & 1 deletion config/crd/bases/keda.sh_clustertriggerauthentications.yaml
@@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.1
controller-gen.kubebuilder.io/version: v0.6.1
creationTimestamp: null
name: clustertriggerauthentications.keda.sh
spec:
@@ -90,6 +90,8 @@ spec:
type: object
mount:
type: string
namespace:
type: string
role:
type: string
secrets:
2 changes: 1 addition & 1 deletion config/crd/bases/keda.sh_scaledobjects.yaml
@@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.1
controller-gen.kubebuilder.io/version: v0.6.1
creationTimestamp: null
name: scaledobjects.keda.sh
spec:
35 changes: 16 additions & 19 deletions controllers/keda/hpa.go
@@ -160,35 +160,32 @@ func (r *ScaledObjectReconciler) getScaledObjectMetricSpecs(ctx context.Context,
var externalMetricNames []string
var resourceMetricNames []string

scalers, err := r.scaleHandler.GetScalers(ctx, scaledObject)
cache, err := r.scaleHandler.GetScalersCache(ctx, scaledObject)
if err != nil {
logger.Error(err, "Error getting scalers")
return nil, err
}

for _, scaler := range scalers {
metricSpecs := scaler.GetMetricSpecForScaling(ctx)
metricSpecs := cache.GetMetricSpecForScaling(ctx)

for _, metricSpec := range metricSpecs {
if metricSpec.Resource != nil {
resourceMetricNames = append(resourceMetricNames, string(metricSpec.Resource.Name))
}

if metricSpec.External != nil {
externalMetricName := metricSpec.External.Metric.Name
if kedacontrollerutil.Contains(externalMetricNames, externalMetricName) {
return nil, fmt.Errorf("metricName %s defined multiple times in ScaledObject %s, please refer the documentation how to define metricName manually", externalMetricName, scaledObject.Name)
}
for _, metricSpec := range metricSpecs {
if metricSpec.Resource != nil {
resourceMetricNames = append(resourceMetricNames, string(metricSpec.Resource.Name))
}

// add the scaledobject.keda.sh/name label. This is how the MetricsAdapter will know which scaledobject a metric is for when the HPA queries it.
metricSpec.External.Metric.Selector = &metav1.LabelSelector{MatchLabels: make(map[string]string)}
metricSpec.External.Metric.Selector.MatchLabels["scaledobject.keda.sh/name"] = scaledObject.Name
externalMetricNames = append(externalMetricNames, externalMetricName)
if metricSpec.External != nil {
externalMetricName := metricSpec.External.Metric.Name
if kedacontrollerutil.Contains(externalMetricNames, externalMetricName) {
return nil, fmt.Errorf("metricName %s defined multiple times in ScaledObject %s, please refer the documentation how to define metricName manually", externalMetricName, scaledObject.Name)
}

// add the scaledobject.keda.sh/name label. This is how the MetricsAdapter will know which scaledobject a metric is for when the HPA queries it.
metricSpec.External.Metric.Selector = &metav1.LabelSelector{MatchLabels: make(map[string]string)}
metricSpec.External.Metric.Selector.MatchLabels["scaledobject.keda.sh/name"] = scaledObject.Name
externalMetricNames = append(externalMetricNames, externalMetricName)
}
scaledObjectMetricSpecs = append(scaledObjectMetricSpecs, metricSpecs...)
scaler.Close(ctx)
}
scaledObjectMetricSpecs = append(scaledObjectMetricSpecs, metricSpecs...)

// sort metrics in ScaledObject, this way we always check the same resource in Reconcile loop and we can prevent unnecessary HPA updates,
// see https://github.com/kedacore/keda/issues/1531 for details
17 changes: 13 additions & 4 deletions controllers/keda/hpa_test.go
@@ -30,7 +30,8 @@ import (
"github.com/kedacore/keda/v2/pkg/mock/mock_client"
mock_scalers "github.com/kedacore/keda/v2/pkg/mock/mock_scaler"
"github.com/kedacore/keda/v2/pkg/mock/mock_scaling"
kedascalers "github.com/kedacore/keda/v2/pkg/scalers"
"github.com/kedacore/keda/v2/pkg/scalers"
"github.com/kedacore/keda/v2/pkg/scaling/cache"
)

var _ = Describe("hpa", func() {
@@ -129,7 +130,16 @@ func setupTest(health map[string]v1alpha1.HealthStatus, scaler *mock_scalers.Moc
},
}

scalers := []kedascalers.Scaler{scaler}
scalersCache := cache.ScalersCache{
Scalers: []cache.ScalerBuilder{{
Scaler: scaler,
Factory: func() (scalers.Scaler, error) {
return scaler, nil
},
}},
Logger: nil,
Recorder: nil,
}
metricSpec := v2beta2.MetricSpec{
External: &v2beta2.ExternalMetricSource{
Metric: v2beta2.MetricIdentifier{
@@ -140,8 +150,7 @@
metricSpecs := []v2beta2.MetricSpec{metricSpec}
ctx := context.Background()
scaler.EXPECT().GetMetricSpecForScaling(ctx).Return(metricSpecs)
scaler.EXPECT().Close(ctx)
scaleHandler.EXPECT().GetScalers(context.Background(), gomock.Eq(scaledObject)).Return(scalers, nil)
scaleHandler.EXPECT().GetScalersCache(context.Background(), gomock.Eq(scaledObject)).Return(&scalersCache, nil)

return scaledObject
}
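
The test above builds a `ScalersCache` from `ScalerBuilder` entries, each carrying a live scaler plus a `Factory` to recreate it. Combined with the lifecycle note in CREATE-NEW-SCALER.md (a cached scaler is invalidated when `GetMetrics`/`IsActive` errors), a plausible refresh-on-error flow looks like the sketch below. All types here are simplified stand-ins and the method is an assumption about the cache's behaviour, not a copy of the real implementation in pkg/scaling/cache.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Stand-ins for pkg/scalers and pkg/scaling/cache types; Factory mirrors the
// field used in the test above, everything else is simplified.
type Scaler interface {
	GetMetrics(ctx context.Context, metricName string) (float64, error)
	Close(ctx context.Context) error
}

type ScalerBuilder struct {
	Scaler  Scaler
	Factory func() (Scaler, error)
}

type ScalersCache struct {
	Scalers []ScalerBuilder
}

// GetMetrics tries the cached scaler first and, on error, disposes it,
// rebuilds it via the Factory, and retries once — the invalidation behaviour
// described in CREATE-NEW-SCALER.md. Details of the real code may differ.
func (c *ScalersCache) GetMetrics(ctx context.Context, i int, metric string) (float64, error) {
	b := &c.Scalers[i]
	v, err := b.Scaler.GetMetrics(ctx, metric)
	if err == nil {
		return v, nil
	}
	b.Scaler.Close(ctx) // dispose the broken instance
	fresh, ferr := b.Factory()
	if ferr != nil {
		return 0, ferr
	}
	b.Scaler = fresh
	return fresh.GetMetrics(ctx, metric)
}

// flakyScaler fails until it is rebuilt as a healthy instance.
type flakyScaler struct{ healthy bool }

func (s *flakyScaler) GetMetrics(ctx context.Context, metricName string) (float64, error) {
	if !s.healthy {
		return 0, errors.New("connection lost")
	}
	return 7, nil
}

func (s *flakyScaler) Close(ctx context.Context) error { return nil }

func main() {
	c := ScalersCache{Scalers: []ScalerBuilder{{
		Scaler:  &flakyScaler{healthy: false},
		Factory: func() (Scaler, error) { return &flakyScaler{healthy: true}, nil },
	}}}
	fmt.Println(c.GetMetrics(context.Background(), 0, "s0-queue-length"))
}
```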
45 changes: 45 additions & 0 deletions controllers/keda/metrics_adapter_controller.go
@@ -0,0 +1,45 @@
/*
Copyright 2021 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package keda

import (
"context"

kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
"github.com/kedacore/keda/v2/pkg/scaling"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/predicate"
)

type MetricsScaledObjectReconciler struct {
ScaleHandler scaling.ScaleHandler
}

func (r *MetricsScaledObjectReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
r.ScaleHandler.ClearScalersCache(ctx, req.Name, req.Namespace)
return ctrl.Result{}, nil
}

func (r *MetricsScaledObjectReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
return ctrl.NewControllerManagedBy(mgr).
For(&kedav1alpha1.ScaledObject{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).
Owns(&kedav1alpha1.ScaledObject{}).
WithOptions(options).
Complete(r)
}
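
The new reconciler leans entirely on the `ScaleHandler`; the call sites in this commit (`GetScalersCache`, `ClearScalersCache`, `HandleScalableObject`, `DeleteScalableObject`) suggest an interface roughly like the sketch below. The signatures are reconstructed from usage and are therefore approximations, not the actual definition in pkg/scaling.

```go
package scaling

import "context"

// ScalersCache is a placeholder for the concrete type in pkg/scaling/cache.
type ScalersCache struct{}

// ScaleHandler: approximate shape inferred from this commit's call sites.
// The real KEDA interface likely uses concrete ScaledObject/ScaledJob types
// and may return errors where this sketch does not.
type ScaleHandler interface {
	GetScalersCache(ctx context.Context, scalableObject interface{}) (*ScalersCache, error)
	ClearScalersCache(ctx context.Context, name, namespace string)
	HandleScalableObject(ctx context.Context, scalableObject interface{}) error
	DeleteScalableObject(ctx context.Context, scalableObject interface{}) error
}
```

In practice, the `GenerationChangedPredicate` means the adapter clears its cached scalers whenever a ScaledObject's spec generation changes, so the next metrics query rebuilds them against the updated triggers.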
12 changes: 6 additions & 6 deletions controllers/keda/scaledjob_controller.go
@@ -140,14 +140,14 @@ func (r *ScaledJobReconciler) reconcileScaledJob(ctx context.Context, logger log
}

// Check ScaledJob is Ready or not
_, err = r.scaleHandler.GetScalers(ctx, scaledJob)
_, err = r.scaleHandler.GetScalersCache(ctx, scaledJob)
if err != nil {
logger.Error(err, "Error getting scalers")
return "Failed to ensure ScaledJob is correctly created", err
}

// scaledJob was created or modified - let's start a new ScaleLoop
err = r.requestScaleLoop(logger, scaledJob)
err = r.requestScaleLoop(ctx, logger, scaledJob)
if err != nil {
return "Failed to start a new scale loop with scaling logic", err
}
@@ -187,13 +187,13 @@ func (r *ScaledJobReconciler) deletePreviousVersionScaleJobs(ctx context.Context
}

// requestScaleLoop request ScaleLoop handler for the respective ScaledJob
func (r *ScaledJobReconciler) requestScaleLoop(logger logr.Logger, scaledJob *kedav1alpha1.ScaledJob) error {
func (r *ScaledJobReconciler) requestScaleLoop(ctx context.Context, logger logr.Logger, scaledJob *kedav1alpha1.ScaledJob) error {
logger.V(1).Info("Starting a new ScaleLoop")
return r.scaleHandler.HandleScalableObject(scaledJob)
return r.scaleHandler.HandleScalableObject(ctx, scaledJob)
}

// stopScaleLoop stops ScaleLoop handler for the respective ScaledJob
func (r *ScaledJobReconciler) stopScaleLoop(logger logr.Logger, scaledJob *kedav1alpha1.ScaledJob) error {
func (r *ScaledJobReconciler) stopScaleLoop(ctx context.Context, logger logr.Logger, scaledJob *kedav1alpha1.ScaledJob) error {
logger.V(1).Info("Stopping a ScaleLoop")
return r.scaleHandler.DeleteScalableObject(scaledJob)
return r.scaleHandler.DeleteScalableObject(ctx, scaledJob)
}
2 changes: 1 addition & 1 deletion controllers/keda/scaledjob_finalizer.go
@@ -37,7 +37,7 @@ func (r *ScaledJobReconciler) finalizeScaledJob(ctx context.Context, logger logr
// Run finalization logic for scaledJobFinalizer. If the
// finalization logic fails, don't remove the finalizer so
// that we can retry during the next reconciliation.
if err := r.stopScaleLoop(logger, scaledJob); err != nil {
if err := r.stopScaleLoop(ctx, logger, scaledJob); err != nil {
return err
}
