Merge pull request #9 from kedacore/v2
Rebasing with V2
chughts authored Oct 14, 2020
2 parents c498970 + 2afc068 commit 305a210
Showing 69 changed files with 1,067 additions and 645 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/v2-build.yml

@@ -1,4 +1,4 @@
-name: master build
+name: v2 build
 on:
   push:
     branches:

3 changes: 2 additions & 1 deletion .golangci.yml

@@ -22,8 +22,9 @@ linters:
     - misspell
     - bodyclose
     - unconvert
+    - ineffassign
+    - staticcheck
     #- interfacer
-    #- ineffassign
     #- scopelint
     #- structcheck
     - deadcode

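The newly enabled ineffassign and staticcheck linters move out of the commented-out block. For context, a minimal illustration (not from this repository) of the kind of dead assignment ineffassign reports; compare the messageCount cleanup in pkg/scalers/artemis_scaler.go further down:

package main

import "fmt"

func main() {
    count := 1 // ineffassign flags this: the value is overwritten before it is ever read
    count = 2
    fmt.Println(count)
}
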
4 changes: 2 additions & 2 deletions CHANGELOG.md

@@ -22,7 +22,6 @@
 - Define KEDA readiness and liveness probes ([#788](https://github.com/kedacore/keda/issues/788))
 - KEDA Support for configurable scaling behavior in HPA v2beta2 ([#802](https://github.com/kedacore/keda/issues/802))
 - Add External Push scaler ([#820](https://github.com/kedacore/keda/issues/820) | [docs](https://keda.sh/docs/2.0/scalers/external-push/))
-- Add Standard Resource metrics to KEDA ([#874](https://github.com/kedacore/keda/pull/874))
 - Managed Identity support for Azure Monitor scaler ([#936](https://github.com/kedacore/keda/issues/936))
 - Add support for multiple triggers on ScaledObject ([#476](https://github.com/kedacore/keda/issues/476))
 - Add consumer offset reset policy option to Kafka scaler ([#925](https://github.com/kedacore/keda/pull/925))
@@ -31,7 +30,8 @@
 - Add support for multiple redis list types in redis list scaler ([#1006](https://github.com/kedacore/keda/pull/1006)) | [docs](https://keda.sh/docs/2.0/scalers/redis-lists/))
 - Introduce Azure Log Analytics scaler ([#1061](https://github.com/kedacore/keda/issues/1061)) | [docs](https://keda.sh/docs/2.0/scalers/azure-log-analytics/))
 - Add Metrics API Scaler ([#1026](https://github.com/kedacore/keda/pull/1026))
-
+- Add cpu/memory Scaler ([#1215](https://github.com/kedacore/keda/pull/1215))
+- Add Scaling Strategy for ScaledJob ([#1227](https://github.com/kedacore/keda/pull/1227))
 
 ### Improvements
 

3 changes: 2 additions & 1 deletion api/v1alpha1/scaledobject_types.go

@@ -54,7 +54,6 @@ type AdvancedConfig struct {
 
 // HorizontalPodAutoscalerConfig specifies horizontal scale config
 type HorizontalPodAutoscalerConfig struct {
-    ResourceMetrics []*autoscalingv2beta2.ResourceMetricSource `json:"resourceMetrics,omitempty"`
     // +optional
     Behavior *autoscalingv2beta2.HorizontalPodAutoscalerBehavior `json:"behavior,omitempty"`
 }
@@ -94,6 +93,8 @@ type ScaledObjectStatus struct {
     // +optional
     ExternalMetricNames []string `json:"externalMetricNames,omitempty"`
     // +optional
+    ResourceMetricNames []string `json:"resourceMetricNames,omitempty"`
+    // +optional
     Conditions Conditions `json:"conditions,omitempty"`
 }
 

12 changes: 6 additions & 6 deletions api/v1alpha1/triggerauthentication_types.go

@@ -50,11 +50,11 @@ type PodIdentityProvider string
 // PodIdentityProvider<IDENTITY_PROVIDER> specifies other available Identity providers
 const (
     PodIdentityProviderNone    PodIdentityProvider = "none"
-    PodIdentityProviderAzure   = "azure"
-    PodIdentityProviderGCP     = "gcp"
-    PodIdentityProviderSpiffe  = "spiffe"
-    PodIdentityProviderAwsEKS  = "aws-eks"
-    PodIdentityProviderAwsKiam = "aws-kiam"
+    PodIdentityProviderAzure   PodIdentityProvider = "azure"
+    PodIdentityProviderGCP     PodIdentityProvider = "gcp"
+    PodIdentityProviderSpiffe  PodIdentityProvider = "spiffe"
+    PodIdentityProviderAwsEKS  PodIdentityProvider = "aws-eks"
+    PodIdentityProviderAwsKiam PodIdentityProvider = "aws-kiam"
 )
 
 // PodIdentityAnnotationEKS specifies aws role arn for aws-eks Identity Provider
@@ -118,7 +118,7 @@ type VaultAuthentication string
 // Client authenticating to Vault
 const (
     VaultAuthenticationToken      VaultAuthentication = "token"
-    VaultAuthenticationKubernetes = "kubernetes"
+    VaultAuthenticationKubernetes VaultAuthentication = "kubernetes"
     // VaultAuthenticationAWS = "aws"
 )
 

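The constants above gain explicit types because, inside a Go const block, a constant declared with its own value but no type is untyped: the old PodIdentityProviderAzure = "azure" was an untyped string constant, not a PodIdentityProvider. A minimal, self-contained sketch of the practical difference, reusing two names from the diff for illustration:

package main

import "fmt"

type PodIdentityProvider string

const (
    PodIdentityProviderNone PodIdentityProvider = "none"
    // Pre-fix form: no explicit type, so this is an untyped string constant.
    PodIdentityProviderAzure = "azure"
)

func describe(p PodIdentityProvider) string { return string(p) }

func main() {
    // Untyped constants convert implicitly, so both calls compile.
    fmt.Println(describe(PodIdentityProviderNone))
    fmt.Println(describe(PodIdentityProviderAzure))

    // But the untyped form also assigns to a plain string. With the typed
    // declaration from this commit, the next line would fail to compile
    // without an explicit string(...) conversion.
    var s string = PodIdentityProviderAzure
    fmt.Println(s)
}

The typed form turns accidental mixing of provider names with ordinary strings into a compile-time error.
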
16 changes: 5 additions & 11 deletions api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

61 changes: 4 additions & 57 deletions config/crd/bases/keda.sh_scaledobjects.yaml

@@ -187,63 +187,6 @@ spec:
                             type: integer
                         type: object
                     type: object
-                  resourceMetrics:
-                    items:
-                      description: ResourceMetricSource indicates how to scale
-                        on a resource metric known to Kubernetes, as specified
-                        in requests and limits, describing each pod in the current
-                        scale target (e.g. CPU or memory). The values will be
-                        averaged together before being compared to the target. Such
-                        metrics are built in to Kubernetes, and have special scaling
-                        options on top of those available to normal per-pod metrics
-                        using the "pods" source. Only one "target" type should
-                        be set.
-                      properties:
-                        name:
-                          description: name is the name of the resource in question.
-                          type: string
-                        target:
-                          description: target specifies the target value for the
-                            given metric
-                          properties:
-                            averageUtilization:
-                              description: averageUtilization is the target value
-                                of the average of the resource metric across all
-                                relevant pods, represented as a percentage of
-                                the requested value of the resource for the pods.
-                                Currently only valid for Resource metric source
-                                type
-                              format: int32
-                              type: integer
-                            averageValue:
-                              anyOf:
-                              - type: integer
-                              - type: string
-                              description: averageValue is the target value of
-                                the average of the metric across all relevant
-                                pods (as a quantity)
-                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
-                              x-kubernetes-int-or-string: true
-                            type:
-                              description: type represents whether the metric
-                                type is Utilization, Value, or AverageValue
-                              type: string
-                            value:
-                              anyOf:
-                              - type: integer
-                              - type: string
-                              description: value is the target value of the metric
-                                (as a quantity).
-                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
-                              x-kubernetes-int-or-string: true
-                          required:
-                          - type
-                          type: object
-                      required:
-                      - name
-                      - target
-                      type: object
-                    type: array
                 type: object
             restoreToOriginalReplicaCount:
               type: boolean
@@ -342,6 +285,10 @@
             originalReplicaCount:
               format: int32
               type: integer
+            resourceMetricNames:
+              items:
+                type: string
+              type: array
             scaleTargetGVKR:
               description: GroupVersionKindResource provides unified structure for
                 schema.GroupVersionKind and Resource

35 changes: 12 additions & 23 deletions controllers/hpa.go

@@ -135,35 +135,36 @@ func (r *ScaledObjectReconciler) updateHPAIfNeeded(logger logr.Logger, scaledObj
 func (r *ScaledObjectReconciler) getScaledObjectMetricSpecs(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) ([]autoscalingv2beta2.MetricSpec, error) {
     var scaledObjectMetricSpecs []autoscalingv2beta2.MetricSpec
     var externalMetricNames []string
+    var resourceMetricNames []string
 
     scalers, err := r.scaleHandler.GetScalers(scaledObject)
     if err != nil {
         logger.Error(err, "Error getting scalers")
         return nil, err
     }
 
-    // Handling the Resource metrics through KEDA
-    if scaledObject.Spec.Advanced != nil && scaledObject.Spec.Advanced.HorizontalPodAutoscalerConfig != nil {
-        metrics := getResourceMetrics(scaledObject.Spec.Advanced.HorizontalPodAutoscalerConfig.ResourceMetrics)
-        scaledObjectMetricSpecs = append(scaledObjectMetricSpecs, metrics...)
-    }
-
     for _, scaler := range scalers {
         metricSpecs := scaler.GetMetricSpecForScaling()
 
-        // add the scaledObjectName label. This is how the MetricsAdapter will know which scaledobject a metric is for when the HPA queries it.
         for _, metricSpec := range metricSpecs {
-            metricSpec.External.Metric.Selector = &metav1.LabelSelector{MatchLabels: make(map[string]string)}
-            metricSpec.External.Metric.Selector.MatchLabels["scaledObjectName"] = scaledObject.Name
-            externalMetricNames = append(externalMetricNames, metricSpec.External.Metric.Name)
+            if metricSpec.Resource != nil {
+                resourceMetricNames = append(resourceMetricNames, string(metricSpec.Resource.Name))
+            }
+
+            if metricSpec.External != nil {
+                // add the scaledObjectName label. This is how the MetricsAdapter will know which scaledobject a metric is for when the HPA queries it.
+                metricSpec.External.Metric.Selector = &metav1.LabelSelector{MatchLabels: make(map[string]string)}
+                metricSpec.External.Metric.Selector.MatchLabels["scaledObjectName"] = scaledObject.Name
+                externalMetricNames = append(externalMetricNames, metricSpec.External.Metric.Name)
+            }
         }
         scaledObjectMetricSpecs = append(scaledObjectMetricSpecs, metricSpecs...)
         scaler.Close()
     }
 
-    // store External.MetricNames used by scalers defined in the ScaledObject
+    // store External.MetricNames,Resource.MetricsNames used by scalers defined in the ScaledObject
     status := scaledObject.Status.DeepCopy()
     status.ExternalMetricNames = externalMetricNames
+    status.ResourceMetricNames = resourceMetricNames
     err = kedacontrollerutil.UpdateScaledObjectStatus(r.Client, logger, scaledObject, status)
     if err != nil {
         logger.Error(err, "Error updating scaledObject status with used externalMetricNames")
@@ -173,18 +174,6 @@ func (r *ScaledObjectReconciler) getScaledObjectMetricSpecs(logger logr.Logger,
     return scaledObjectMetricSpecs, nil
 }
 
-func getResourceMetrics(resourceMetrics []*autoscalingv2beta2.ResourceMetricSource) []autoscalingv2beta2.MetricSpec {
-    metrics := make([]autoscalingv2beta2.MetricSpec, 0, len(resourceMetrics))
-    for _, resourceMetric := range resourceMetrics {
-        metrics = append(metrics, autoscalingv2beta2.MetricSpec{
-            Type: "Resource",
-            Resource: resourceMetric,
-        })
-    }
-
-    return metrics
-}
-
 // checkMinK8sVersionforHPABehavior min version (k8s v1.18) for HPA Behavior
 func (r *ScaledObjectReconciler) checkMinK8sVersionforHPABehavior(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) {
     if r.kubeVersion.MinorVersion < 18 {

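With getResourceMetrics gone, Resource-type metric specs are expected to arrive through the ordinary Scaler interface, which the rewritten loop above distinguishes from External specs via metricSpec.Resource. A hypothetical sketch of how the new cpu/memory scaler (#1215) might emit such a spec; the type name, field, and target value below are illustrative assumptions, not the actual implementation:

package scalers

import (
    autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
    corev1 "k8s.io/api/core/v1"
)

// cpuMemoryScaler is a hypothetical stand-in for the scaler added in #1215.
type cpuMemoryScaler struct {
    targetUtilization int32 // e.g. 80 (percent)
}

// GetMetricSpecForScaling returns a Resource-type MetricSpec, which
// getScaledObjectMetricSpecs above records in status.ResourceMetricNames
// instead of attaching an External metric selector.
func (s *cpuMemoryScaler) GetMetricSpecForScaling() []autoscalingv2beta2.MetricSpec {
    target := s.targetUtilization
    return []autoscalingv2beta2.MetricSpec{{
        Type: autoscalingv2beta2.ResourceMetricSourceType,
        Resource: &autoscalingv2beta2.ResourceMetricSource{
            Name: corev1.ResourceCPU,
            Target: autoscalingv2beta2.MetricTarget{
                Type:               autoscalingv2beta2.UtilizationMetricType,
                AverageUtilization: &target,
            },
        },
    }}
}
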
43 changes: 21 additions & 22 deletions pkg/scalers/artemis_scaler.go

@@ -54,8 +54,8 @@ const (
 var artemisLog = logf.Log.WithName("artemis_queue_scaler")
 
 // NewArtemisQueueScaler creates a new artemis queue Scaler
-func NewArtemisQueueScaler(resolvedSecrets, metadata, authParams map[string]string) (Scaler, error) {
-    artemisMetadata, err := parseArtemisMetadata(resolvedSecrets, metadata, authParams)
+func NewArtemisQueueScaler(config *ScalerConfig) (Scaler, error) {
+    artemisMetadata, err := parseArtemisMetadata(config)
     if err != nil {
         return nil, fmt.Errorf("error parsing artemis metadata: %s", err)
     }
@@ -65,38 +65,38 @@ func NewArtemisQueueScaler(resolvedSecrets, metadata, authParams map[string]stri
     }, nil
 }
 
-func parseArtemisMetadata(resolvedEnv, metadata, authParams map[string]string) (*artemisMetadata, error) {
+func parseArtemisMetadata(config *ScalerConfig) (*artemisMetadata, error) {
     meta := artemisMetadata{}
 
     meta.queueLength = defaultArtemisQueueLength
 
-    if val, ok := metadata["restApiTemplate"]; ok && val != "" {
-        meta.restAPITemplate = metadata["restApiTemplate"]
+    if val, ok := config.TriggerMetadata["restApiTemplate"]; ok && val != "" {
+        meta.restAPITemplate = config.TriggerMetadata["restApiTemplate"]
     } else {
         meta.restAPITemplate = defaultRestAPITemplate
     }
 
-    if metadata["managementEndpoint"] == "" {
+    if config.TriggerMetadata["managementEndpoint"] == "" {
         return nil, errors.New("no management endpoint given")
     }
-    meta.managementEndpoint = metadata["managementEndpoint"]
+    meta.managementEndpoint = config.TriggerMetadata["managementEndpoint"]
 
-    if metadata["queueName"] == "" {
+    if config.TriggerMetadata["queueName"] == "" {
         return nil, errors.New("no queue name given")
     }
-    meta.queueName = metadata["queueName"]
+    meta.queueName = config.TriggerMetadata["queueName"]
 
-    if metadata["brokerName"] == "" {
+    if config.TriggerMetadata["brokerName"] == "" {
         return nil, errors.New("no broker name given")
     }
-    meta.brokerName = metadata["brokerName"]
+    meta.brokerName = config.TriggerMetadata["brokerName"]
 
-    if metadata["brokerAddress"] == "" {
+    if config.TriggerMetadata["brokerAddress"] == "" {
         return nil, errors.New("no broker address given")
     }
-    meta.brokerAddress = metadata["brokerAddress"]
+    meta.brokerAddress = config.TriggerMetadata["brokerAddress"]
 
-    if val, ok := metadata["queueLength"]; ok {
+    if val, ok := config.TriggerMetadata["queueLength"]; ok {
         queueLength, err := strconv.Atoi(val)
         if err != nil {
             return nil, fmt.Errorf("can't parse queueLength: %s", err)
@@ -105,12 +105,12 @@ func parseArtemisMetadata(resolvedEnv, metadata, authParams map[string]string) (
         meta.queueLength = queueLength
     }
 
-    if val, ok := authParams["username"]; ok && val != "" {
+    if val, ok := config.AuthParams["username"]; ok && val != "" {
         meta.username = val
-    } else if val, ok := metadata["username"]; ok && val != "" {
+    } else if val, ok := config.TriggerMetadata["username"]; ok && val != "" {
         username := val
 
-        if val, ok := resolvedEnv[username]; ok && val != "" {
+        if val, ok := config.ResolvedEnv[username]; ok && val != "" {
             meta.username = val
         } else {
             meta.username = username
@@ -121,12 +121,12 @@ func parseArtemisMetadata(resolvedEnv, metadata, authParams map[string]string) (
         return nil, fmt.Errorf("username cannot be empty")
     }
 
-    if val, ok := authParams["password"]; ok && val != "" {
+    if val, ok := config.AuthParams["password"]; ok && val != "" {
         meta.password = val
-    } else if val, ok := metadata["password"]; ok && val != "" {
+    } else if val, ok := config.TriggerMetadata["password"]; ok && val != "" {
         password := val
 
-        if val, ok := resolvedEnv[password]; ok && val != "" {
+        if val, ok := config.ResolvedEnv[password]; ok && val != "" {
             meta.password = val
         } else {
             meta.password = password
@@ -162,9 +162,8 @@ func (s *artemisScaler) getMonitoringEndpoint() string {
 }
 
 func (s *artemisScaler) getQueueMessageCount() (int, error) {
-    var messageCount int
     var monitoringInfo *artemisMonitoring
-    messageCount = 0
+    messageCount := 0
 
     client := &http.Client{
         Timeout: time.Second * 3,

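Every scaler constructor now receives a single *ScalerConfig instead of three loose maps. The struct definition itself is not part of the excerpt shown here; this sketch captures only the shape the call sites above (and the tests below) assume, limited to the three fields the diff actually references. The comments are inferred from how each field is used:

package scalers

// ScalerConfig is a sketch of the configuration bundle the refactored
// constructors receive; field names are taken from the usages in this diff.
type ScalerConfig struct {
    // TriggerMetadata holds the trigger's metadata key/value pairs
    // (formerly the `metadata` argument).
    TriggerMetadata map[string]string
    // ResolvedEnv holds environment variables resolved from the scale
    // target's containers (formerly `resolvedEnv`/`resolvedSecrets`).
    ResolvedEnv map[string]string
    // AuthParams holds values resolved through a TriggerAuthentication
    // (formerly the `authParams` argument).
    AuthParams map[string]string
}
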
8 changes: 4 additions & 4 deletions pkg/scalers/artemis_scaler_test.go

@@ -79,7 +79,7 @@ var testArtemisMetadataWithAuthParams = []parseArtemisMetadataTestData{
 
 func TestArtemisParseMetadata(t *testing.T) {
     for _, testData := range testArtemisMetadata {
-        _, err := parseArtemisMetadata(sampleArtemisResolvedEnv, testData.metadata, nil)
+        _, err := parseArtemisMetadata(&ScalerConfig{ResolvedEnv: sampleArtemisResolvedEnv, TriggerMetadata: testData.metadata, AuthParams: nil})
         if err != nil && !testData.isError {
             t.Error("Expected success but got error", err)
         }
@@ -90,7 +90,7 @@ func TestArtemisParseMetadata(t *testing.T) {
 
     // test with missing auth params should all fail
     for _, testData := range testArtemisMetadataWithEmptyAuthParams {
-        _, err := parseArtemisMetadata(sampleArtemisResolvedEnv, testData.metadata, emptyArtemisAuthParams)
+        _, err := parseArtemisMetadata(&ScalerConfig{ResolvedEnv: sampleArtemisResolvedEnv, TriggerMetadata: testData.metadata, AuthParams: emptyArtemisAuthParams})
         if err != nil && !testData.isError {
             t.Error("Expected success but got error", err)
         }
@@ -101,7 +101,7 @@ func TestArtemisParseMetadata(t *testing.T) {
 
     // test with complete auth params should not fail
     for _, testData := range testArtemisMetadataWithAuthParams {
-        _, err := parseArtemisMetadata(sampleArtemisResolvedEnv, testData.metadata, artemisAuthParams)
+        _, err := parseArtemisMetadata(&ScalerConfig{ResolvedEnv: sampleArtemisResolvedEnv, TriggerMetadata: testData.metadata, AuthParams: artemisAuthParams})
         if err != nil && !testData.isError {
             t.Error("Expected success but got error", err)
         }
@@ -113,7 +113,7 @@ func TestArtemisParseMetadata(t *testing.T) {
 
 func TestArtemisGetMetricSpecForScaling(t *testing.T) {
     for _, testData := range artemisMetricIdentifiers {
-        meta, err := parseArtemisMetadata(sampleArtemisResolvedEnv, testData.metadataTestData.metadata, nil)
+        meta, err := parseArtemisMetadata(&ScalerConfig{ResolvedEnv: sampleArtemisResolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: nil})
         if err != nil {
             t.Fatal("Could not parse metadata:", err)
         }
