Smarter Zarf Registry HPA behavior (#1317)
Make the Registry HPA less aggressive and block it from downscaling while running `zarf package deploy` if images exist.
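
A minimal sketch of how a deploy flow can use the new cluster helpers to freeze registry scale-down while images are pushed — the wrapper function, the `hasImages` check, and the `push` callback are illustrative and not part of this PR:

```go
package deploysketch

import "github.com/defenseunicorns/zarf/src/internal/cluster"

// deployWithRegistryHPAFreeze is illustrative only: it assumes it lives inside
// the Zarf module (the cluster package is internal) and that push performs the
// actual image pushes.
func deployWithRegistryHPAFreeze(c *cluster.Cluster, hasImages bool, push func() error) error {
    if hasImages {
        // Switch the registry HPA's scale-down selectPolicy to Disabled for the deploy.
        if err := c.DisableRegHPAScaleDown(); err != nil {
            return err
        }
        // Restore the normal scale-down policy once the deploy finishes.
        defer func() { _ = c.EnableRegHPAScaleDown() }()
    }
    return push()
}
```

The key point is that scale-down is re-enabled even if the push fails, so the registry is never left permanently frozen.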

## Type of change

- [x] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Other (security config, docs update, etc)

## Checklist before merging

- [x] Test, docs, adr added or updated as needed
- [x] [Contributor Guide Steps](https://github.com/defenseunicorns/zarf/blob/main/CONTRIBUTING.md#developer-workflow) followed
jeff-mccoy authored Feb 3, 2023
1 parent 032b8fb commit 6df91da
Showing 9 changed files with 234 additions and 115 deletions.
27 changes: 25 additions & 2 deletions packages/zarf-registry/chart/templates/hpa.yaml
@@ -1,5 +1,5 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v1
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: {{ template "docker-registry.fullname" . }}
@@ -15,5 +15,28 @@ spec:
    name: {{ template "docker-registry.fullname" . }}
  minReplicas: {{ .Values.autoscaling.minReplicas }}
  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
  targetCPUUtilizationPercentage: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
  behavior:
    scaleDown:
      # Use 60 second stabilization window because zarf will freeze scale down during deploys
      stabilizationWindowSeconds: 60
      # Scale down one pod per minute
      policies:
        - type: Pods
          value: 1
          periodSeconds: 60
    scaleUp:
      # Delay initial checks by 30 seconds
      stabilizationWindowSeconds: 30
      # Scale up one pod per minute
      policies:
        - type: Pods
          value: 1
          periodSeconds: 60
{{- end }}
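
For reference, the `scaleDown` rules above map onto the `k8s.io/api/autoscaling/v2` Go types that the new cluster helpers manipulate later in this diff. This is a sketch, not code from the commit; the chart leaves `selectPolicy` unset and Zarf toggles it at deploy time:

```go
package main

import (
    "fmt"

    autoscalingV2 "k8s.io/api/autoscaling/v2"
)

// registryScaleDownRules mirrors the chart's scaleDown block: a 60-second
// stabilization window and at most one pod removed per minute.
func registryScaleDownRules() *autoscalingV2.HPAScalingRules {
    window := int32(60)
    return &autoscalingV2.HPAScalingRules{
        StabilizationWindowSeconds: &window,
        Policies: []autoscalingV2.HPAScalingPolicy{
            {Type: autoscalingV2.PodsScalingPolicy, Value: 1, PeriodSeconds: 60},
        },
    }
}

func main() {
    fmt.Printf("scaleDown rules: %+v\n", *registryScaleDownRules())
}
```
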
6 changes: 6 additions & 0 deletions packages/zarf-registry/registry-values.yaml
@@ -30,3 +30,9 @@ fullnameOverride: "zarf-docker-registry"

podLabels:
  zarf.dev/agent: "ignore"

autoscaling:
  enabled: ###ZARF_VAR_REGISTRY_HPA_ENABLE###
  minReplicas: "###ZARF_VAR_REGISTRY_HPA_MIN###"
  maxReplicas: "###ZARF_VAR_REGISTRY_HPA_MAX###"
  targetCPUUtilizationPercentage: 80
12 changes: 12 additions & 0 deletions packages/zarf-registry/zarf.yaml
@@ -27,6 +27,18 @@ variables:
description: "The memory limit for the registry"
default: "2Gi"

- name: REGISTRY_HPA_MIN
description: "The minimum number of registry replicas"
default: "1"

- name: REGISTRY_HPA_MAX
description: "The maximum number of registry replicas"
default: "5"

- name: REGISTRY_HPA_ENABLE
description: "Enable the Horizontal Pod Autoscaler for the registry"
default: "true"

components:
- name: zarf-seed-registry
description: |
21 changes: 19 additions & 2 deletions src/internal/cluster/injector.go
@@ -26,8 +26,8 @@ import (
// The chunk size for the tarball chunks.
var payloadChunkSize = 1024 * 768

// RunInjectionMadness initializes a Zarf injection into the cluster.
func (c *Cluster) RunInjectionMadness(tempPath types.TempPaths) {
// StartInjectionMadness initializes a Zarf injection into the cluster.
func (c *Cluster) StartInjectionMadness(tempPath types.TempPaths) {
message.Debugf("packager.runInjectionMadness(%#v)", tempPath)

    spinner := message.NewProgressSpinner("Attempting to bootstrap the seed image into the cluster")
@@ -104,6 +104,23 @@ func (c *Cluster) RunInjectionMadness(tempPath types.TempPaths) {
    spinner.Fatalf(nil, "Unable to perform the injection")
}

// StopInjectionMadness handles cleanup once the seed registry is up.
func (c *Cluster) StopInjectionMadness() error {
    // Try to kill the injector pod now
    if err := c.Kube.DeletePod(ZarfNamespace, "injector"); err != nil {
        return err
    }

    // Remove the configmaps
    labelMatch := map[string]string{"zarf-injector": "payload"}
    if err := c.Kube.DeleteConfigMapsByLabel(ZarfNamespace, labelMatch); err != nil {
        return err
    }

    // Remove the injector service
    return c.Kube.DeleteService(ZarfNamespace, "zarf-injector")
}

func (c *Cluster) createPayloadConfigmaps(tempPath types.TempPaths, spinner *message.Spinner) ([]string, string, error) {
    message.Debugf("packager.tryInjectorPayloadDeploy(%#v)", tempPath)
    var configMaps []string
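
With the rename to `StartInjectionMadness` and the new `StopInjectionMadness`, the injector lifecycle is now explicit. A sketch of the intended sequencing (the real call site lives in the packager and is not shown in this diff):

```go
package packagersketch

import (
    "github.com/defenseunicorns/zarf/src/internal/cluster"
    "github.com/defenseunicorns/zarf/src/types"
)

// seedRegistry is illustrative only; it assumes it lives inside the Zarf module.
func seedRegistry(c *cluster.Cluster, tempPath types.TempPaths) error {
    // Bootstrap the injector pod and stream the seed image payload into the cluster.
    c.StartInjectionMadness(tempPath)

    // ... push images through the now-running seed registry ...

    // Tear down the injector pod, payload configmaps, and injector service.
    return c.StopInjectionMadness()
}
```
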
107 changes: 0 additions & 107 deletions src/internal/cluster/seed.go

This file was deleted.

74 changes: 74 additions & 0 deletions src/internal/cluster/state.go
@@ -202,3 +202,77 @@ func (c *Cluster) SaveZarfState(state types.ZarfState) error {

    return nil
}

func (c *Cluster) fillInEmptyContainerRegistryValues(containerRegistry types.RegistryInfo) types.RegistryInfo {
    // Set default NodePort if none was provided
    if containerRegistry.NodePort == 0 {
        containerRegistry.NodePort = config.ZarfInClusterContainerRegistryNodePort
    }

    // Set default url if an external registry was not provided
    if containerRegistry.Address == "" {
        containerRegistry.InternalRegistry = true
        containerRegistry.Address = fmt.Sprintf("%s:%d", config.IPV4Localhost, containerRegistry.NodePort)
    }

    // Generate a push-user password if not provided by init flag
    if containerRegistry.PushPassword == "" {
        containerRegistry.PushPassword = utils.RandomString(config.ZarfGeneratedPasswordLen)
    }

    // Set pull-username if not provided by init flag
    if containerRegistry.PullUsername == "" {
        if containerRegistry.InternalRegistry {
            containerRegistry.PullUsername = config.ZarfRegistryPullUser
        } else {
            // If this is an external registry and a pull-user wasn't provided, use the same credentials as the push user
            containerRegistry.PullUsername = containerRegistry.PushUsername
        }
    }
    if containerRegistry.PullPassword == "" {
        if containerRegistry.InternalRegistry {
            containerRegistry.PullPassword = utils.RandomString(config.ZarfGeneratedPasswordLen)
        } else {
            // If this is an external registry and a pull password wasn't provided, use the same credentials as the push user
            containerRegistry.PullPassword = containerRegistry.PushPassword
        }
    }

    if containerRegistry.Secret == "" {
        containerRegistry.Secret = utils.RandomString(config.ZarfGeneratedSecretLen)
    }

    return containerRegistry
}

// Fill in empty GitServerInfo values with the defaults.
func (c *Cluster) fillInEmptyGitServerValues(gitServer types.GitServerInfo) types.GitServerInfo {
    // Set default svc url if an external repository was not provided
    if gitServer.Address == "" {
        gitServer.Address = config.ZarfInClusterGitServiceURL
        gitServer.InternalServer = true
    }

    // Generate a push-user password if not provided by init flag
    if gitServer.PushPassword == "" {
        gitServer.PushPassword = utils.RandomString(config.ZarfGeneratedPasswordLen)
    }

    // Set read-user information if using an internal repository, otherwise copy from the push-user
    if gitServer.PullUsername == "" {
        if gitServer.InternalServer {
            gitServer.PullUsername = config.ZarfGitReadUser
        } else {
            gitServer.PullUsername = gitServer.PushUsername
        }
    }
    if gitServer.PullPassword == "" {
        if gitServer.InternalServer {
            gitServer.PullPassword = utils.RandomString(config.ZarfGeneratedPasswordLen)
        } else {
            gitServer.PullPassword = gitServer.PushPassword
        }
    }

    return gitServer
}
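
A sketch of how these helpers could back-fill an init-time state from inside the cluster package — the `fillDefaults` wrapper and the `RegistryInfo`/`GitServer` field names on `types.ZarfState` are assumptions, not shown in this diff:

```go
// fillDefaults is illustrative only and would live in the cluster package
// alongside the helpers above.
func (c *Cluster) fillDefaults(state *types.ZarfState) {
    // Each helper only touches fields the init flags left empty.
    state.RegistryInfo = c.fillInEmptyContainerRegistryValues(state.RegistryInfo)
    state.GitServer = c.fillInEmptyGitServerValues(state.GitServer)
}
```
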
39 changes: 39 additions & 0 deletions src/internal/cluster/zarf.go
@@ -11,6 +11,7 @@ import (
"github.com/defenseunicorns/zarf/src/config"
"github.com/defenseunicorns/zarf/src/pkg/message"
"github.com/defenseunicorns/zarf/src/types"
autoscalingV2 "k8s.io/api/autoscaling/v2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -98,3 +99,41 @@ func (c *Cluster) RecordPackageDeployment(pkg types.ZarfPackage, components []ty

    c.Kube.CreateOrUpdateSecret(deployedPackageSecret)
}

// EnableRegHPAScaleDown enables the HPA scale down for the Zarf Registry.
func (c *Cluster) EnableRegHPAScaleDown() error {
    hpa, err := c.Kube.GetHPA(ZarfNamespace, "zarf-docker-registry")
    if err != nil {
        return err
    }

    // Enable HPA scale down.
    policy := autoscalingV2.MinChangePolicySelect
    hpa.Spec.Behavior.ScaleDown.SelectPolicy = &policy

    // Save the HPA changes.
    if _, err = c.Kube.UpdateHPA(hpa); err != nil {
        return err
    }

    return nil
}

// DisableRegHPAScaleDown disables the HPA scale down for the Zarf Registry.
func (c *Cluster) DisableRegHPAScaleDown() error {
    hpa, err := c.Kube.GetHPA(ZarfNamespace, "zarf-docker-registry")
    if err != nil {
        return err
    }

    // Disable HPA scale down.
    policy := autoscalingV2.DisabledPolicySelect
    hpa.Spec.Behavior.ScaleDown.SelectPolicy = &policy

    // Save the HPA changes.
    if _, err = c.Kube.UpdateHPA(hpa); err != nil {
        return err
    }

    return nil
}
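
Setting the scale-down `selectPolicy` to `Disabled` turns off downscaling entirely while leaving scale-up untouched, so a long image push cannot cause the registry to shed replicas mid-deploy; switching it back to `Min` restores the chart's one-pod-per-minute scale-down behavior.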
36 changes: 36 additions & 0 deletions src/pkg/k8s/hpa.go
@@ -0,0 +1,36 @@
// SPDX-License-Identifier: Apache-2.0
// SPDX-FileCopyrightText: 2021-Present The Zarf Authors

// Package k8s provides a client for interacting with a Kubernetes cluster.
package k8s

import (
"context"

autoscalingV2 "k8s.io/api/autoscaling/v2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// GetAllHPAs returns a list of horizontal pod autoscalers for all namespaces.
func (k *K8s) GetAllHPAs() (*autoscalingV2.HorizontalPodAutoscalerList, error) {
return k.GetHPAs(corev1.NamespaceAll)
}

// GetHPAs returns a list of horizontal pod autoscalers in a given namespace.
func (k *K8s) GetHPAs(namespace string) (*autoscalingV2.HorizontalPodAutoscalerList, error) {
metaOptions := metav1.ListOptions{}
return k.Clientset.AutoscalingV2().HorizontalPodAutoscalers(namespace).List(context.TODO(), metaOptions)
}

// GetHPA returns a single horizontal pod autoscaler by namespace and name.
func (k *K8s) GetHPA(namespace, name string) (*autoscalingV2.HorizontalPodAutoscaler, error) {
metaOptions := metav1.GetOptions{}
return k.Clientset.AutoscalingV2().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metaOptions)
}

// UpdateHPA updates the given horizontal pod autoscaler in the cluster.
func (k *K8s) UpdateHPA(hpa *autoscalingV2.HorizontalPodAutoscaler) (*autoscalingV2.HorizontalPodAutoscaler, error) {
metaOptions := metav1.UpdateOptions{}
return k.Clientset.AutoscalingV2().HorizontalPodAutoscalers(hpa.Namespace).Update(context.TODO(), hpa, metaOptions)
}
