From 41c0b6303a97da2c04bc0e40bb0fb636219d308b Mon Sep 17 00:00:00 2001
From: Steve Kuznetsov
Date: Tue, 30 Apr 2024 18:55:31 -0600
Subject: [PATCH] nodepool_controller: add a reconciler for cleanup

When we change the config for a NodePool or the way in which we hash
the values, we leak token and user data secrets. However, since these
secrets are annotated with the NodePool they were created for, it's
simple to check that the Secret still matches what we expect and clean
it up otherwise.

Signed-off-by: Steve Kuznetsov
---
 .../controllers/nodepool/manifests.go         |  17 +-
 .../nodepool/nodepool_controller.go           | 230 ++++++++++++---
 .../nodepool/nodepool_controller_test.go      | 274 +++++++++++++++++-
 .../controllers/scheduler/autoscaler_test.go  |   5 +-
 4 files changed, 473 insertions(+), 53 deletions(-)

diff --git a/hypershift-operator/controllers/nodepool/manifests.go b/hypershift-operator/controllers/nodepool/manifests.go
index 2842290835..994b099171 100644
--- a/hypershift-operator/controllers/nodepool/manifests.go
+++ b/hypershift-operator/controllers/nodepool/manifests.go
@@ -42,20 +42,23 @@ func machineHealthCheck(nodePool *hyperv1.NodePool, controlPlaneNamespace string
 	}
 }
 
+const ignitionUserDataPrefix = "user-data"
+
 func IgnitionUserDataSecret(namespace, name, payloadInputHash string) *corev1.Secret {
-	return &corev1.Secret{
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: namespace,
-			Name:      fmt.Sprintf("user-data-%s-%s", name, payloadInputHash),
-		},
-	}
+	return namedSecret(namespace, fmt.Sprintf("%s-%s-%s", ignitionUserDataPrefix, name, payloadInputHash))
 }
 
+const tokenSecretPrefix = "token"
+
 func TokenSecret(namespace, name, payloadInputHash string) *corev1.Secret {
+	return namedSecret(namespace, fmt.Sprintf("%s-%s-%s", tokenSecretPrefix, name, payloadInputHash))
+}
+
+func namedSecret(namespace, name string) *corev1.Secret {
 	return &corev1.Secret{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
-			Name:      fmt.Sprintf("token-%s-%s", name, payloadInputHash),
+			Name:      name,
 		},
 	}
 }
diff --git a/hypershift-operator/controllers/nodepool/nodepool_controller.go b/hypershift-operator/controllers/nodepool/nodepool_controller.go
index 125e2561f9..61a438a779 100644
--- a/hypershift-operator/controllers/nodepool/nodepool_controller.go
+++ b/hypershift-operator/controllers/nodepool/nodepool_controller.go
@@ -117,7 +117,6 @@ type NodePoolReconciler struct {
 	client.Client
 	recorder        record.EventRecorder
 	ReleaseProvider releaseinfo.Provider
-	controller      controller.Controller
 	upsert.CreateOrUpdateProvider
 	HypershiftOperatorImage string
 	ImageMetadataProvider   supportutil.ImageMetadataProvider
@@ -141,7 +140,7 @@ var (
 )
 
 func (r *NodePoolReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	controller, err := ctrl.NewControllerManagedBy(mgr).
+	if err := ctrl.NewControllerManagedBy(mgr).
 		For(&hyperv1.NodePool{}, builder.WithPredicates(supportutil.PredicatesForHostedClusterAnnotationScoping(mgr.GetClient()))).
 		// We want to reconcile when the HostedCluster IgnitionEndpoint is available.
 		Watches(&hyperv1.HostedCluster{}, handler.EnqueueRequestsFromMapFunc(r.enqueueNodePoolsForHostedCluster), builder.WithPredicates(supportutil.PredicatesForHostedClusterAnnotationScoping(mgr.GetClient()))).
@@ -158,12 +157,22 @@ func (r *NodePoolReconciler) SetupWithManager(mgr ctrl.Manager) error {
 			RateLimiter:             workqueue.NewItemExponentialFailureRateLimiter(1*time.Second, 10*time.Second),
 			MaxConcurrentReconciles: 10,
 		}).
- Build(r) - if err != nil { + Complete(r); err != nil { + return errors.Wrap(err, "failed setting up with a controller manager") + } + + if err := ctrl.NewControllerManagedBy(mgr). + For(&corev1.Secret{}). + WithOptions(controller.Options{ + RateLimiter: workqueue.NewItemExponentialFailureRateLimiter(1*time.Second, 10*time.Second), + MaxConcurrentReconciles: 10, + }). + Complete(&secretJanitor{ + NodePoolReconciler: r, + }); err != nil { return errors.Wrap(err, "failed setting up with a controller manager") } - r.controller = controller r.recorder = mgr.GetEventRecorderFor("nodepool-controller") return nil @@ -267,26 +276,10 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho return ctrl.Result{}, nil } - // 1. - Reconcile conditions according to current state of the world. - proxy := globalconfig.ProxyConfig() - globalconfig.ReconcileProxyConfigWithStatusFromHostedCluster(proxy, hcluster) - - // NOTE: The image global config is not injected via userdata or NodePool ignition config. - // It is included directly by the ignition server. However, we need to detect the change - // here to trigger a nodepool update. - image := globalconfig.ImageConfig() - globalconfig.ReconcileImageConfigFromHostedCluster(image, hcluster) - - // Serialize proxy and image into a single string to use in the token secret hash. - globalConfigBytes := bytes.NewBuffer(nil) - enc := json.NewEncoder(globalConfigBytes) - if err := enc.Encode(proxy); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to encode proxy global config: %w", err) - } - if err := enc.Encode(image); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to encode image global config: %w", err) + globalConfig, err := globalConfigString(hcluster) + if err != nil { + return ctrl.Result{}, err } - globalConfig := globalConfigBytes.String() if isAutoscalingEnabled(nodePool) { SetStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{ @@ -562,11 +555,7 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho // 3 generic core config resources: fips, ssh and haproxy. // TODO (alberto): consider moving the expectedCoreConfigResources check // into the token Secret controller so we don't block Machine infra creation on this. - expectedCoreConfigResources := 3 - if len(hcluster.Spec.ImageContentSources) > 0 { - // additional core config resource created when image content source specified. 
- expectedCoreConfigResources += 1 - } + expectedCoreConfigResources := expectedCoreConfigResourcesForHostedCluster(hcluster) config, missingConfigs, err := r.getConfig(ctx, nodePool, expectedCoreConfigResources, controlPlaneNamespace, releaseImage, hcluster) if err != nil { SetStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{ @@ -643,7 +632,7 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho } // Signal ignition payload generation - targetPayloadConfigHash := supportutil.HashSimple(config + targetVersion + pullSecretName + globalConfig) + targetPayloadConfigHash := payloadConfigHash(config, targetVersion, pullSecretName, globalConfig) tokenSecret := TokenSecret(controlPlaneNamespace, nodePool.Name, targetPayloadConfigHash) condition, err := r.createValidGeneratedPayloadCondition(ctx, tokenSecret, nodePool.Generation) if err != nil { @@ -771,7 +760,7 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho return ctrl.Result{}, fmt.Errorf("failed to get token Secret: %w", err) } if err == nil { - if err := setExpirationTimestampOnToken(ctx, r.Client, tokenSecret); err != nil && !apierrors.IsNotFound(err) { + if err := setExpirationTimestampOnToken(ctx, r.Client, tokenSecret, nil); err != nil && !apierrors.IsNotFound(err) { return ctrl.Result{}, fmt.Errorf("failed to set expiration on token Secret: %w", err) } } @@ -818,7 +807,7 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho userDataSecret := IgnitionUserDataSecret(controlPlaneNamespace, nodePool.GetName(), targetPayloadConfigHash) if result, err := r.CreateOrUpdate(ctx, r.Client, userDataSecret, func() error { - return reconcileUserDataSecret(userDataSecret, nodePool, caCertBytes, tokenBytes, ignEndpoint, targetPayloadConfigHash, proxy) + return reconcileUserDataSecret(userDataSecret, nodePool, caCertBytes, tokenBytes, ignEndpoint, targetPayloadConfigHash, globalconfig.ProxyConfig()) }); err != nil { return ctrl.Result{}, err } else { @@ -996,6 +985,15 @@ func isArchAndPlatformSupported(nodePool *hyperv1.NodePool) bool { return supported } +func expectedCoreConfigResourcesForHostedCluster(hcluster *hyperv1.HostedCluster) int { + expectedCoreConfigResources := 3 + if len(hcluster.Spec.ImageContentSources) > 0 { + // additional core config resource created when image content source specified. + expectedCoreConfigResources += 1 + } + return expectedCoreConfigResources +} + // setMachineAndNodeConditions sets the nodePool's AllMachinesReady and AllNodesHealthy conditions. func (r *NodePoolReconciler) setMachineAndNodeConditions(ctx context.Context, nodePool *hyperv1.NodePool, hc *hyperv1.HostedCluster) error { // Get all Machines for NodePool. @@ -2827,13 +2825,17 @@ func validateInfraID(infraID string) error { return nil } -func setExpirationTimestampOnToken(ctx context.Context, c client.Client, tokenSecret *corev1.Secret) error { +func setExpirationTimestampOnToken(ctx context.Context, c client.Client, tokenSecret *corev1.Secret, now func() time.Time) error { + if now == nil { + now = time.Now + } + // this should be a reasonable value to allow all in flight provisions to complete. 
timeUntilExpiry := 2 * time.Hour if tokenSecret.Annotations == nil { tokenSecret.Annotations = map[string]string{} } - tokenSecret.Annotations[hyperv1.IgnitionServerTokenExpirationTimestampAnnotation] = time.Now().Add(timeUntilExpiry).Format(time.RFC3339) + tokenSecret.Annotations[hyperv1.IgnitionServerTokenExpirationTimestampAnnotation] = now().Add(timeUntilExpiry).Format(time.RFC3339) return c.Update(ctx, tokenSecret) } @@ -2913,8 +2915,12 @@ func (r *NodePoolReconciler) getPullSecretBytes(ctx context.Context, hostedClust // getPullSecretName retrieves the name of the pull secret in the hosted cluster spec func (r *NodePoolReconciler) getPullSecretName(ctx context.Context, hostedCluster *hyperv1.HostedCluster) (string, error) { + return getPullSecretName(ctx, r.Client, hostedCluster) +} + +func getPullSecretName(ctx context.Context, crclient client.Client, hostedCluster *hyperv1.HostedCluster) (string, error) { pullSecret := &corev1.Secret{} - if err := r.Client.Get(ctx, client.ObjectKey{Namespace: hostedCluster.Namespace, Name: hostedCluster.Spec.PullSecret.Name}, pullSecret); err != nil { + if err := crclient.Get(ctx, client.ObjectKey{Namespace: hostedCluster.Namespace, Name: hostedCluster.Spec.PullSecret.Name}, pullSecret); err != nil { return "", fmt.Errorf("cannot get pull secret %s/%s: %w", hostedCluster.Namespace, hostedCluster.Spec.PullSecret.Name, err) } if _, hasKey := pullSecret.Data[corev1.DockerConfigJsonKey]; !hasKey { @@ -2982,3 +2988,157 @@ func aggregateMachineMessages(msgs []string) string { return builder.String() } + +// secretJanitor reconciles secrets and determines which secrets should remain in the cluster and which should be cleaned up. +// Any secret annotated with a nodePool name should only be on the cluster if the nodePool continues to exist +// and if our current calculation for the inputs to the name matches what the secret is named. 
+type secretJanitor struct { + *NodePoolReconciler + + now func() time.Time +} + +func (r *secretJanitor) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + log := ctrl.LoggerFrom(ctx).WithValues("secret", req.String()) + + secret := &corev1.Secret{} + if err := r.Client.Get(ctx, req.NamespacedName, secret); err != nil { + if apierrors.IsNotFound(err) { + log.Info("not found", "request", req.String()) + return ctrl.Result{}, nil + } + log.Error(err, "error getting secret") + return ctrl.Result{}, err + } + + // only handle secrets that are associated with a NodePool + nodePoolName, annotated := secret.Annotations[nodePoolAnnotation] + if !annotated { + return ctrl.Result{}, nil + } + log = log.WithValues("nodePool", nodePoolName) + + // only handle secret types that we know about explicitly + shouldHandle := false + for _, prefix := range []string{tokenSecretPrefix, ignitionUserDataPrefix} { + if strings.HasPrefix(secret.Name, prefix) { + shouldHandle = true + break + } + } + if !shouldHandle { + return ctrl.Result{}, nil + } + + nodePool := &hyperv1.NodePool{} + if err := r.Client.Get(ctx, supportutil.ParseNamespacedName(nodePoolName), nodePool); err != nil && !apierrors.IsNotFound(err) { + log.Error(err, "error getting nodepool") + return ctrl.Result{}, err + } else if apierrors.IsNotFound(err) { + log.Info("removing secret as nodePool is missing") + return ctrl.Result{}, r.Client.Delete(ctx, secret) + } + + hcluster, err := GetHostedClusterByName(ctx, r.Client, nodePool.GetNamespace(), nodePool.Spec.ClusterName) + if err != nil { + return ctrl.Result{}, err + } + + controlPlaneNamespace := manifests.HostedControlPlaneNamespace(hcluster.Namespace, hcluster.Name) + expectedCoreConfigResources := expectedCoreConfigResourcesForHostedCluster(hcluster) + releaseImage, err := r.getReleaseImage(ctx, hcluster, nodePool.Status.Version, nodePool.Spec.Release.Image) + if err != nil { + return ctrl.Result{}, err + } + + config, missingConfigs, err := r.getConfig(ctx, nodePool, expectedCoreConfigResources, controlPlaneNamespace, releaseImage, hcluster) + if err != nil { + return ctrl.Result{}, err + } + if missingConfigs { + return ctrl.Result{}, nil + } + targetVersion := releaseImage.Version() + + pullSecretName, err := r.getPullSecretName(ctx, hcluster) + if err != nil { + return ctrl.Result{}, err + } + + globalConfig, err := globalConfigString(hcluster) + if err != nil { + return ctrl.Result{}, err + } + + targetPayloadConfigHash := payloadConfigHash(config, targetVersion, pullSecretName, globalConfig) + + // synchronously deleting the ignition token is unsafe; we need to clean up tokens by annotating them to expire + synchronousCleanup := func(ctx context.Context, c client.Client, secret *corev1.Secret) error { + return c.Delete(ctx, secret) + } + type nodePoolSecret struct { + expectedName string + matchingPrefix string + cleanup func(context.Context, client.Client, *corev1.Secret) error + } + valid := false + options := []nodePoolSecret{ + { + expectedName: TokenSecret(controlPlaneNamespace, nodePool.Name, targetPayloadConfigHash).Name, + matchingPrefix: tokenSecretPrefix, + cleanup: func(ctx context.Context, c client.Client, secret *corev1.Secret) error { + return setExpirationTimestampOnToken(ctx, c, secret, r.now) + }, + }, + { + expectedName: IgnitionUserDataSecret(controlPlaneNamespace, nodePool.GetName(), targetPayloadConfigHash).Name, + matchingPrefix: ignitionUserDataPrefix, + cleanup: synchronousCleanup, + }, + } + cleanup := synchronousCleanup + var 
names []string + for _, option := range options { + names = append(names, option.expectedName) + if secret.Name == option.expectedName { + valid = true + } + if strings.HasPrefix(secret.Name, option.matchingPrefix) { + cleanup = option.cleanup + } + } + + if valid { + return ctrl.Result{}, nil + } + + log.WithValues("options", names, "valid", valid).Info("removing secret as it does not match the expected set of names") + return ctrl.Result{}, cleanup(ctx, r.Client, secret) +} + +func globalConfigString(hcluster *hyperv1.HostedCluster) (string, error) { + // 1. - Reconcile conditions according to current state of the world. + proxy := globalconfig.ProxyConfig() + globalconfig.ReconcileProxyConfigWithStatusFromHostedCluster(proxy, hcluster) + + // NOTE: The image global config is not injected via userdata or NodePool ignition config. + // It is included directly by the ignition server. However, we need to detect the change + // here to trigger a nodepool update. + image := globalconfig.ImageConfig() + globalconfig.ReconcileImageConfigFromHostedCluster(image, hcluster) + + // Serialize proxy and image into a single string to use in the token secret hash. + globalConfigBytes := bytes.NewBuffer(nil) + enc := json.NewEncoder(globalConfigBytes) + if err := enc.Encode(proxy); err != nil { + return "", fmt.Errorf("failed to encode proxy global config: %w", err) + } + if err := enc.Encode(image); err != nil { + return "", fmt.Errorf("failed to encode image global config: %w", err) + } + return globalConfigBytes.String(), nil +} + +func payloadConfigHash(config, targetVersion, pullSecretName, globalConfig string) string { + return supportutil.HashSimple(config + targetVersion + pullSecretName + globalConfig) +} diff --git a/hypershift-operator/controllers/nodepool/nodepool_controller_test.go b/hypershift-operator/controllers/nodepool/nodepool_controller_test.go index 16f2737477..f6b6af4bf3 100644 --- a/hypershift-operator/controllers/nodepool/nodepool_controller_test.go +++ b/hypershift-operator/controllers/nodepool/nodepool_controller_test.go @@ -10,19 +10,26 @@ import ( "time" "github.com/go-logr/logr" + "github.com/go-logr/zapr" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" . 
"github.com/onsi/gomega" "github.com/openshift/api/image/docker10" imagev1 "github.com/openshift/api/image/v1" + "github.com/openshift/hypershift/support/supportedversion" + "github.com/openshift/hypershift/support/testutil" + "go.uber.org/zap/zaptest" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + testingclock "k8s.io/utils/clock/testing" "k8s.io/utils/ptr" capiaws "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" capiv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -37,7 +44,6 @@ import ( "github.com/openshift/hypershift/support/api" "github.com/openshift/hypershift/support/releaseinfo" fakereleaseprovider "github.com/openshift/hypershift/support/releaseinfo/fake" - "github.com/openshift/hypershift/support/testutil" "github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/dockerv1client" "github.com/openshift/hypershift/support/upsert" "github.com/openshift/hypershift/support/util/fakeimagemetadataprovider" @@ -2059,6 +2065,12 @@ func TestGetNodePoolNamespacedName(t *testing.T) { } func TestSetExpirationTimestampOnToken(t *testing.T) { + theTime, err := time.Parse(time.RFC3339Nano, "2006-01-02T15:04:05.999999999Z") + if err != nil { + t.Fatalf("could not parse time: %v", err) + } + fakeClock := testingclock.NewFakeClock(theTime) + fakeName := "test-token" fakeNamespace := "master-cluster1" fakeCurrentTokenVal := "tokenval1" @@ -2084,7 +2096,7 @@ func TestSetExpirationTimestampOnToken(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) c := fake.NewClientBuilder().WithObjects(tc.inputSecret).Build() - err := setExpirationTimestampOnToken(context.Background(), c, tc.inputSecret) + err := setExpirationTimestampOnToken(context.Background(), c, tc.inputSecret, fakeClock.Now) g.Expect(err).To(Not(HaveOccurred())) actualSecretData := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -2094,13 +2106,9 @@ func TestSetExpirationTimestampOnToken(t *testing.T) { } err = c.Get(context.Background(), client.ObjectKeyFromObject(actualSecretData), actualSecretData) g.Expect(err).To(Not(HaveOccurred())) - rawExpirationTimestamp, ok := actualSecretData.Annotations[hyperv1.IgnitionServerTokenExpirationTimestampAnnotation] - g.Expect(ok).To(BeTrue()) - expirationTimestamp, err := time.Parse(time.RFC3339, rawExpirationTimestamp) - g.Expect(err).To(Not(HaveOccurred())) - // ensures the 2 hour expiration is active. 119 minutes is one minute less than 2 hours. gives time for - // test to run. 
- g.Expect(time.Now().Add(119 * time.Minute).Before(expirationTimestamp)).To(BeTrue()) + g.Expect(actualSecretData.Annotations).To(testutil.MatchExpected(map[string]string{ + hyperv1.IgnitionServerTokenExpirationTimestampAnnotation: theTime.Add(2 * time.Hour).Format(time.RFC3339), + })) }) } } @@ -3309,3 +3317,251 @@ func TestReconcileMachineHealthCheck(t *testing.T) { }) } } + +func TestSecretJanitor_Reconcile(t *testing.T) { + ctx := ctrl.LoggerInto(context.Background(), zapr.NewLogger(zaptest.NewLogger(t))) + + theTime, err := time.Parse(time.RFC3339Nano, "2006-01-02T15:04:05.999999999Z") + if err != nil { + t.Fatalf("could not parse time: %v", err) + } + fakeClock := testingclock.NewFakeClock(theTime) + + pullSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "pull-secret", Namespace: "myns"}, + Data: map[string][]byte{ + corev1.DockerConfigJsonKey: []byte("whatever"), + }, + } + + hostedCluster := &hyperv1.HostedCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster-name", Namespace: "myns"}, + Spec: hyperv1.HostedClusterSpec{ + PullSecret: corev1.LocalObjectReference{Name: pullSecret.Name}, + }, + } + + nodePool := &hyperv1.NodePool{ + ObjectMeta: metav1.ObjectMeta{Name: "nodepool-name", Namespace: "myns"}, + Spec: hyperv1.NodePoolSpec{ + ClusterName: hostedCluster.Name, + Release: hyperv1.Release{Image: "fake-release-image"}, + Config: []corev1.LocalObjectReference{ + {Name: "machineconfig-1"}, + }, + }, + Status: hyperv1.NodePoolStatus{Version: supportedversion.LatestSupportedVersion.String()}, + } + + coreMachineConfig := ` +apiVersion: machineconfiguration.openshift.io/v1 +kind: MachineConfig +metadata: + labels: + machineconfiguration.openshift.io/role: master + name: config-1 +spec: + config: + ignition: + version: 3.2.0 + storage: + files: + - contents: + source: "[Service]\nType=oneshot\nExecStart=/usr/bin/echo Hello World\n\n[Install]\nWantedBy=multi-user.target" + filesystem: root + mode: 493 + path: /usr/local/bin/file1.sh +` + + machineConfig := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machineconfig-1", + Namespace: "myns", + }, + Data: map[string]string{ + TokenSecretConfigKey: coreMachineConfig, + }, + } + + ignitionConfig := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "core-machineconfig", + Namespace: "myns-cluster-name", + Labels: map[string]string{ + nodePoolCoreIgnitionConfigLabel: "true", + }, + }, + Data: map[string]string{ + TokenSecretConfigKey: coreMachineConfig, + }, + } + ignitionConfig2 := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "core-machineconfig-2", + Namespace: "myns-cluster-name", + Labels: map[string]string{ + nodePoolCoreIgnitionConfigLabel: "true", + }, + }, + Data: map[string]string{ + TokenSecretConfigKey: coreMachineConfig, + }, + } + ignitionConfig3 := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "core-machineconfig-3", + Namespace: "myns-cluster-name", + Labels: map[string]string{ + nodePoolCoreIgnitionConfigLabel: "true", + }, + }, + Data: map[string]string{ + TokenSecretConfigKey: coreMachineConfig, + }, + } + + c := fake.NewClientBuilder().WithScheme(api.Scheme).WithObjects( + nodePool, + hostedCluster, + pullSecret, + machineConfig, + ignitionConfig, + ignitionConfig2, + ignitionConfig3, + ).Build() + r := secretJanitor{ + NodePoolReconciler: &NodePoolReconciler{ + Client: c, + ReleaseProvider: &fakereleaseprovider.FakeReleaseProvider{Version: supportedversion.LatestSupportedVersion.String()}, + ImageMetadataProvider: 
&fakeimagemetadataprovider.FakeImageMetadataProvider{Result: &dockerv1client.DockerImageConfig{Config: &docker10.DockerConfig{ + Labels: map[string]string{}, + }}}, + }, + + now: fakeClock.Now, + } + + for _, testCase := range []struct { + name string + input *corev1.Secret + expected *corev1.Secret + }{ + { + name: "unrelated secret untouched", + input: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "whatever", + Namespace: "myns", + }, + }, + expected: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "whatever", + Namespace: "myns", + }, + }, + }, + { + name: "related but not known secret untouched", + input: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "related", + Namespace: "myns", + Annotations: map[string]string{ + nodePoolAnnotation: client.ObjectKeyFromObject(nodePool).String(), + }, + }, + }, + expected: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "related", + Namespace: "myns", + Annotations: map[string]string{ + nodePoolAnnotation: client.ObjectKeyFromObject(nodePool).String(), + }, + }, + }, + }, + { + name: "related known secret with correct hash untouched", + input: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "token-nodepool-name-da03707e", + Namespace: "myns", + Annotations: map[string]string{ + nodePoolAnnotation: client.ObjectKeyFromObject(nodePool).String(), + }, + }, + }, + expected: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "token-nodepool-name-da03707e", + Namespace: "myns", + Annotations: map[string]string{ + nodePoolAnnotation: client.ObjectKeyFromObject(nodePool).String(), + }, + }, + }, + }, + { + name: "related token secret with incorrect hash set for expiry", + input: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "token-nodepool-name-jsadfkjh23", + Namespace: "myns", + Annotations: map[string]string{ + nodePoolAnnotation: client.ObjectKeyFromObject(nodePool).String(), + }, + }, + }, + expected: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "token-nodepool-name-jsadfkjh23", + Namespace: "myns", + Annotations: map[string]string{ + nodePoolAnnotation: client.ObjectKeyFromObject(nodePool).String(), + "hypershift.openshift.io/ignition-token-expiration-timestamp": "2006-01-02T17:04:05Z", + }, + }, + }, + }, + { + name: "related ignition user data secret with incorrect hash deleted", + input: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "user-data-nodepool-name-jsadfkjh23", + Namespace: "myns", + Annotations: map[string]string{ + nodePoolAnnotation: client.ObjectKeyFromObject(nodePool).String(), + }, + }, + }, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + if err := c.Create(ctx, testCase.input); err != nil { + t.Errorf("failed to create object: %v", err) + } + + key := client.ObjectKeyFromObject(testCase.input) + if _, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: key}); err != nil { + t.Errorf("failed to reconcile object: %v", err) + } + + got := &corev1.Secret{} + err := c.Get(ctx, client.ObjectKeyFromObject(testCase.input), got) + if testCase.expected == nil { + if !apierrors.IsNotFound(err) { + t.Errorf("expected object to not exist, got error: %v", err) + } + } else { + if err != nil { + t.Errorf("failed to fetch object: %v", err) + } + if diff := cmp.Diff(got, testCase.expected, cmpopts.IgnoreFields(metav1.ObjectMeta{}, "ResourceVersion")); diff != "" { + t.Errorf("got unexpected object after reconcile: %v", diff) + } + } + }) + } +} diff --git a/hypershift-operator/controllers/scheduler/autoscaler_test.go 
b/hypershift-operator/controllers/scheduler/autoscaler_test.go index b20fda862c..399c9dc4ff 100644 --- a/hypershift-operator/controllers/scheduler/autoscaler_test.go +++ b/hypershift-operator/controllers/scheduler/autoscaler_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" machinev1beta1 "github.com/openshift/api/machine/v1beta1" hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" @@ -503,7 +504,7 @@ func TestDetermineRequiredNodes(t *testing.T) { t.Run(test.name, func(t *testing.T) { g := NewGomegaWithT(t) actual := determineRequiredNodes(pendingPods(test.pods), test.pods, test.nodes) - g.Expect(actual).To(BeEquivalentTo(test.expected)) + g.Expect(actual).To(testutil.MatchExpected(test.expected, cmp.AllowUnexported(nodeRequirement{}))) }) } } @@ -854,7 +855,7 @@ func TestNonRequestServingMachineSetsToScale(t *testing.T) { t.Run(test.name, func(t *testing.T) { g := NewGomegaWithT(t) actual := nonRequestServingMachineSetsToScale(context.Background(), cfg, test.hostedClusters, test.machineSets) - g.Expect(actual).To(BeEquivalentTo(test.expect)) + g.Expect(actual).To(testutil.MatchExpected(test.expect, cmp.AllowUnexported(machineSetReplicas{}))) }) } }
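
The cleanup rule the new secretJanitor.Reconcile enforces is easiest to see in isolation: token and user-data Secrets are named "<prefix>-<nodePoolName>-<payloadHash>", where the hash covers the rendered config, target version, pull-secret name, and global config, so any annotated Secret whose suffix no longer matches the freshly computed hash is stale. The following is a minimal standalone sketch of that decision, not code from the HyperShift tree; the decide helper, action type, and sample values are invented for illustration.

// Standalone sketch of the cleanup decision introduced by secretJanitor.Reconcile
// in this patch (simplified; decide, action, and the sample names are invented).
package main

import (
	"fmt"
	"strings"
)

type action string

const (
	keep   action = "keep"                // name matches the expected hash
	expire action = "annotate-for-expiry" // stale token Secret: let in-flight provisions finish
	remove action = "delete"              // stale user-data Secret: safe to delete immediately
)

// decide assumes the Secret is already known to carry the nodePool annotation,
// since the janitor only considers annotated Secrets.
func decide(secretName, nodePoolName, currentHash string) action {
	expectedToken := fmt.Sprintf("token-%s-%s", nodePoolName, currentHash)
	expectedUserData := fmt.Sprintf("user-data-%s-%s", nodePoolName, currentHash)

	switch {
	case secretName == expectedToken, secretName == expectedUserData:
		return keep
	case strings.HasPrefix(secretName, "token-"):
		return expire
	case strings.HasPrefix(secretName, "user-data-"):
		return remove
	default:
		return keep // prefixes the janitor does not know about are left alone
	}
}

func main() {
	for _, name := range []string{
		"token-nodepool-name-da03707e",     // current hash: untouched
		"token-nodepool-name-deadbeef",     // stale hash: annotated to expire
		"user-data-nodepool-name-deadbeef", // stale hash: deleted
		"some-other-secret",                // unknown type: ignored
	} {
		fmt.Printf("%-36s -> %s\n", name, decide(name, "nodepool-name", "da03707e"))
	}
}

Stale token Secrets are only annotated to expire rather than deleted outright, matching the patch's comment that synchronous deletion of an ignition token is unsafe while provisions may still be in flight; stale user-data Secrets can be deleted immediately.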
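The change to setExpirationTimestampOnToken also shows the injectable-clock pattern the rewritten test relies on: production callers pass nil and get time.Now, while tests pass a fake clock and can assert an exact timestamp. A small sketch under those assumptions follows; stampExpiration is a hypothetical stand-in, but the annotation key, the two-hour window, and the expected output match the diff above.

// Sketch of the injectable-clock pattern this patch threads through
// setExpirationTimestampOnToken so tests can pin the expiry timestamp.
package main

import (
	"fmt"
	"time"
)

// Mirrors hyperv1.IgnitionServerTokenExpirationTimestampAnnotation as it appears
// in the test expectations of this patch.
const tokenExpirationAnnotation = "hypershift.openshift.io/ignition-token-expiration-timestamp"

func stampExpiration(annotations map[string]string, now func() time.Time) {
	if now == nil {
		now = time.Now // production callers pass nil and get the real clock
	}
	// Two hours leaves room for in-flight provisions to finish before the token dies.
	annotations[tokenExpirationAnnotation] = now().Add(2 * time.Hour).Format(time.RFC3339)
}

func main() {
	fixedNow := func() time.Time { return time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC) }
	annotations := map[string]string{}
	stampExpiration(annotations, fixedNow)
	fmt.Println(annotations[tokenExpirationAnnotation]) // 2006-01-02T17:04:05Z, as asserted in the test
}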