From f00eefdc6c3e1f128a6fde6bd8b3c71977f8e6f4 Mon Sep 17 00:00:00 2001 From: Bryan Cox Date: Fri, 21 Oct 2022 08:54:51 -0400 Subject: [PATCH] Retrieve CAPI/CAPA from release image Retrieves the CAPI and CAPA image components from the release image rather than registry.ci.openshift.org/hypershift/cluster-api:v1.0.0 and registry.ci.openshift.org/hypershift/cluster-api-aws-controller:v1.1.0 respectively --- .../hostedcluster/hostedcluster_controller.go | 59 ++++-------- .../hostedcluster_controller_test.go | 18 +++- .../hostedcluster_webhook_test.go | 7 +- .../internal/platform/aws/aws.go | 17 ++-- .../internal/platform/platform.go | 23 ++++- .../nodepool/nodepool_controller.go | 90 +++++++++++++++---- support/releaseinfo/registryclient/client.go | 2 +- support/util/imagemetadata.go | 17 ++++ 8 files changed, 160 insertions(+), 73 deletions(-) diff --git a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go index 75f690d95bd..14416020151 100644 --- a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go +++ b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go @@ -108,10 +108,7 @@ const ( HostedClusterAnnotation = "hypershift.openshift.io/cluster" clusterDeletionRequeueDuration = 5 * time.Second - // Image built from https://github.com/openshift/cluster-api/tree/release-1.0 - // Upstream canonical image comes from https://console.cloud.google.com/gcr/images/k8s-staging-cluster-api/global/ - // us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:v1.0.0 - imageCAPI = "registry.ci.openshift.org/hypershift/cluster-api:v1.0.0" + ImageStreamCAPI = "cluster-capi-controllers" ImageStreamAutoscalerImage = "cluster-autoscaler" ImageStreamClusterMachineApproverImage = "cluster-machine-approver" @@ -830,7 +827,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques _, ignitionServerHasHealthzHandler := hyperutil.ImageLabels(controlPlaneOperatorImageMetadata)[ignitionServerHealthzHandlerLabel] _, controlplaneOperatorManagesIgnitionServer := hyperutil.ImageLabels(controlPlaneOperatorImageMetadata)[controlplaneOperatorManagesIgnitionServerLabel] - p, err := platform.GetPlatform(hcluster, utilitiesImage) + p, err := platform.GetPlatform(hcluster, utilitiesImage, pullSecretBytes) if err != nil { return ctrl.Result{}, err } @@ -1260,7 +1257,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques } // Reconcile the CAPI manager components - err = r.reconcileCAPIManager(ctx, createOrUpdate, hcluster, hcp) + err = r.reconcileCAPIManager(ctx, createOrUpdate, hcluster, hcp, pullSecretBytes) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile capi manager: %w", err) } @@ -1275,14 +1272,14 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques // TODO (alberto): drop this after dropping < 4.11 support. if _, hasLabel := hyperutil.ImageLabels(controlPlaneOperatorImageMetadata)[controlPlaneOperatorManagesMachineAutoscaler]; !hasLabel { // Reconcile the autoscaler. 
- err = r.reconcileAutoscaler(ctx, createOrUpdate, hcluster, hcp, utilitiesImage) + err = r.reconcileAutoscaler(ctx, createOrUpdate, hcluster, hcp, utilitiesImage, pullSecretBytes) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile autoscaler: %w", err) } } if _, hasLabel := hyperutil.ImageLabels(controlPlaneOperatorImageMetadata)[controlPlaneOperatorManagesMachineApprover]; !hasLabel { // Reconcile the machine approver. - if err = r.reconcileMachineApprover(ctx, createOrUpdate, hcluster, hcp, utilitiesImage); err != nil { + if err = r.reconcileMachineApprover(ctx, createOrUpdate, hcluster, hcp, utilitiesImage, pullSecretBytes); err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile machine approver: %w", err) } } @@ -1516,7 +1513,7 @@ func ensureHCPAWSRolesBackwardCompatibility(hc *hyperv1.HostedCluster, hcp *hype } // reconcileCAPIManager orchestrates orchestrates of all CAPI manager components. -func (r *HostedClusterReconciler) reconcileCAPIManager(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane) error { +func (r *HostedClusterReconciler) reconcileCAPIManager(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, pullSecretBytes []byte) error { controlPlaneNamespace := manifests.HostedControlPlaneNamespace(hcluster.Namespace, hcluster.Name) err := r.Client.Get(ctx, client.ObjectKeyFromObject(controlPlaneNamespace), controlPlaneNamespace) if err != nil { @@ -1604,7 +1601,10 @@ func (r *HostedClusterReconciler) reconcileCAPIManager(ctx context.Context, crea } // Reconcile CAPI manager deployment - capiImage := imageCAPI + capiImage, err := hyperutil.GetPayloadImage(ctx, hcluster, ImageStreamCAPI, pullSecretBytes) + if err != nil { + return fmt.Errorf("failed to retrieve capi image: %w", err) + } if envImage := os.Getenv(images.CAPIEnvVar); len(envImage) > 0 { capiImage = envImage } @@ -1849,8 +1849,8 @@ func servicePublishingStrategyByType(hcp *hyperv1.HostedCluster, svcType hyperv1 // reconcileAutoscaler orchestrates reconciliation of autoscaler components using // both the HostedCluster and the HostedControlPlane which the autoscaler takes // inputs from. -func (r *HostedClusterReconciler) reconcileAutoscaler(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string) error { - clusterAutoscalerImage, err := r.getPayloadImage(ctx, hcluster, ImageStreamAutoscalerImage) +func (r *HostedClusterReconciler) reconcileAutoscaler(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string, pullSecretBytes []byte) error { + clusterAutoscalerImage, err := hyperutil.GetPayloadImage(ctx, hcluster, ImageStreamAutoscalerImage, pullSecretBytes) if err != nil { return fmt.Errorf("failed to get image for machine approver: %w", err) } @@ -2437,7 +2437,7 @@ func reconcileCAPIManagerDeployment(deployment *appsv1.Deployment, hc *hyperv1.H }, }, }, - Command: []string{"/manager"}, + Command: []string{"/bin/cluster-api-controller-manager"}, Args: []string{"--namespace", "$(MY_NAMESPACE)", "--alsologtostderr", "--v=4", @@ -2917,7 +2917,7 @@ func (r *HostedClusterReconciler) delete(ctx context.Context, hc *hyperv1.Hosted } // Cleanup Platform specifics. 
- p, err := platform.GetPlatform(hc, "") + p, err := platform.GetPlatform(hc, "", nil) if err != nil { return false, err } @@ -3076,8 +3076,8 @@ func (r *HostedClusterReconciler) reconcileClusterPrometheusRBAC(ctx context.Con return nil } -func (r *HostedClusterReconciler) reconcileMachineApprover(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string) error { - machineApproverImage, err := r.getPayloadImage(ctx, hcluster, ImageStreamClusterMachineApproverImage) +func (r *HostedClusterReconciler) reconcileMachineApprover(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string, pullSecretBytes []byte) error { + machineApproverImage, err := hyperutil.GetPayloadImage(ctx, hcluster, ImageStreamClusterMachineApproverImage, pullSecretBytes) if err != nil { return fmt.Errorf("failed to get image for machine approver: %w", err) } @@ -3916,33 +3916,6 @@ func validateClusterID(hc *hyperv1.HostedCluster) error { return nil } -// getReleaseImage get the releaseInfo releaseImage for a given HC release image reference. -func (r *HostedClusterReconciler) getReleaseImage(ctx context.Context, hc *hyperv1.HostedCluster) (*releaseinfo.ReleaseImage, error) { - var pullSecret corev1.Secret - if err := r.Client.Get(ctx, types.NamespacedName{Namespace: hc.Namespace, Name: hc.Spec.PullSecret.Name}, &pullSecret); err != nil { - return nil, fmt.Errorf("failed to get pull secret: %w", err) - } - pullSecretBytes, ok := pullSecret.Data[corev1.DockerConfigJsonKey] - if !ok { - return nil, fmt.Errorf("expected %s key in pull secret", corev1.DockerConfigJsonKey) - } - return r.ReleaseProvider.Lookup(ctx, hc.Spec.Release.Image, pullSecretBytes) -} - -// getPayloadImage get an image from the payload for a particular component. 
-func (r *HostedClusterReconciler) getPayloadImage(ctx context.Context, hc *hyperv1.HostedCluster, component string) (string, error) { - releaseImage, err := r.getReleaseImage(ctx, hc) - if err != nil { - return "", fmt.Errorf("failed to lookup release image: %w", err) - } - - image, exists := releaseImage.ComponentImages()[component] - if !exists { - return "", fmt.Errorf("image does not exist for release: %q", image) - } - return image, nil -} - func (r *HostedClusterReconciler) reconcileServiceAccountSigningKey(ctx context.Context, hc *hyperv1.HostedCluster, targetNamespace string, createOrUpdate upsert.CreateOrUpdateFN) error { privateBytes, publicBytes, err := r.serviceAccountSigningKeyBytes(ctx, hc) if err != nil { diff --git a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go index 110bd76a9a3..72a9d4c916a 100644 --- a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go +++ b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go @@ -15,6 +15,7 @@ import ( configv1 "github.com/openshift/api/config/v1" "github.com/openshift/hypershift/api" hyperv1 "github.com/openshift/hypershift/api/v1alpha1" + version "github.com/openshift/hypershift/cmd/version" "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests" platformaws "github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster/internal/platform/aws" "github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster/internal/platform/kubevirt" @@ -840,7 +841,7 @@ func expectedRules(addRules []rbacv1.PolicyRule) []rbacv1.PolicyRule { } func TestHostedClusterWatchesEverythingItCreates(t *testing.T) { - + releaseImage, _ := version.LookupDefaultOCPVersion() hostedClusters := []*hyperv1.HostedCluster{ { ObjectMeta: metav1.ObjectMeta{Name: "agent"}, @@ -849,6 +850,9 @@ func TestHostedClusterWatchesEverythingItCreates(t *testing.T) { Type: hyperv1.AgentPlatform, Agent: &hyperv1.AgentPlatformSpec{AgentNamespace: "agent-namespace"}, }, + Release: hyperv1.Release{ + Image: releaseImage.PullSpec, + }, }, Status: hyperv1.HostedClusterStatus{ IgnitionEndpoint: "ign", @@ -872,6 +876,9 @@ func TestHostedClusterWatchesEverythingItCreates(t *testing.T) { }, }, }, + Release: hyperv1.Release{ + Image: releaseImage.PullSpec, + }, }, }, { @@ -880,6 +887,9 @@ func TestHostedClusterWatchesEverythingItCreates(t *testing.T) { Platform: hyperv1.PlatformSpec{ Type: hyperv1.NonePlatform, }, + Release: hyperv1.Release{ + Image: releaseImage.PullSpec, + }, }, }, { @@ -889,6 +899,9 @@ func TestHostedClusterWatchesEverythingItCreates(t *testing.T) { Type: hyperv1.IBMCloudPlatform, IBMCloud: &hyperv1.IBMCloudPlatformSpec{}, }, + Release: hyperv1.Release{ + Image: releaseImage.PullSpec, + }, }, }, { @@ -897,6 +910,9 @@ func TestHostedClusterWatchesEverythingItCreates(t *testing.T) { Platform: hyperv1.PlatformSpec{ Type: hyperv1.KubevirtPlatform, }, + Release: hyperv1.Release{ + Image: releaseImage.PullSpec, + }, }, }, } diff --git a/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook_test.go b/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook_test.go index 36135ef0d5e..f0ad03bdc27 100644 --- a/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook_test.go +++ b/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook_test.go @@ -10,6 +10,7 @@ import ( hyperapi 
"github.com/openshift/hypershift/api" "github.com/openshift/hypershift/api/util/ipnet" hyperv1 "github.com/openshift/hypershift/api/v1alpha1" + version "github.com/openshift/hypershift/cmd/version" fakecapabilities "github.com/openshift/hypershift/support/capabilities/fake" fakereleaseprovider "github.com/openshift/hypershift/support/releaseinfo/fake" "github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/dockerv1client" @@ -30,6 +31,7 @@ import ( ) func TestWebhookAllowsHostedClusterReconcilerUpdates(t *testing.T) { + releaseImage, _ := version.LookupDefaultOCPVersion() t.Parallel() testCases := []struct { name string @@ -55,6 +57,9 @@ func TestWebhookAllowsHostedClusterReconcilerUpdates(t *testing.T) { }, }, }, + Release: hyperv1.Release{ + Image: releaseImage.PullSpec, + }, }, }, additionalObjects: []crclient.Object{ @@ -64,7 +69,7 @@ func TestWebhookAllowsHostedClusterReconcilerUpdates(t *testing.T) { }, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns"}, - Data: map[string][]byte{".dockerconfigjson": []byte("something")}, + Data: map[string][]byte{".dockerconfigjson": []byte("{\"something\": \"something\"}")}, }, &configv1.Ingress{ObjectMeta: metav1.ObjectMeta{Name: "cluster"}}, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "none-cluster"}}, diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws.go b/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws.go index 34cbd87253c..7db8618c599 100644 --- a/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws.go +++ b/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws.go @@ -25,20 +25,19 @@ import ( ) const ( - // Image built from https://github.com/openshift/cluster-api-provider-aws/tree/release-1.1 - // Upstream canonical image comes from https://console.cloud.google.com/gcr/images/k8s-artifacts-prod - // us.gcr.io/k8s-artifacts-prod/cluster-api-aws/cluster-api-aws-controller:v1.1.0 - imageCAPA = "registry.ci.openshift.org/hypershift/cluster-api-aws-controller:v1.1.0" + ImageStreamCAPA = "aws-cluster-api-controllers" ) -func New(utilitiesImage string) *AWS { +func New(utilitiesImage string, capiProviderImage string) *AWS { return &AWS{ - utilitiesImage: utilitiesImage, + utilitiesImage: utilitiesImage, + capiProviderImage: capiProviderImage, } } type AWS struct { - utilitiesImage string + utilitiesImage string + capiProviderImage string } func (p AWS) ReconcileCAPIInfraCR(ctx context.Context, c client.Client, createOrUpdate upsert.CreateOrUpdateFN, @@ -63,7 +62,7 @@ func (p AWS) ReconcileCAPIInfraCR(ctx context.Context, c client.Client, createOr } func (p AWS) CAPIProviderDeploymentSpec(hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane) (*appsv1.DeploymentSpec, error) { - providerImage := imageCAPA + providerImage := p.capiProviderImage if envImage := os.Getenv(images.AWSCAPIProviderEnvVar); len(envImage) > 0 { providerImage = envImage } @@ -161,7 +160,7 @@ func (p AWS) CAPIProviderDeploymentSpec(hcluster *hyperv1.HostedCluster, hcp *hy Value: "true", }, }, - Command: []string{"/manager"}, + Command: []string{"/bin/cluster-api-provider-aws-controller-manager"}, Args: []string{"--namespace", "$(MY_NAMESPACE)", "--alsologtostderr", "--v=4", diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/platform.go b/hypershift-operator/controllers/hostedcluster/internal/platform/platform.go index 23fd7e750d0..8660d166669 100644 --- 
a/hypershift-operator/controllers/hostedcluster/internal/platform/platform.go +++ b/hypershift-operator/controllers/hostedcluster/internal/platform/platform.go @@ -13,11 +13,16 @@ import ( "github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster/internal/platform/none" "github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster/internal/platform/powervs" "github.com/openshift/hypershift/support/upsert" + imgUtil "github.com/openshift/hypershift/support/util" appsv1 "k8s.io/api/apps/v1" rbacv1 "k8s.io/api/rbac/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) +const ( + AWSCAPIProvider = "aws-cluster-api-controllers" +) + var _ Platform = aws.AWS{} var _ Platform = ibmcloud.IBMCloud{} var _ Platform = none.None{} @@ -57,11 +62,23 @@ type Platform interface { DeleteCredentials(ctx context.Context, c client.Client, hcluster *hyperv1.HostedCluster, controlPlaneNamespace string) error } -func GetPlatform(hcluster *hyperv1.HostedCluster, utilitiesImage string) (Platform, error) { - var platform Platform +// GetPlatform gets and initializes the cloud platform the hosted cluster was created on +func GetPlatform(hcluster *hyperv1.HostedCluster, utilitiesImage string, pullSecretBytes []byte) (Platform, error) { + var ( + platform Platform + capiImageProvider string + err error + ) + switch hcluster.Spec.Platform.Type { case hyperv1.AWSPlatform: - platform = aws.New(utilitiesImage) + if pullSecretBytes != nil { + capiImageProvider, err = imgUtil.GetPayloadImage(context.TODO(), hcluster, AWSCAPIProvider, pullSecretBytes) + if err != nil { + return nil, fmt.Errorf("failed to retrieve capa image: %w", err) + } + } + platform = aws.New(utilitiesImage, capiImageProvider) case hyperv1.IBMCloudPlatform: platform = &ibmcloud.IBMCloud{} case hyperv1.NonePlatform: diff --git a/hypershift-operator/controllers/nodepool/nodepool_controller.go b/hypershift-operator/controllers/nodepool/nodepool_controller.go index 5e40eb0b37e..db0930f4582 100644 --- a/hypershift-operator/controllers/nodepool/nodepool_controller.go +++ b/hypershift-operator/controllers/nodepool/nodepool_controller.go @@ -152,6 +152,7 @@ func (r *NodePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c if err := r.delete(ctx, nodePool, controlPlaneNamespace); err != nil { return ctrl.Result{}, fmt.Errorf("failed to delete nodepool: %w", err) } + // Now we can remove the finalizer. if controllerutil.ContainsFinalizer(nodePool, finalizer) { controllerutil.RemoveFinalizer(nodePool, finalizer) @@ -881,15 +882,16 @@ func (r *NodePoolReconciler) delete(ctx context.Context, nodePool *hyperv1.NodeP } } - // Delete any secret belonging to this NodePool i.e token Secret and userdata Secret. - secrets, err := r.listSecrets(ctx, nodePool) - if err != nil { - return fmt.Errorf("failed to list secrets: %w", err) + if err := deleteMachineDeployment(ctx, r.Client, md); err != nil { + return fmt.Errorf("failed to delete MachineDeployment: %w", err) } - for k := range secrets { - if err := r.Delete(ctx, &secrets[k]); err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("failed to delete secret: %w", err) - } + + if err := deleteMachineHealthCheck(ctx, r.Client, mhc); err != nil { + return fmt.Errorf("failed to delete MachineHealthCheck: %w", err) + } + + if err := deleteMachineSet(ctx, r.Client, ms); err != nil { + return fmt.Errorf("failed to delete MachineSet: %w", err) } // Delete any ConfigMap belonging to this NodePool i.e. TunedConfig ConfigMaps. 
@@ -901,16 +903,30 @@ func (r *NodePoolReconciler) delete(ctx context.Context, nodePool *hyperv1.NodeP return fmt.Errorf("failed to delete ConfigMaps with nodePool label: %w", err) } - if err := deleteMachineDeployment(ctx, r.Client, md); err != nil { - return fmt.Errorf("failed to delete MachineDeployment: %w", err) + // Ensure all machines in NodePool are deleted + err = r.ensureMachineDeletion(nodePool) + if err != nil { + return err } - if err := deleteMachineSet(ctx, r.Client, ms); err != nil { - return fmt.Errorf("failed to delete MachineSet: %w", err) + // Delete all secrets related to the NodePool + if err := r.deleteNodePoolSecrets(ctx, nodePool); err != nil { + return fmt.Errorf("failed to delete NodePool secrets: %w", err) } - if err := deleteMachineHealthCheck(ctx, r.Client, mhc); err != nil { - return fmt.Errorf("failed to delete MachineHealthCheck: %w", err) + return nil +} + +// deleteNodePoolSecrets deletes any secret belonging to this NodePool (ex. token Secret and userdata Secret) +func (r *NodePoolReconciler) deleteNodePoolSecrets(ctx context.Context, nodePool *hyperv1.NodePool) error { + secrets, err := r.listSecrets(ctx, nodePool) + if err != nil { + return fmt.Errorf("failed to list secrets: %w", err) + } + for k := range secrets { + if err := r.Delete(ctx, &secrets[k]); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete secret: %w", err) + } } return nil } @@ -1047,7 +1063,6 @@ func (r *NodePoolReconciler) reconcileMachineDeployment(log logr.Logger, nodePoolAnnotation: client.ObjectKeyFromObject(nodePool).String(), }, }, - Spec: capiv1.MachineSpec{ ClusterName: CAPIClusterName, Bootstrap: capiv1.Bootstrap{ @@ -2017,3 +2032,48 @@ func (r *NodePoolReconciler) doesCPODecompressAndDecodeConfig(ctx context.Contex _, managesDecompressAndDecodeConfig := supportutil.ImageLabels(controlPlaneOperatorImageMetadata)[controlPlaneOperatorManagesDecompressAndDecodeConfig] return managesDecompressAndDecodeConfig, nil } + +func (r *NodePoolReconciler) ensureMachineDeletion(nodePool *hyperv1.NodePool) error { + machineSetList := &unstructured.UnstructuredList{} + var gvk schema.GroupVersionKind + var err error + + switch nodePool.Spec.Platform.Type { + // Define the desired template type and mutateTemplate function. 
+ case hyperv1.AWSPlatform: + gvk, err = apiutil.GVKForObject(&capiaws.AWSMachineList{}, api.Scheme) + if err != nil { + return err + } + default: + // need a default path that returns a value that does not cause the hypershift operator to crash + // if no explicit machineSet is defined safe to assume none exist + return nil + } + + machineSetList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: gvk.Group, + Kind: gvk.Kind, + Version: gvk.Version, + }) + + if err := r.List(context.Background(), machineSetList); err != nil { + return fmt.Errorf("failed to list MachineSet: %w", err) + } + + var filteredMachines []client.Object + for i, machineSet := range machineSetList.Items { + if machineSet.GetAnnotations() != nil { + if annotation, ok := machineSet.GetAnnotations()[nodePoolAnnotation]; ok && + annotation == client.ObjectKeyFromObject(nodePool).String() { + filteredMachines = append(filteredMachines, &machineSetList.Items[i]) + } + } + } + + if len(filteredMachines) > 0 { + return fmt.Errorf("all machines still not fully terminated") + } + + return nil +} diff --git a/support/releaseinfo/registryclient/client.go b/support/releaseinfo/registryclient/client.go index f51d494bd78..5e2bd847a05 100644 --- a/support/releaseinfo/registryclient/client.go +++ b/support/releaseinfo/registryclient/client.go @@ -234,7 +234,7 @@ func ExtractImageFilesToDir(ctx context.Context, imageRef string, pullSecret []b func getMetadata(ctx context.Context, imageRef string, pullSecret []byte) ([]distribution.Descriptor, distribution.BlobStore, error) { repo, ref, err := GetRepoSetup(ctx, imageRef, pullSecret) if err != nil { - return nil, nil, fmt.Errorf("failed to repo setup: %w", err) + return nil, nil, fmt.Errorf("failed to setup repo: %w", err) } firstManifest, location, err := manifest.FirstManifest(ctx, *ref, repo) if err != nil { diff --git a/support/util/imagemetadata.go b/support/util/imagemetadata.go index 23327256f97..f125253a63b 100644 --- a/support/util/imagemetadata.go +++ b/support/util/imagemetadata.go @@ -9,6 +9,8 @@ import ( "github.com/golang/groupcache/lru" "k8s.io/client-go/rest" + hyperv1 "github.com/openshift/hypershift/api/v1alpha1" + "github.com/openshift/hypershift/support/releaseinfo" "github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/dockerv1client" "github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/reference" "github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/registryclient" @@ -92,3 +94,18 @@ func ImageLabels(metadata *dockerv1client.DockerImageConfig) map[string]string { return metadata.ContainerConfig.Labels } } + +// GetPayloadImage get an image from the payload for a particular component +func GetPayloadImage(ctx context.Context, hc *hyperv1.HostedCluster, component string, pullSecret []byte) (string, error) { + releaseImageProvider := &releaseinfo.RegistryClientProvider{} + releaseImage, err := releaseinfo.Provider.Lookup(releaseImageProvider, ctx, hc.Spec.Release.Image, pullSecret) + if err != nil { + return "", fmt.Errorf("failed to lookup release image: %w", err) + } + + image, exists := releaseImage.ComponentImages()[component] + if !exists { + return "", fmt.Errorf("image does not exist for release: %q", image) + } + return image, nil +}
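
Usage sketch (illustrative, not part of the diff): a minimal example of how the GetPayloadImage helper added in support/util/imagemetadata.go resolves the "cluster-capi-controllers" and "aws-cluster-api-controllers" component images from a release payload, mirroring what reconcileCAPIManager and platform.GetPlatform now do. The release pull spec, the PULL_SECRET_FILE environment variable, and reading the pull secret from a local file are assumptions for demonstration only; the operator reads the pull secret from the HostedCluster's pull-secret Secret.

package main

import (
	"context"
	"fmt"
	"os"

	hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
	hyperutil "github.com/openshift/hypershift/support/util"
)

func main() {
	// Illustrative only: the operator obtains these bytes from the
	// HostedCluster's pull-secret Secret (.dockerconfigjson key).
	pullSecretBytes, err := os.ReadFile(os.Getenv("PULL_SECRET_FILE"))
	if err != nil {
		panic(err)
	}

	hc := &hyperv1.HostedCluster{}
	// Example release pull spec; any reachable OCP release image works.
	hc.Spec.Release.Image = "quay.io/openshift-release-dev/ocp-release:4.12.0-x86_64"

	// Component names match ImageStreamCAPI and ImageStreamCAPA introduced by this patch.
	for _, component := range []string{"cluster-capi-controllers", "aws-cluster-api-controllers"} {
		image, err := hyperutil.GetPayloadImage(context.Background(), hc, component, pullSecretBytes)
		if err != nil {
			panic(fmt.Errorf("failed to resolve %s: %w", component, err))
		}
		fmt.Printf("%s => %s\n", component, image)
	}
}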