diff --git a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go index 63005b6f7b1..23d0e158976 100644 --- a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go +++ b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go @@ -108,10 +108,7 @@ const ( HostedClusterAnnotation = "hypershift.openshift.io/cluster" clusterDeletionRequeueDuration = 5 * time.Second - // Image built from https://github.com/openshift/cluster-api/tree/release-1.0 - // Upstream canonical image comes from https://console.cloud.google.com/gcr/images/k8s-staging-cluster-api/global/ - // us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:v1.0.0 - imageCAPI = "registry.ci.openshift.org/hypershift/cluster-api:v1.0.0" + ImageStreamCAPI = "cluster-capi-controllers" ImageStreamAutoscalerImage = "cluster-autoscaler" ImageStreamClusterMachineApproverImage = "cluster-machine-approver" @@ -912,7 +909,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques _, ignitionServerHasHealthzHandler := hyperutil.ImageLabels(controlPlaneOperatorImageMetadata)[ignitionServerHealthzHandlerLabel] _, controlplaneOperatorManagesIgnitionServer := hyperutil.ImageLabels(controlPlaneOperatorImageMetadata)[controlplaneOperatorManagesIgnitionServerLabel] - p, err := platform.GetPlatform(hcluster, utilitiesImage) + p, err := platform.GetPlatform(hcluster, utilitiesImage, pullSecretBytes) if err != nil { return ctrl.Result{}, err } @@ -1342,7 +1339,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques } // Reconcile the CAPI manager components - err = r.reconcileCAPIManager(ctx, createOrUpdate, hcluster, hcp) + err = r.reconcileCAPIManager(ctx, createOrUpdate, hcluster, hcp, pullSecretBytes) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile capi manager: %w", err) } @@ -1357,14 +1354,14 @@ func (r 
*HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques // TODO (alberto): drop this after dropping < 4.11 support. if _, hasLabel := hyperutil.ImageLabels(controlPlaneOperatorImageMetadata)[controlPlaneOperatorManagesMachineAutoscaler]; !hasLabel { // Reconcile the autoscaler. - err = r.reconcileAutoscaler(ctx, createOrUpdate, hcluster, hcp, utilitiesImage) + err = r.reconcileAutoscaler(ctx, createOrUpdate, hcluster, hcp, utilitiesImage, pullSecretBytes) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile autoscaler: %w", err) } } if _, hasLabel := hyperutil.ImageLabels(controlPlaneOperatorImageMetadata)[controlPlaneOperatorManagesMachineApprover]; !hasLabel { // Reconcile the machine approver. - if err = r.reconcileMachineApprover(ctx, createOrUpdate, hcluster, hcp, utilitiesImage); err != nil { + if err = r.reconcileMachineApprover(ctx, createOrUpdate, hcluster, hcp, utilitiesImage, pullSecretBytes); err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile machine approver: %w", err) } } @@ -1598,7 +1595,7 @@ func ensureHCPAWSRolesBackwardCompatibility(hc *hyperv1.HostedCluster, hcp *hype } // reconcileCAPIManager orchestrates orchestrates of all CAPI manager components. 
-func (r *HostedClusterReconciler) reconcileCAPIManager(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane) error { +func (r *HostedClusterReconciler) reconcileCAPIManager(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, pullSecretBytes []byte) error { controlPlaneNamespace := manifests.HostedControlPlaneNamespace(hcluster.Namespace, hcluster.Name) err := r.Client.Get(ctx, client.ObjectKeyFromObject(controlPlaneNamespace), controlPlaneNamespace) if err != nil { @@ -1686,7 +1683,10 @@ func (r *HostedClusterReconciler) reconcileCAPIManager(ctx context.Context, crea } // Reconcile CAPI manager deployment - capiImage := imageCAPI + capiImage, err := hyperutil.GetPayloadImage(ctx, hcluster, ImageStreamCAPI, pullSecretBytes) + if err != nil { + return fmt.Errorf("failed to retrieve capi image: %w", err) + } if envImage := os.Getenv(images.CAPIEnvVar); len(envImage) > 0 { capiImage = envImage } @@ -1931,8 +1931,8 @@ func servicePublishingStrategyByType(hcp *hyperv1.HostedCluster, svcType hyperv1 // reconcileAutoscaler orchestrates reconciliation of autoscaler components using // both the HostedCluster and the HostedControlPlane which the autoscaler takes // inputs from. 
-func (r *HostedClusterReconciler) reconcileAutoscaler(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string) error { - clusterAutoscalerImage, err := r.getPayloadImage(ctx, hcluster, ImageStreamAutoscalerImage) +func (r *HostedClusterReconciler) reconcileAutoscaler(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string, pullSecretBytes []byte) error { + clusterAutoscalerImage, err := hyperutil.GetPayloadImage(ctx, hcluster, ImageStreamAutoscalerImage, pullSecretBytes) if err != nil { return fmt.Errorf("failed to get image for machine approver: %w", err) } @@ -2519,7 +2519,7 @@ func reconcileCAPIManagerDeployment(deployment *appsv1.Deployment, hc *hyperv1.H }, }, }, - Command: []string{"/manager"}, + Command: []string{"/bin/cluster-api-controller-manager"}, Args: []string{"--namespace", "$(MY_NAMESPACE)", "--alsologtostderr", "--v=4", @@ -2999,7 +2999,7 @@ func (r *HostedClusterReconciler) delete(ctx context.Context, hc *hyperv1.Hosted } // Cleanup Platform specifics. 
- p, err := platform.GetPlatform(hc, "") + p, err := platform.GetPlatform(hc, "", nil) if err != nil { return false, err } @@ -3158,8 +3158,8 @@ func (r *HostedClusterReconciler) reconcileClusterPrometheusRBAC(ctx context.Con return nil } -func (r *HostedClusterReconciler) reconcileMachineApprover(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string) error { - machineApproverImage, err := r.getPayloadImage(ctx, hcluster, ImageStreamClusterMachineApproverImage) +func (r *HostedClusterReconciler) reconcileMachineApprover(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string, pullSecretBytes []byte) error { + machineApproverImage, err := hyperutil.GetPayloadImage(ctx, hcluster, ImageStreamClusterMachineApproverImage, pullSecretBytes) if err != nil { return fmt.Errorf("failed to get image for machine approver: %w", err) } @@ -4069,33 +4069,6 @@ func validateClusterID(hc *hyperv1.HostedCluster) error { return nil } -// getReleaseImage get the releaseInfo releaseImage for a given HC release image reference. -func (r *HostedClusterReconciler) getReleaseImage(ctx context.Context, hc *hyperv1.HostedCluster) (*releaseinfo.ReleaseImage, error) { - var pullSecret corev1.Secret - if err := r.Client.Get(ctx, types.NamespacedName{Namespace: hc.Namespace, Name: hc.Spec.PullSecret.Name}, &pullSecret); err != nil { - return nil, fmt.Errorf("failed to get pull secret: %w", err) - } - pullSecretBytes, ok := pullSecret.Data[corev1.DockerConfigJsonKey] - if !ok { - return nil, fmt.Errorf("expected %s key in pull secret", corev1.DockerConfigJsonKey) - } - return r.ReleaseProvider.Lookup(ctx, hc.Spec.Release.Image, pullSecretBytes) -} - -// getPayloadImage get an image from the payload for a particular component. 
-func (r *HostedClusterReconciler) getPayloadImage(ctx context.Context, hc *hyperv1.HostedCluster, component string) (string, error) { - releaseImage, err := r.getReleaseImage(ctx, hc) - if err != nil { - return "", fmt.Errorf("failed to lookup release image: %w", err) - } - - image, exists := releaseImage.ComponentImages()[component] - if !exists { - return "", fmt.Errorf("image does not exist for release: %q", image) - } - return image, nil -} - func (r *HostedClusterReconciler) reconcileServiceAccountSigningKey(ctx context.Context, hc *hyperv1.HostedCluster, targetNamespace string, createOrUpdate upsert.CreateOrUpdateFN) error { privateBytes, publicBytes, err := r.serviceAccountSigningKeyBytes(ctx, hc) if err != nil { diff --git a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go index 2ee4fca64d7..11719228e93 100644 --- a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go +++ b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go @@ -15,6 +15,7 @@ import ( configv1 "github.com/openshift/api/config/v1" "github.com/openshift/hypershift/api" hyperv1 "github.com/openshift/hypershift/api/v1alpha1" + version "github.com/openshift/hypershift/cmd/version" "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests" platformaws "github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster/internal/platform/aws" "github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster/internal/platform/kubevirt" @@ -840,7 +841,7 @@ func expectedRules(addRules []rbacv1.PolicyRule) []rbacv1.PolicyRule { } func TestHostedClusterWatchesEverythingItCreates(t *testing.T) { - + releaseImage, _ := version.LookupDefaultOCPVersion() hostedClusters := []*hyperv1.HostedCluster{ { ObjectMeta: metav1.ObjectMeta{Name: "agent"}, @@ -849,6 +850,9 @@ 
func TestHostedClusterWatchesEverythingItCreates(t *testing.T) { Type: hyperv1.AgentPlatform, Agent: &hyperv1.AgentPlatformSpec{AgentNamespace: "agent-namespace"}, }, + Release: hyperv1.Release{ + Image: releaseImage.PullSpec, + }, }, Status: hyperv1.HostedClusterStatus{ IgnitionEndpoint: "ign", @@ -872,6 +876,9 @@ func TestHostedClusterWatchesEverythingItCreates(t *testing.T) { }, }, }, + Release: hyperv1.Release{ + Image: releaseImage.PullSpec, + }, }, }, { @@ -880,6 +887,9 @@ func TestHostedClusterWatchesEverythingItCreates(t *testing.T) { Platform: hyperv1.PlatformSpec{ Type: hyperv1.NonePlatform, }, + Release: hyperv1.Release{ + Image: releaseImage.PullSpec, + }, }, }, { @@ -889,6 +899,9 @@ func TestHostedClusterWatchesEverythingItCreates(t *testing.T) { Type: hyperv1.IBMCloudPlatform, IBMCloud: &hyperv1.IBMCloudPlatformSpec{}, }, + Release: hyperv1.Release{ + Image: releaseImage.PullSpec, + }, }, }, { @@ -897,6 +910,9 @@ func TestHostedClusterWatchesEverythingItCreates(t *testing.T) { Platform: hyperv1.PlatformSpec{ Type: hyperv1.KubevirtPlatform, }, + Release: hyperv1.Release{ + Image: releaseImage.PullSpec, + }, }, }, } diff --git a/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook_test.go b/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook_test.go index 36135ef0d5e..f0ad03bdc27 100644 --- a/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook_test.go +++ b/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook_test.go @@ -10,6 +10,7 @@ import ( hyperapi "github.com/openshift/hypershift/api" "github.com/openshift/hypershift/api/util/ipnet" hyperv1 "github.com/openshift/hypershift/api/v1alpha1" + version "github.com/openshift/hypershift/cmd/version" fakecapabilities "github.com/openshift/hypershift/support/capabilities/fake" fakereleaseprovider "github.com/openshift/hypershift/support/releaseinfo/fake" "github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/dockerv1client" @@ 
-30,6 +31,7 @@ import ( ) func TestWebhookAllowsHostedClusterReconcilerUpdates(t *testing.T) { + releaseImage, _ := version.LookupDefaultOCPVersion() t.Parallel() testCases := []struct { name string @@ -55,6 +57,9 @@ func TestWebhookAllowsHostedClusterReconcilerUpdates(t *testing.T) { }, }, }, + Release: hyperv1.Release{ + Image: releaseImage.PullSpec, + }, }, }, additionalObjects: []crclient.Object{ @@ -64,7 +69,7 @@ func TestWebhookAllowsHostedClusterReconcilerUpdates(t *testing.T) { }, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns"}, - Data: map[string][]byte{".dockerconfigjson": []byte("something")}, + Data: map[string][]byte{".dockerconfigjson": []byte("{\"something\": \"something\"}")}, }, &configv1.Ingress{ObjectMeta: metav1.ObjectMeta{Name: "cluster"}}, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "none-cluster"}}, diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws.go b/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws.go index 34cbd87253c..7db8618c599 100644 --- a/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws.go +++ b/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws.go @@ -25,20 +25,19 @@ import ( ) const ( - // Image built from https://github.com/openshift/cluster-api-provider-aws/tree/release-1.1 - // Upstream canonical image comes from https://console.cloud.google.com/gcr/images/k8s-artifacts-prod - // us.gcr.io/k8s-artifacts-prod/cluster-api-aws/cluster-api-aws-controller:v1.1.0 - imageCAPA = "registry.ci.openshift.org/hypershift/cluster-api-aws-controller:v1.1.0" + ImageStreamCAPA = "aws-cluster-api-controllers" ) -func New(utilitiesImage string) *AWS { +func New(utilitiesImage string, capiProviderImage string) *AWS { return &AWS{ - utilitiesImage: utilitiesImage, + utilitiesImage: utilitiesImage, + capiProviderImage: capiProviderImage, } } type AWS struct { - utilitiesImage string + utilitiesImage string + 
capiProviderImage string } func (p AWS) ReconcileCAPIInfraCR(ctx context.Context, c client.Client, createOrUpdate upsert.CreateOrUpdateFN, @@ -63,7 +62,7 @@ func (p AWS) ReconcileCAPIInfraCR(ctx context.Context, c client.Client, createOr } func (p AWS) CAPIProviderDeploymentSpec(hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane) (*appsv1.DeploymentSpec, error) { - providerImage := imageCAPA + providerImage := p.capiProviderImage if envImage := os.Getenv(images.AWSCAPIProviderEnvVar); len(envImage) > 0 { providerImage = envImage } @@ -161,7 +160,7 @@ func (p AWS) CAPIProviderDeploymentSpec(hcluster *hyperv1.HostedCluster, hcp *hy Value: "true", }, }, - Command: []string{"/manager"}, + Command: []string{"/bin/cluster-api-provider-aws-controller-manager"}, Args: []string{"--namespace", "$(MY_NAMESPACE)", "--alsologtostderr", "--v=4", diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/platform.go b/hypershift-operator/controllers/hostedcluster/internal/platform/platform.go index 23fd7e750d0..8660d166669 100644 --- a/hypershift-operator/controllers/hostedcluster/internal/platform/platform.go +++ b/hypershift-operator/controllers/hostedcluster/internal/platform/platform.go @@ -13,11 +13,16 @@ import ( "github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster/internal/platform/none" "github.com/openshift/hypershift/hypershift-operator/controllers/hostedcluster/internal/platform/powervs" "github.com/openshift/hypershift/support/upsert" + imgUtil "github.com/openshift/hypershift/support/util" appsv1 "k8s.io/api/apps/v1" rbacv1 "k8s.io/api/rbac/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) +const ( + AWSCAPIProvider = "aws-cluster-api-controllers" +) + var _ Platform = aws.AWS{} var _ Platform = ibmcloud.IBMCloud{} var _ Platform = none.None{} @@ -57,11 +62,23 @@ type Platform interface { DeleteCredentials(ctx context.Context, c client.Client, hcluster *hyperv1.HostedCluster, controlPlaneNamespace string) 
error } -func GetPlatform(hcluster *hyperv1.HostedCluster, utilitiesImage string) (Platform, error) { - var platform Platform +// GetPlatform gets and initializes the cloud platform the hosted cluster was created on +func GetPlatform(hcluster *hyperv1.HostedCluster, utilitiesImage string, pullSecretBytes []byte) (Platform, error) { + var ( + platform Platform + capiImageProvider string + err error + ) + switch hcluster.Spec.Platform.Type { case hyperv1.AWSPlatform: - platform = aws.New(utilitiesImage) + if pullSecretBytes != nil { + capiImageProvider, err = imgUtil.GetPayloadImage(context.TODO(), hcluster, AWSCAPIProvider, pullSecretBytes) + if err != nil { + return nil, fmt.Errorf("failed to retrieve capa image: %w", err) + } + } + platform = aws.New(utilitiesImage, capiImageProvider) case hyperv1.IBMCloudPlatform: platform = &ibmcloud.IBMCloud{} case hyperv1.NonePlatform: diff --git a/hypershift-operator/controllers/nodepool/nodepool_controller.go b/hypershift-operator/controllers/nodepool/nodepool_controller.go index 3f5e2a18a08..16fcd57a19b 100644 --- a/hypershift-operator/controllers/nodepool/nodepool_controller.go +++ b/hypershift-operator/controllers/nodepool/nodepool_controller.go @@ -154,6 +154,7 @@ func (r *NodePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c if err := r.delete(ctx, nodePool, controlPlaneNamespace); err != nil { return ctrl.Result{}, fmt.Errorf("failed to delete nodepool: %w", err) } + // Now we can remove the finalizer. if controllerutil.ContainsFinalizer(nodePool, finalizer) { controllerutil.RemoveFinalizer(nodePool, finalizer) @@ -942,15 +943,16 @@ func (r *NodePoolReconciler) delete(ctx context.Context, nodePool *hyperv1.NodeP } } - // Delete any secret belonging to this NodePool i.e token Secret and userdata Secret. 
- secrets, err := r.listSecrets(ctx, nodePool) - if err != nil { - return fmt.Errorf("failed to list secrets: %w", err) + if err := deleteMachineDeployment(ctx, r.Client, md); err != nil { + return fmt.Errorf("failed to delete MachineDeployment: %w", err) } - for k := range secrets { - if err := r.Delete(ctx, &secrets[k]); err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("failed to delete secret: %w", err) - } + + if err := deleteMachineHealthCheck(ctx, r.Client, mhc); err != nil { + return fmt.Errorf("failed to delete MachineHealthCheck: %w", err) + } + + if err := deleteMachineSet(ctx, r.Client, ms); err != nil { + return fmt.Errorf("failed to delete MachineSet: %w", err) } // Delete any ConfigMap belonging to this NodePool i.e. TuningConfig ConfigMaps. @@ -962,16 +964,30 @@ func (r *NodePoolReconciler) delete(ctx context.Context, nodePool *hyperv1.NodeP return fmt.Errorf("failed to delete ConfigMaps with nodePool label: %w", err) } - if err := deleteMachineDeployment(ctx, r.Client, md); err != nil { - return fmt.Errorf("failed to delete MachineDeployment: %w", err) + // Ensure all machines in NodePool are deleted + err = r.ensureMachineDeletion(nodePool, controlPlaneNamespace) + if err != nil { + return err } - if err := deleteMachineSet(ctx, r.Client, ms); err != nil { - return fmt.Errorf("failed to delete MachineSet: %w", err) + // Delete all secrets related to the NodePool + if err := r.deleteNodePoolSecrets(ctx, nodePool); err != nil { + return fmt.Errorf("failed to delete NodePool secrets: %w", err) } - if err := deleteMachineHealthCheck(ctx, r.Client, mhc); err != nil { - return fmt.Errorf("failed to delete MachineHealthCheck: %w", err) + return nil +} + +// deleteNodePoolSecrets deletes any secret belonging to this NodePool (ex. 
token Secret and userdata Secret) +func (r *NodePoolReconciler) deleteNodePoolSecrets(ctx context.Context, nodePool *hyperv1.NodePool) error { + secrets, err := r.listSecrets(ctx, nodePool) + if err != nil { + return fmt.Errorf("failed to list secrets: %w", err) + } + for k := range secrets { + if err := r.Delete(ctx, &secrets[k]); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete secret: %w", err) + } } return nil } @@ -1109,7 +1125,6 @@ func (r *NodePoolReconciler) reconcileMachineDeployment(log logr.Logger, nodePoolAnnotation: client.ObjectKeyFromObject(nodePool).String(), }, }, - Spec: capiv1.MachineSpec{ ClusterName: CAPIClusterName, Bootstrap: capiv1.Bootstrap{ @@ -2083,3 +2098,33 @@ func (r *NodePoolReconciler) doesCPODecompressAndDecodeConfig(ctx context.Contex _, managesDecompressAndDecodeConfig := supportutil.ImageLabels(controlPlaneOperatorImageMetadata)[controlPlaneOperatorManagesDecompressAndDecodeConfig] return managesDecompressAndDecodeConfig, nil } + +// ensureMachineDeletion ensures all the machines belonging to the NodePool's MachineSet are fully deleted +// This function can be deleted once the upstream PR (https://github.com/kubernetes-sigs/cluster-api-provider-aws/pull/3805) is merged and pulled into https://github.com/openshift/cluster-api +// This function is necessary to ensure AWSMachines are fully deleted prior to deleting the NodePull secrets being deleted due to a bug introduced by https://github.com/kubernetes-sigs/cluster-api-provider-aws/pull/2271 +// See https://github.com/openshift/hypershift/pull/1826#discussion_r1007349564 for more details +func (r *NodePoolReconciler) ensureMachineDeletion(nodePool *hyperv1.NodePool, controlPlaneNamespace string) error { + // Get list of CAPI Machines to filter through + machines := capiv1.MachineList{} + if err := r.List(context.Background(), &machines, &client.ListOptions{Namespace: controlPlaneNamespace}); err != nil { + return fmt.Errorf("failed to get list of 
Machines: %w", err) } + + // Filter out only machines belonging to deleted NodePool + var machineSetOwnedMachines []capiv1.Machine + for i, machine := range machines.Items { + for _, ownerReference := range machine.OwnerReferences { + // Derive the MachineSet and NodePool names from each respective Machine + derivedMachineSetName := machine.Name[:strings.LastIndex(machine.Name, "-")] + derivedNodePoolName := derivedMachineSetName[:strings.LastIndex(derivedMachineSetName, "-")] + + if ownerReference.Kind == "MachineSet" && derivedMachineSetName == ownerReference.Name && derivedNodePoolName == nodePool.Name { + machineSetOwnedMachines = append(machineSetOwnedMachines, machines.Items[i]) + } + } + } + if len(machineSetOwnedMachines) > 0 { + return fmt.Errorf("not all Machines are deleted for NodePool %s", nodePool.Name) + } + return nil +} diff --git a/support/releaseinfo/registryclient/client.go b/support/releaseinfo/registryclient/client.go index f51d494bd78..61c0a7be159 100644 --- a/support/releaseinfo/registryclient/client.go +++ b/support/releaseinfo/registryclient/client.go @@ -234,7 +234,7 @@ func ExtractImageFilesToDir(ctx context.Context, imageRef string, pullSecret []b func getMetadata(ctx context.Context, imageRef string, pullSecret []byte) ([]distribution.Descriptor, distribution.BlobStore, error) { repo, ref, err := GetRepoSetup(ctx, imageRef, pullSecret) if err != nil { - return nil, nil, fmt.Errorf("failed to repo setup: %w", err) + return nil, nil, fmt.Errorf("failed to get repo setup: %w", err) } firstManifest, location, err := manifest.FirstManifest(ctx, *ref, repo) if err != nil { diff --git a/support/util/imagemetadata.go b/support/util/imagemetadata.go index 23327256f97..f125253a63b 100644 --- a/support/util/imagemetadata.go +++ b/support/util/imagemetadata.go @@ -9,6 +9,8 @@ import ( "github.com/golang/groupcache/lru" "k8s.io/client-go/rest" + hyperv1 "github.com/openshift/hypershift/api/v1alpha1" + "github.com/openshift/hypershift/support/releaseinfo" 
"github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/dockerv1client" "github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/reference" "github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/registryclient" @@ -92,3 +94,18 @@ func ImageLabels(metadata *dockerv1client.DockerImageConfig) map[string]string { return metadata.ContainerConfig.Labels } } + +// GetPayloadImage gets an image from the payload for a particular component +func GetPayloadImage(ctx context.Context, hc *hyperv1.HostedCluster, component string, pullSecret []byte) (string, error) { + releaseImageProvider := &releaseinfo.RegistryClientProvider{} + releaseImage, err := releaseImageProvider.Lookup(ctx, hc.Spec.Release.Image, pullSecret) + if err != nil { + return "", fmt.Errorf("failed to lookup release image: %w", err) + } + + image, exists := releaseImage.ComponentImages()[component] + if !exists { + return "", fmt.Errorf("image does not exist for release: %q", component) + } + return image, nil +}