Skip to content

Commit

Permalink
Retrieve CAPI/CAPA from release image
Browse files Browse the repository at this point in the history
Retrieves the CAPI and CAPA image components from the release image rather than registry.ci.openshift.org/hypershift/cluster-api:v1.0.0 and registry.ci.openshift.org/hypershift/cluster-api-aws-controller:v1.1.0 respectively
  • Loading branch information
bryan-cox committed Oct 27, 2022
1 parent 8d93c27 commit 33c6567
Show file tree
Hide file tree
Showing 4 changed files with 136 additions and 56 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -108,10 +108,7 @@ const (
HostedClusterAnnotation = "hypershift.openshift.io/cluster"
clusterDeletionRequeueDuration = 5 * time.Second

// Image built from https://github.com/openshift/cluster-api/tree/release-1.0
// Upstream canonical image comes from https://console.cloud.google.com/gcr/images/k8s-staging-cluster-api/global/
// us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:v1.0.0
imageCAPI = "registry.ci.openshift.org/hypershift/cluster-api:v1.0.0"
ImageStreamCAPI = "cluster-capi-controllers"
ImageStreamAutoscalerImage = "cluster-autoscaler"
ImageStreamClusterMachineApproverImage = "cluster-machine-approver"

Expand Down Expand Up @@ -1604,7 +1601,10 @@ func (r *HostedClusterReconciler) reconcileCAPIManager(ctx context.Context, crea
}

// Reconcile CAPI manager deployment
capiImage := imageCAPI
capiImage, err := hyperutil.GetPayloadImage(ctx, hcluster, ImageStreamCAPI)
if err != nil {
return fmt.Errorf("failed to retrieve capi image: %w", err)
}
if envImage := os.Getenv(images.CAPIEnvVar); len(envImage) > 0 {
capiImage = envImage
}
Expand Down Expand Up @@ -1850,7 +1850,7 @@ func servicePublishingStrategyByType(hcp *hyperv1.HostedCluster, svcType hyperv1
// both the HostedCluster and the HostedControlPlane which the autoscaler takes
// inputs from.
func (r *HostedClusterReconciler) reconcileAutoscaler(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string) error {
clusterAutoscalerImage, err := r.getPayloadImage(ctx, hcluster, ImageStreamAutoscalerImage)
clusterAutoscalerImage, err := hyperutil.GetPayloadImage(ctx, hcluster, ImageStreamAutoscalerImage)
if err != nil {
return fmt.Errorf("failed to get image for machine approver: %w", err)
}
Expand Down Expand Up @@ -2437,7 +2437,7 @@ func reconcileCAPIManagerDeployment(deployment *appsv1.Deployment, hc *hyperv1.H
},
},
},
Command: []string{"/manager"},
Command: []string{"/bin/cluster-api-controller-manager"},
Args: []string{"--namespace", "$(MY_NAMESPACE)",
"--alsologtostderr",
"--v=4",
Expand Down Expand Up @@ -3077,7 +3077,7 @@ func (r *HostedClusterReconciler) reconcileClusterPrometheusRBAC(ctx context.Con
}

func (r *HostedClusterReconciler) reconcileMachineApprover(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string) error {
machineApproverImage, err := r.getPayloadImage(ctx, hcluster, ImageStreamClusterMachineApproverImage)
machineApproverImage, err := hyperutil.GetPayloadImage(ctx, hcluster, ImageStreamClusterMachineApproverImage)
if err != nil {
return fmt.Errorf("failed to get image for machine approver: %w", err)
}
Expand Down Expand Up @@ -3916,33 +3916,6 @@ func validateClusterID(hc *hyperv1.HostedCluster) error {
return nil
}

// getReleaseImage resolves the releaseinfo.ReleaseImage for the release image
// referenced by the given HostedCluster, authenticating with the cluster's
// pull secret.
func (r *HostedClusterReconciler) getReleaseImage(ctx context.Context, hc *hyperv1.HostedCluster) (*releaseinfo.ReleaseImage, error) {
	pullSecret := corev1.Secret{}
	key := types.NamespacedName{Namespace: hc.Namespace, Name: hc.Spec.PullSecret.Name}
	if err := r.Client.Get(ctx, key, &pullSecret); err != nil {
		return nil, fmt.Errorf("failed to get pull secret: %w", err)
	}

	// The docker config JSON entry is required for registry authentication.
	pullSecretBytes, ok := pullSecret.Data[corev1.DockerConfigJsonKey]
	if !ok {
		return nil, fmt.Errorf("expected %s key in pull secret", corev1.DockerConfigJsonKey)
	}

	return r.ReleaseProvider.Lookup(ctx, hc.Spec.Release.Image, pullSecretBytes)
}

// getPayloadImage returns the image pullspec for the named component from the
// HostedCluster's release payload. It returns an error if the release image
// cannot be looked up or the component is not part of the payload.
func (r *HostedClusterReconciler) getPayloadImage(ctx context.Context, hc *hyperv1.HostedCluster, component string) (string, error) {
	releaseImage, err := r.getReleaseImage(ctx, hc)
	if err != nil {
		return "", fmt.Errorf("failed to lookup release image: %w", err)
	}

	image, exists := releaseImage.ComponentImages()[component]
	if !exists {
		// Bug fix: on a miss "image" is always the empty string, so report the
		// missing component name instead of the (empty) image value.
		return "", fmt.Errorf("image does not exist for release: %q", component)
	}
	return image, nil
}

func (r *HostedClusterReconciler) reconcileServiceAccountSigningKey(ctx context.Context, hc *hyperv1.HostedCluster, targetNamespace string, createOrUpdate upsert.CreateOrUpdateFN) error {
privateBytes, publicBytes, err := r.serviceAccountSigningKeyBytes(ctx, hc)
if err != nil {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,10 +25,7 @@ import (
)

const (
// Image built from https://github.com/openshift/cluster-api-provider-aws/tree/release-1.1
// Upstream canonical image comes from https://console.cloud.google.com/gcr/images/k8s-artifacts-prod
// us.gcr.io/k8s-artifacts-prod/cluster-api-aws/cluster-api-aws-controller:v1.1.0
imageCAPA = "registry.ci.openshift.org/hypershift/cluster-api-aws-controller:v1.1.0"
ImageStreamCAPA = "aws-cluster-api-controllers"
)

func New(utilitiesImage string) *AWS {
Expand Down Expand Up @@ -63,7 +60,10 @@ func (p AWS) ReconcileCAPIInfraCR(ctx context.Context, c client.Client, createOr
}

func (p AWS) CAPIProviderDeploymentSpec(hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane) (*appsv1.DeploymentSpec, error) {
providerImage := imageCAPA
providerImage, err := util.GetPayloadImage(context.TODO(), hcluster, ImageStreamCAPA)
if err != nil {
return nil, err
}
if envImage := os.Getenv(images.AWSCAPIProviderEnvVar); len(envImage) > 0 {
providerImage = envImage
}
Expand Down Expand Up @@ -161,7 +161,7 @@ func (p AWS) CAPIProviderDeploymentSpec(hcluster *hyperv1.HostedCluster, hcp *hy
Value: "true",
},
},
Command: []string{"/manager"},
Command: []string{"/bin/cluster-api-provider-aws-controller-manager"},
Args: []string{"--namespace", "$(MY_NAMESPACE)",
"--alsologtostderr",
"--v=4",
Expand Down
95 changes: 80 additions & 15 deletions hypershift-operator/controllers/nodepool/nodepool_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -152,6 +152,7 @@ func (r *NodePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
if err := r.delete(ctx, nodePool, controlPlaneNamespace); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to delete nodepool: %w", err)
}

// Now we can remove the finalizer.
if controllerutil.ContainsFinalizer(nodePool, finalizer) {
controllerutil.RemoveFinalizer(nodePool, finalizer)
Expand Down Expand Up @@ -868,6 +869,7 @@ func deleteMachineHealthCheck(ctx context.Context, c client.Client, mhc *capiv1.
}

func (r *NodePoolReconciler) delete(ctx context.Context, nodePool *hyperv1.NodePool, controlPlaneNamespace string) error {
allMachinesTerminiated := false
md := machineDeployment(nodePool, controlPlaneNamespace)
ms := machineSet(nodePool, controlPlaneNamespace)
mhc := machineHealthCheck(nodePool, controlPlaneNamespace)
Expand All @@ -881,15 +883,16 @@ func (r *NodePoolReconciler) delete(ctx context.Context, nodePool *hyperv1.NodeP
}
}

// Delete any secret belonging to this NodePool i.e token Secret and userdata Secret.
secrets, err := r.listSecrets(ctx, nodePool)
if err != nil {
return fmt.Errorf("failed to list secrets: %w", err)
if err := deleteMachineDeployment(ctx, r.Client, md); err != nil {
return fmt.Errorf("failed to delete MachineDeployment: %w", err)
}
for k := range secrets {
if err := r.Delete(ctx, &secrets[k]); err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("failed to delete secret: %w", err)
}

if err := deleteMachineHealthCheck(ctx, r.Client, mhc); err != nil {
return fmt.Errorf("failed to delete MachineHealthCheck: %w", err)
}

if err := deleteMachineSet(ctx, r.Client, ms); err != nil {
return fmt.Errorf("failed to delete MachineSet: %w", err)
}

// Delete any ConfigMap belonging to this NodePool i.e. TunedConfig ConfigMaps.
Expand All @@ -901,16 +904,34 @@ func (r *NodePoolReconciler) delete(ctx context.Context, nodePool *hyperv1.NodeP
return fmt.Errorf("failed to delete ConfigMaps with nodePool label: %w", err)
}

if err := deleteMachineDeployment(ctx, r.Client, md); err != nil {
return fmt.Errorf("failed to delete MachineDeployment: %w", err)
if nodePool.Spec.Platform.AWS != nil {
for !allMachinesTerminiated {
time.Sleep(5 * time.Second)
allMachinesTerminiated, err = r.allMachinesTerminated(nodePool)
if err != nil {
return err
}
}
}

if err := deleteMachineSet(ctx, r.Client, ms); err != nil {
return fmt.Errorf("failed to delete MachineSet: %w", err)
// Delete all secrets related to the NodePool
if err := r.deleteNodePoolSecrets(ctx, nodePool); err != nil {
return fmt.Errorf("failed to delete NodePool secrets: %w", err)
}

if err := deleteMachineHealthCheck(ctx, r.Client, mhc); err != nil {
return fmt.Errorf("failed to delete MachineHealthCheck: %w", err)
return nil
}

// deleteNodePoolSecrets deletes any secret belonging to this NodePool (ex. token Secret and userdata Secret)
func (r *NodePoolReconciler) deleteNodePoolSecrets(ctx context.Context, nodePool *hyperv1.NodePool) error {
	secrets, err := r.listSecrets(ctx, nodePool)
	if err != nil {
		return fmt.Errorf("failed to list secrets: %w", err)
	}

	// Remove each secret, tolerating ones that are already gone.
	for i := range secrets {
		deleteErr := r.Delete(ctx, &secrets[i])
		if deleteErr == nil || apierrors.IsNotFound(deleteErr) {
			continue
		}
		return fmt.Errorf("failed to delete secret: %w", deleteErr)
	}
	return nil
}
Expand Down Expand Up @@ -1047,7 +1068,6 @@ func (r *NodePoolReconciler) reconcileMachineDeployment(log logr.Logger,
nodePoolAnnotation: client.ObjectKeyFromObject(nodePool).String(),
},
},

Spec: capiv1.MachineSpec{
ClusterName: CAPIClusterName,
Bootstrap: capiv1.Bootstrap{
Expand Down Expand Up @@ -2017,3 +2037,48 @@ func (r *NodePoolReconciler) doesCPODecompressAndDecodeConfig(ctx context.Contex
_, managesDecompressAndDecodeConfig := supportutil.ImageLabels(controlPlaneOperatorImageMetadata)[controlPlaneOperatorManagesDecompressAndDecodeConfig]
return managesDecompressAndDecodeConfig, nil
}

// allMachinesTerminated reports whether every platform machine belonging to
// the given NodePool has been removed from the cluster. For platforms with no
// tracked machine type (anything other than AWS today) it returns true, since
// there is nothing to wait on.
func (r *NodePoolReconciler) allMachinesTerminated(nodePool *hyperv1.NodePool) (bool, error) {
	var gvk schema.GroupVersionKind
	var err error

	switch nodePool.Spec.Platform.Type {
	case hyperv1.AWSPlatform:
		gvk, err = apiutil.GVKForObject(&capiaws.AWSMachineList{}, api.Scheme)
		if err != nil {
			return false, err
		}
	default:
		// Need a default path that returns a value that does not cause the
		// hypershift operator to crash: with no machine type defined it is
		// safe to assume none exist.
		return true, nil
	}

	// List the platform machines dynamically so no typed client per platform
	// is required. (Renamed from machineSetList: these are machines, e.g.
	// AWSMachine items, not MachineSets.)
	machineList := &unstructured.UnstructuredList{}
	machineList.SetGroupVersionKind(gvk)

	if err := r.List(context.Background(), machineList); err != nil {
		return false, fmt.Errorf("failed to list Machines: %w", err)
	}

	// A machine belongs to this NodePool when its nodePool annotation matches.
	// Return early on the first match instead of accumulating a slice just to
	// test its length.
	owner := client.ObjectKeyFromObject(nodePool).String()
	for i := range machineList.Items {
		if machineList.Items[i].GetAnnotations()[nodePoolAnnotation] == owner {
			return false, nil
		}
	}

	return true, nil
}
42 changes: 42 additions & 0 deletions support/util/imagemetadata.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,13 +7,18 @@ import (

"github.com/docker/distribution/registry/client/transport"
"github.com/golang/groupcache/lru"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"

hyperv1 "github.com/openshift/hypershift/api/v1alpha1"
cmdUtil "github.com/openshift/hypershift/cmd/util"
"github.com/openshift/hypershift/support/releaseinfo"
"github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/dockerv1client"
"github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/reference"
"github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/registryclient"
"github.com/openshift/hypershift/support/thirdparty/oc/pkg/cli/image/manifest"
"github.com/openshift/hypershift/support/thirdparty/oc/pkg/cli/image/manifest/dockercredentials"
corev1 "k8s.io/api/core/v1"
)

var (
Expand Down Expand Up @@ -92,3 +97,40 @@ func ImageLabels(metadata *dockerv1client.DockerImageConfig) map[string]string {
return metadata.ContainerConfig.Labels
}
}

// getPullSecretFromHostedCluster reads the docker config JSON bytes from the
// pull secret referenced by the given HostedCluster.
// NOTE(review): this builds a fresh client on every call via cmdUtil.GetClient;
// confirm that is acceptable for the call frequency of the consumers.
func getPullSecretFromHostedCluster(ctx context.Context, hc *hyperv1.HostedCluster) ([]byte, error) {
	c, err := cmdUtil.GetClient()
	if err != nil {
		return nil, fmt.Errorf("failed to setup client in CAPI: %w", err)
	}

	pullSecret := corev1.Secret{}
	name := types.NamespacedName{Namespace: hc.Namespace, Name: hc.Spec.PullSecret.Name}
	if err := c.Get(ctx, name, &pullSecret); err != nil {
		return nil, fmt.Errorf("failed to get pull secret: %w", err)
	}

	data, ok := pullSecret.Data[corev1.DockerConfigJsonKey]
	if !ok {
		return nil, fmt.Errorf("expected %s key in pull secret", corev1.DockerConfigJsonKey)
	}
	return data, nil
}

// GetPayloadImage returns the image pullspec for the named component from the
// release payload referenced by the HostedCluster.
func GetPayloadImage(ctx context.Context, hc *hyperv1.HostedCluster, component string) (string, error) {
	// Bug fix: use the caller's ctx rather than context.TODO() so cancellation
	// and deadlines propagate to the pull-secret lookup.
	pullSecretBytes, err := getPullSecretFromHostedCluster(ctx, hc)
	if err != nil {
		// Bug fix: the message previously hard-coded "cluster-autoscaler"
		// although this function serves arbitrary components.
		return "", fmt.Errorf("failed to get pull secret to retrieve %s component image: %w", component, err)
	}

	// Call Lookup as an ordinary method instead of through the Provider
	// interface method expression.
	riprovider := &releaseinfo.RegistryClientProvider{}
	releaseImage, err := riprovider.Lookup(ctx, hc.Spec.Release.Image, pullSecretBytes)
	if err != nil {
		return "", fmt.Errorf("failed to lookup release image: %w", err)
	}

	image, exists := releaseImage.ComponentImages()[component]
	if !exists {
		// Bug fix: on a miss "image" is always empty; name the missing
		// component instead.
		return "", fmt.Errorf("image does not exist for release: %q", component)
	}
	return image, nil
}

0 comments on commit 33c6567

Please sign in to comment.