From 6af05595adfb9fc0de14292588d479832f31fcf0 Mon Sep 17 00:00:00 2001 From: Jon Huhn Date: Wed, 17 Apr 2024 16:08:47 -0500 Subject: [PATCH] ASOAPI: aggregate agentPoolProfiles for ManagedCluster create --- .../azureasomanagedcontrolplane_controller.go | 21 +- exp/mutators/azureasomanagedcontrolplane.go | 187 +++++++++++- .../azureasomanagedcontrolplane_test.go | 280 +++++++++++++++++- main.go | 10 +- ...cluster-template-aks-aso-clusterclass.yaml | 7 - templates/cluster-template-aks-aso.yaml | 5 - .../aks-aso-clusterclass/clusterclass.yaml | 7 - .../flavors/aks-aso/cluster-template.yaml | 7 - .../ci/cluster-template-prow-aks-aso.yaml | 5 - 9 files changed, 491 insertions(+), 38 deletions(-) diff --git a/exp/controllers/azureasomanagedcontrolplane_controller.go b/exp/controllers/azureasomanagedcontrolplane_controller.go index 923fe2a2945..23159374bdf 100644 --- a/exp/controllers/azureasomanagedcontrolplane_controller.go +++ b/exp/controllers/azureasomanagedcontrolplane_controller.go @@ -73,6 +73,12 @@ func (r *AzureASOManagedControlPlaneReconciler) SetupWithManager(ctx context.Con infracontroller.ClusterPauseChangeAndInfrastructureReady(log), ), ). + // User errors that CAPZ passes through agentPoolProfiles on create must be fixed in the + // AzureASOManagedMachinePool, so trigger a reconciliation to consume those fixes. + Watches( + &infrav1exp.AzureASOManagedMachinePool{}, + handler.EnqueueRequestsFromMapFunc(r.azureASOManagedMachinePoolToAzureASOManagedControlPlane), + ). Owns(&corev1.Secret{}). Build(r) if err != nil { @@ -106,6 +112,19 @@ func clusterToAzureASOManagedControlPlane(_ context.Context, o client.Object) [] return nil } +func (r *AzureASOManagedControlPlaneReconciler) azureASOManagedMachinePoolToAzureASOManagedControlPlane(ctx context.Context, o client.Object) []ctrl.Request { + asoManagedMachinePool := o.(*infrav1exp.AzureASOManagedMachinePool) + clusterName := asoManagedMachinePool.Labels[clusterv1.ClusterNameLabel] + if clusterName == "" { + return nil + } + cluster, err := util.GetClusterByName(ctx, r.Client, asoManagedMachinePool.Namespace, clusterName) + if client.IgnoreNotFound(err) != nil || cluster == nil { + return nil + } + return clusterToAzureASOManagedControlPlane(ctx, cluster) +} + //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azureasomanagedcontrolplanes,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azureasomanagedcontrolplanes/status,verbs=get;update;patch //+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azureasomanagedcontrolplanes/finalizers,verbs=update @@ -180,7 +199,7 @@ func (r *AzureASOManagedControlPlaneReconciler) reconcileNormal(ctx context.Cont return ctrl.Result{Requeue: true}, nil } - resources, err := mutators.ApplyMutators(ctx, asoManagedControlPlane.Spec.Resources, mutators.SetManagedClusterDefaults(asoManagedControlPlane, cluster)) + resources, err := mutators.ApplyMutators(ctx, asoManagedControlPlane.Spec.Resources, mutators.SetManagedClusterDefaults(r.Client, asoManagedControlPlane, cluster)) if err != nil { return ctrl.Result{}, err } diff --git a/exp/mutators/azureasomanagedcontrolplane.go b/exp/mutators/azureasomanagedcontrolplane.go index 8fad06554c4..5e6a3b351e2 100644 --- a/exp/mutators/azureasomanagedcontrolplane.go +++ b/exp/mutators/azureasomanagedcontrolplane.go @@ -18,24 +18,32 @@ package mutators import ( "context" + "errors" "fmt" "strings" asocontainerservicev1 
"github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" + asocontainerservicev1hub "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001/storage" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/cluster-api-provider-azure/azure" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" "sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) var ( // ErrNoManagedClusterDefined describes an AzureASOManagedControlPlane without a ManagedCluster. - ErrNoManagedClusterDefined = fmt.Errorf("no %s ManagedCluster defined in AzureASOManagedControlPlane spec.resources", asocontainerservicev1.GroupVersion.Group) + ErrNoManagedClusterDefined = fmt.Errorf("no %s ManagedCluster defined in AzureASOManagedControlPlane spec.resources", asocontainerservicev1hub.GroupVersion.Group) + + // ErrNoAzureASOManagedMachinePools means no AzureASOManagedMachinePools exist for an AzureASOManagedControlPlane. + ErrNoAzureASOManagedMachinePools = errors.New("no AzureASOManagedMachinePools found for AzureASOManagedControlPlane") ) // SetManagedClusterDefaults propagates values defined by Cluster API to an ASO ManagedCluster. -func SetManagedClusterDefaults(asoManagedControlPlane *infrav1exp.AzureASOManagedControlPlane, cluster *clusterv1.Cluster) ResourcesMutator { +func SetManagedClusterDefaults(ctrlClient client.Client, asoManagedControlPlane *infrav1exp.AzureASOManagedControlPlane, cluster *clusterv1.Cluster) ResourcesMutator { return func(ctx context.Context, us []*unstructured.Unstructured) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "mutators.SetManagedClusterDefaults") defer done() @@ -43,7 +51,7 @@ func SetManagedClusterDefaults(asoManagedControlPlane *infrav1exp.AzureASOManage var managedCluster *unstructured.Unstructured var managedClusterPath string for i, u := range us { - if u.GroupVersionKind().Group == asocontainerservicev1.GroupVersion.Group && + if u.GroupVersionKind().Group == asocontainerservicev1hub.GroupVersion.Group && u.GroupVersionKind().Kind == "ManagedCluster" { managedCluster = u managedClusterPath = fmt.Sprintf("spec.resources[%d]", i) @@ -66,6 +74,10 @@ func SetManagedClusterDefaults(asoManagedControlPlane *infrav1exp.AzureASOManage return err } + if err := setManagedClusterAgentPoolProfiles(ctx, ctrlClient, asoManagedControlPlane.Namespace, cluster, managedClusterPath, managedCluster); err != nil { + return err + } + return nil } } @@ -165,3 +177,172 @@ func setManagedClusterPodCIDR(ctx context.Context, cluster *clusterv1.Cluster, m logMutation(log, setPodCIDR) return unstructured.SetNestedField(managedCluster.UnstructuredContent(), capiCIDR, podCIDRPath...) } + +func setManagedClusterAgentPoolProfiles(ctx context.Context, ctrlClient client.Client, namespace string, cluster *clusterv1.Cluster, managedClusterPath string, managedCluster *unstructured.Unstructured) error { + ctx, log, done := tele.StartSpanWithLogger(ctx, "mutators.setManagedClusterAgentPoolProfiles") + defer done() + + agentPoolProfilesPath := []string{"spec", "agentPoolProfiles"} + userAgentPoolProfiles, agentPoolProfilesFound, err := unstructured.NestedSlice(managedCluster.UnstructuredContent(), agentPoolProfilesPath...) + if err != nil { + return err + } + setAgentPoolProfiles := mutation{ + location: managedClusterPath + "." 
+ strings.Join(agentPoolProfilesPath, "."),
+		val:      "nil",
+		reason:   "because agent pool definitions must be inherited from AzureASOManagedMachinePools",
+	}
+	if agentPoolProfilesFound {
+		return Incompatible{
+			mutation: setAgentPoolProfiles,
+			userVal:  fmt.Sprintf("<slice of length %d>", len(userAgentPoolProfiles)),
+		}
+	}
+
+	// AKS requires ManagedClusters to be created with agent pools: https://github.com/Azure/azure-service-operator/issues/2791
+	getMC := &asocontainerservicev1.ManagedCluster{}
+	err = ctrlClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: managedCluster.GetName()}, getMC)
+	if client.IgnoreNotFound(err) != nil {
+		return err
+	}
+	if len(getMC.Status.AgentPoolProfiles) != 0 {
+		return nil
+	}
+
+	log.V(4).Info("gathering agent pool profiles to include in ManagedCluster create")
+	agentPools, err := agentPoolsFromManagedMachinePools(ctx, ctrlClient, cluster.Name, namespace)
+	if err != nil {
+		return err
+	}
+	mc, err := ctrlClient.Scheme().New(managedCluster.GroupVersionKind())
+	if err != nil {
+		return err
+	}
+	err = ctrlClient.Scheme().Convert(managedCluster, mc, nil)
+	if err != nil {
+		return err
+	}
+	setAgentPoolProfiles.val = fmt.Sprintf("<slice of length %d>", len(agentPools))
+	logMutation(log, setAgentPoolProfiles)
+	err = setAgentPoolProfilesFromAgentPools(mc.(conversion.Convertible), agentPools)
+	if err != nil {
+		return err
+	}
+	err = ctrlClient.Scheme().Convert(mc, managedCluster, nil)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func agentPoolsFromManagedMachinePools(ctx context.Context, ctrlClient client.Client, clusterName string, namespace string) ([]conversion.Convertible, error) {
+	ctx, _, done := tele.StartSpanWithLogger(ctx, "mutators.agentPoolsFromManagedMachinePools")
+	defer done()
+
+	asoManagedMachinePools := &infrav1exp.AzureASOManagedMachinePoolList{}
+	err := ctrlClient.List(ctx, asoManagedMachinePools,
+		client.InNamespace(namespace),
+		client.MatchingLabels{
+			clusterv1.ClusterNameLabel: clusterName,
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to list AzureASOManagedMachinePools: %w", err)
+	}
+
+	var agentPools []conversion.Convertible
+	for _, asoManagedMachinePool := range asoManagedMachinePools.Items {
+		resources, err := ApplyMutators(ctx, asoManagedMachinePool.Spec.Resources)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, u := range resources {
+			if u.GroupVersionKind().Group != asocontainerservicev1hub.GroupVersion.Group ||
+				u.GroupVersionKind().Kind != "ManagedClustersAgentPool" {
+				continue
+			}
+
+			agentPool, err := ctrlClient.Scheme().New(u.GroupVersionKind())
+			if err != nil {
+				return nil, fmt.Errorf("error creating new %v: %w", u.GroupVersionKind(), err)
+			}
+			err = ctrlClient.Scheme().Convert(u, agentPool, nil)
+			if err != nil {
+				return nil, err
+			}
+
+			agentPools = append(agentPools, agentPool.(conversion.Convertible))
+			break
+		}
+	}
+
+	return agentPools, nil
+}
+
+func setAgentPoolProfilesFromAgentPools(managedCluster conversion.Convertible, agentPools []conversion.Convertible) error {
+	hubMC := &asocontainerservicev1hub.ManagedCluster{}
+	err := managedCluster.ConvertTo(hubMC)
+	if err != nil {
+		return err
+	}
+	hubMC.Spec.AgentPoolProfiles = nil
+
+	for _, agentPool := range agentPools {
+		hubPool := &asocontainerservicev1hub.ManagedClustersAgentPool{}
+		err := agentPool.ConvertTo(hubPool)
+		if err != nil {
+			return err
+		}
+
+		profile := asocontainerservicev1hub.ManagedClusterAgentPoolProfile{
+			AvailabilityZones:                 hubPool.Spec.AvailabilityZones,
+			CapacityReservationGroupReference: hubPool.Spec.CapacityReservationGroupReference,
+			Count:                             hubPool.Spec.Count,
+			CreationData:                      hubPool.Spec.CreationData,
+			EnableAutoScaling:                 hubPool.Spec.EnableAutoScaling,
+			EnableEncryptionAtHost:            hubPool.Spec.EnableEncryptionAtHost,
+			EnableFIPS:                        hubPool.Spec.EnableFIPS,
+			EnableNodePublicIP:                hubPool.Spec.EnableNodePublicIP,
+			EnableUltraSSD:                    hubPool.Spec.EnableUltraSSD,
+			GpuInstanceProfile:                hubPool.Spec.GpuInstanceProfile,
+			HostGroupReference:                hubPool.Spec.HostGroupReference,
+			KubeletConfig:                     hubPool.Spec.KubeletConfig,
+			KubeletDiskType:                   hubPool.Spec.KubeletDiskType,
+			LinuxOSConfig:                     hubPool.Spec.LinuxOSConfig,
+			MaxCount:                          hubPool.Spec.MaxCount,
+			MaxPods:                           hubPool.Spec.MaxPods,
+			MinCount:                          hubPool.Spec.MinCount,
+			Mode:                              hubPool.Spec.Mode,
+			Name:                              azure.AliasOrNil[string](&hubPool.Spec.AzureName),
+			NetworkProfile:                    hubPool.Spec.NetworkProfile,
+			NodeLabels:                        hubPool.Spec.NodeLabels,
+			NodePublicIPPrefixReference:       hubPool.Spec.NodePublicIPPrefixReference,
+			NodeTaints:                        hubPool.Spec.NodeTaints,
+			OrchestratorVersion:               hubPool.Spec.OrchestratorVersion,
+			OsDiskSizeGB:                      hubPool.Spec.OsDiskSizeGB,
+			OsDiskType:                        hubPool.Spec.OsDiskType,
+			OsSKU:                             hubPool.Spec.OsSKU,
+			OsType:                            hubPool.Spec.OsType,
+			PodSubnetReference:                hubPool.Spec.PodSubnetReference,
+			PowerState:                        hubPool.Spec.PowerState,
+			PropertyBag:                       hubPool.Spec.PropertyBag,
+			ProximityPlacementGroupReference:  hubPool.Spec.ProximityPlacementGroupReference,
+			ScaleDownMode:                     hubPool.Spec.ScaleDownMode,
+			ScaleSetEvictionPolicy:            hubPool.Spec.ScaleSetEvictionPolicy,
+			ScaleSetPriority:                  hubPool.Spec.ScaleSetPriority,
+			SpotMaxPrice:                      hubPool.Spec.SpotMaxPrice,
+			Tags:                              hubPool.Spec.Tags,
+			Type:                              hubPool.Spec.Type,
+			UpgradeSettings:                   hubPool.Spec.UpgradeSettings,
+			VmSize:                            hubPool.Spec.VmSize,
+			VnetSubnetReference:               hubPool.Spec.VnetSubnetReference,
+			WorkloadRuntime:                   hubPool.Spec.WorkloadRuntime,
+		}
+
+		hubMC.Spec.AgentPoolProfiles = append(hubMC.Spec.AgentPoolProfiles, profile)
+	}
+
+	return managedCluster.ConvertFrom(hubMC)
+}
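The hub-conversion pattern that closes the mutators file above is what lets setAgentPoolProfilesFromAgentPools accept agent pools authored against any ASO API version: every versioned type is conversion.Convertible into the hub (storage) version, giving one concrete type to read fields from. Below is a minimal standalone sketch of that pattern, not part of the patch, assuming (as the patch does) that ASO's v1api20231001 storage package is the hub; the pool name and count are illustrative.

```go
package main

import (
	"fmt"

	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
	asocontainerservicev1hub "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001/storage"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/conversion"
)

func main() {
	// Any versioned agent pool satisfies conversion.Convertible.
	var pool conversion.Convertible = &asocontainerservicev1.ManagedClustersAgentPool{
		Spec: asocontainerservicev1.ManagedClusters_AgentPool_Spec{
			AzureName: "pool0",
			Count:     ptr.To(3),
		},
	}

	// Converting to the hub yields one concrete type to read from, regardless
	// of which API version the user's resource was authored in.
	hubPool := &asocontainerservicev1hub.ManagedClustersAgentPool{}
	if err := pool.ConvertTo(hubPool); err != nil {
		panic(err)
	}
	fmt.Println(hubPool.Spec.AzureName, *hubPool.Spec.Count) // pool0 3
}
```

Preview-only fields survive the trip into the hub via its PropertyBag (note that the profile above copies hubPool.Spec.PropertyBag), so they reappear when converting back out to a preview version but are dropped for stable versions, which is exactly what the tests below assert.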
diff --git a/exp/mutators/azureasomanagedcontrolplane_test.go b/exp/mutators/azureasomanagedcontrolplane_test.go
index 58c35d54f6a..0e0a0f44a57 100644
--- a/exp/mutators/azureasomanagedcontrolplane_test.go
+++ b/exp/mutators/azureasomanagedcontrolplane_test.go
@@ -21,6 +21,7 @@ import (
 	"encoding/json"
 	"testing"
 
+	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
 	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
 	"github.com/google/go-cmp/cmp"
 	. "github.com/onsi/gomega"
@@ -30,6 +31,8 @@ import (
 	"k8s.io/utils/ptr"
 	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
+	"sigs.k8s.io/controller-runtime/pkg/conversion"
 )
 
 func TestSetManagedClusterDefaults(t *testing.T) {
@@ -98,7 +101,14 @@ func TestSetManagedClusterDefaults(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			g := NewGomegaWithT(t)
 
-			mutator := SetManagedClusterDefaults(test.asoManagedControlPlane, test.cluster)
+			s := runtime.NewScheme()
+			g.Expect(asocontainerservicev1.AddToScheme(s)).To(Succeed())
+			g.Expect(infrav1exp.AddToScheme(s)).To(Succeed())
+			c := fakeclient.NewClientBuilder().
+				WithScheme(s).
+				Build()
+
+			mutator := SetManagedClusterDefaults(c, test.asoManagedControlPlane, test.cluster)
 			actual, err := ApplyMutators(ctx, test.asoManagedControlPlane.Spec.Resources, mutator)
 			if test.expectedErr != nil {
 				g.Expect(err).To(MatchError(test.expectedErr))
@@ -478,6 +488,267 @@ func TestSetManagedClusterPodCIDR(t *testing.T) {
 	}
 }
 
+func TestSetManagedClusterAgentPoolProfiles(t *testing.T) {
+	g := NewGomegaWithT(t)
+	ctx := context.Background()
+	s := runtime.NewScheme()
+	g.Expect(asocontainerservicev1.AddToScheme(s)).To(Succeed())
+	g.Expect(infrav1exp.AddToScheme(s)).To(Succeed())
+	fakeClientBuilder := func() *fakeclient.ClientBuilder {
+		return fakeclient.NewClientBuilder().WithScheme(s)
+	}
+
+	t.Run("agent pools should not be defined on user's ManagedCluster", func(t *testing.T) {
+		g := NewGomegaWithT(t)
+
+		umc := mcUnstructured(g, &asocontainerservicev1.ManagedCluster{
+			Spec: asocontainerservicev1.ManagedCluster_Spec{
+				AgentPoolProfiles: []asocontainerservicev1.ManagedClusterAgentPoolProfile{{}},
+			},
+		})
+
+		err := setManagedClusterAgentPoolProfiles(ctx, nil, "", nil, "", umc)
+		g.Expect(err).To(MatchError(Incompatible{
+			mutation: mutation{
+				location: ".spec.agentPoolProfiles",
+				val:      "nil",
+				reason:   "because agent pool definitions must be inherited from AzureASOManagedMachinePools",
+			},
+			userVal: "<slice of length 1>",
+		}))
+	})
+
+	t.Run("agent pool profiles already created", func(t *testing.T) {
+		g := NewGomegaWithT(t)
+
+		namespace := "ns"
+		managedCluster := &asocontainerservicev1.ManagedCluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "mc",
+				Namespace: namespace,
+			},
+			Status: asocontainerservicev1.ManagedCluster_STATUS{
+				AgentPoolProfiles: []asocontainerservicev1.ManagedClusterAgentPoolProfile_STATUS{{}},
+			},
+		}
+		umc := mcUnstructured(g, managedCluster)
+
+		c := fakeClientBuilder().
+			WithObjects(managedCluster).
+ Build() + + err := setManagedClusterAgentPoolProfiles(ctx, c, namespace, nil, "", umc) + g.Expect(err).NotTo(HaveOccurred()) + }) + + t.Run("agent pool profiles derived from managed machine pools", func(t *testing.T) { + g := NewGomegaWithT(t) + + namespace := "ns" + clusterName := "cluster" + managedCluster := &asocontainerservicev1.ManagedCluster{} + umc := mcUnstructured(g, managedCluster) + + asoManagedMachinePools := &infrav1exp.AzureASOManagedMachinePoolList{ + Items: []infrav1exp.AzureASOManagedMachinePool{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "wrong-label", + Namespace: namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "not-" + clusterName, + }, + }, + Spec: infrav1exp.AzureASOManagedMachinePoolSpec{ + AzureASOManagedMachinePoolTemplateResourceSpec: infrav1exp.AzureASOManagedMachinePoolTemplateResourceSpec{ + Resources: []runtime.RawExtension{ + { + Raw: apJSON(g, &asocontainerservicev1.ManagedClustersAgentPool{ + Spec: asocontainerservicev1.ManagedClusters_AgentPool_Spec{ + AzureName: "no", + }, + }), + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "wrong-namespace", + Namespace: "not-" + namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: clusterName, + }, + }, + Spec: infrav1exp.AzureASOManagedMachinePoolSpec{ + AzureASOManagedMachinePoolTemplateResourceSpec: infrav1exp.AzureASOManagedMachinePoolTemplateResourceSpec{ + Resources: []runtime.RawExtension{ + { + Raw: apJSON(g, &asocontainerservicev1.ManagedClustersAgentPool{ + Spec: asocontainerservicev1.ManagedClusters_AgentPool_Spec{ + AzureName: "no", + }, + }), + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pool0", + Namespace: namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: clusterName, + }, + }, + Spec: infrav1exp.AzureASOManagedMachinePoolSpec{ + AzureASOManagedMachinePoolTemplateResourceSpec: infrav1exp.AzureASOManagedMachinePoolTemplateResourceSpec{ + Resources: []runtime.RawExtension{ + { + Raw: apJSON(g, &asocontainerservicev1.ManagedClustersAgentPool{ + Spec: asocontainerservicev1.ManagedClusters_AgentPool_Spec{ + AzureName: "azpool0", + }, + }), + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pool1", + Namespace: namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: clusterName, + }, + }, + Spec: infrav1exp.AzureASOManagedMachinePoolSpec{ + AzureASOManagedMachinePoolTemplateResourceSpec: infrav1exp.AzureASOManagedMachinePoolTemplateResourceSpec{ + Resources: []runtime.RawExtension{ + { + Raw: apJSON(g, &asocontainerservicev1.ManagedClustersAgentPool{ + Spec: asocontainerservicev1.ManagedClusters_AgentPool_Spec{ + AzureName: "azpool1", + }, + }), + }, + }, + }, + }, + }, + }, + } + expected := &asocontainerservicev1.ManagedCluster{ + Spec: asocontainerservicev1.ManagedCluster_Spec{ + AgentPoolProfiles: []asocontainerservicev1.ManagedClusterAgentPoolProfile{ + {Name: ptr.To("azpool0")}, + {Name: ptr.To("azpool1")}, + }, + }, + } + + c := fakeClientBuilder(). + WithLists(asoManagedMachinePools). 
+			Build()
+
+		cluster := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: clusterName}}
+		err := setManagedClusterAgentPoolProfiles(ctx, c, namespace, cluster, "", umc)
+		g.Expect(err).NotTo(HaveOccurred())
+		g.Expect(s.Convert(umc, managedCluster, nil)).To(Succeed())
+		g.Expect(cmp.Diff(expected, managedCluster)).To(BeEmpty())
+	})
+}
+
+func TestSetAgentPoolProfilesFromAgentPools(t *testing.T) {
+	t.Run("stable with no pools", func(t *testing.T) {
+		g := NewGomegaWithT(t)
+
+		mc := &asocontainerservicev1.ManagedCluster{}
+		var pools []conversion.Convertible
+		var expected []asocontainerservicev1.ManagedClusterAgentPoolProfile
+
+		err := setAgentPoolProfilesFromAgentPools(mc, pools)
+		g.Expect(err).NotTo(HaveOccurred())
+		g.Expect(cmp.Diff(expected, mc.Spec.AgentPoolProfiles)).To(BeEmpty())
+	})
+
+	t.Run("stable with pools", func(t *testing.T) {
+		g := NewGomegaWithT(t)
+
+		mc := &asocontainerservicev1.ManagedCluster{}
+		pools := []conversion.Convertible{
+			&asocontainerservicev1.ManagedClustersAgentPool{
+				Spec: asocontainerservicev1.ManagedClusters_AgentPool_Spec{
+					AzureName: "pool0",
+					MaxCount:  ptr.To(1),
+				},
+			},
+			// Not all pools have to be the same version, or the same version as the cluster.
+			&asocontainerservicev1preview.ManagedClustersAgentPool{
+				Spec: asocontainerservicev1preview.ManagedClusters_AgentPool_Spec{
+					AzureName:           "pool1",
+					MinCount:            ptr.To(2),
+					EnableCustomCATrust: ptr.To(true),
+				},
+			},
+		}
+		expected := []asocontainerservicev1.ManagedClusterAgentPoolProfile{
+			{
+				Name:     ptr.To("pool0"),
+				MaxCount: ptr.To(1),
+			},
+			{
+				Name:     ptr.To("pool1"),
+				MinCount: ptr.To(2),
+				// EnableCustomCATrust is a preview-only feature that can't be represented here, so it should be lost.
+			},
+		}
+
+		err := setAgentPoolProfilesFromAgentPools(mc, pools)
+		g.Expect(err).NotTo(HaveOccurred())
+		g.Expect(cmp.Diff(expected, mc.Spec.AgentPoolProfiles)).To(BeEmpty())
+	})
+
+	t.Run("preview with pools", func(t *testing.T) {
+		g := NewGomegaWithT(t)
+
+		mc := &asocontainerservicev1preview.ManagedCluster{}
+		pools := []conversion.Convertible{
+			&asocontainerservicev1.ManagedClustersAgentPool{
+				Spec: asocontainerservicev1.ManagedClusters_AgentPool_Spec{
+					AzureName: "pool0",
+					MaxCount:  ptr.To(1),
+				},
+			},
+			&asocontainerservicev1preview.ManagedClustersAgentPool{
+				Spec: asocontainerservicev1preview.ManagedClusters_AgentPool_Spec{
+					AzureName:           "pool1",
+					MinCount:            ptr.To(2),
+					EnableCustomCATrust: ptr.To(true),
+				},
+			},
+		}
+		expected := []asocontainerservicev1preview.ManagedClusterAgentPoolProfile{
+			{
+				Name:     ptr.To("pool0"),
+				MaxCount: ptr.To(1),
+			},
+			{
+				Name:                ptr.To("pool1"),
+				MinCount:            ptr.To(2),
+				EnableCustomCATrust: ptr.To(true),
+			},
+		}
+
+		err := setAgentPoolProfilesFromAgentPools(mc, pools)
+		g.Expect(err).NotTo(HaveOccurred())
+		g.Expect(cmp.Diff(expected, mc.Spec.AgentPoolProfiles)).To(BeEmpty())
+	})
+}
+
 func mcJSON(g Gomega, mc *asocontainerservicev1.ManagedCluster) []byte {
 	mc.SetGroupVersionKind(asocontainerservicev1.GroupVersion.WithKind("ManagedCluster"))
 	j, err := json.Marshal(mc)
@@ -485,6 +756,13 @@ func mcJSON(g Gomega, mc *asocontainerservicev1.ManagedCluster) []byte {
 	return j
 }
 
+func apJSON(g Gomega, ap *asocontainerservicev1.ManagedClustersAgentPool) []byte {
+	ap.SetGroupVersionKind(asocontainerservicev1.GroupVersion.WithKind("ManagedClustersAgentPool"))
+	j, err := json.Marshal(ap)
+	g.Expect(err).NotTo(HaveOccurred())
+	return j
+}
+
 func mcUnstructured(g Gomega, mc *asocontainerservicev1.ManagedCluster) *unstructured.Unstructured {
 	s := runtime.NewScheme()
 	g.Expect(asocontainerservicev1.AddToScheme(s)).To(Succeed())
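Both the mutator and these tests lean on runtime.Scheme.Convert to hop between typed and unstructured views of the same object: typed for version conversion and field access, unstructured for the generic mutator pipeline. A self-contained sketch of that round trip follows; the DnsPrefix value is illustrative, not taken from the patch.

```go
package main

import (
	"fmt"

	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/utils/ptr"
)

func main() {
	s := runtime.NewScheme()
	if err := asocontainerservicev1.AddToScheme(s); err != nil {
		panic(err)
	}

	mc := &asocontainerservicev1.ManagedCluster{
		Spec: asocontainerservicev1.ManagedCluster_Spec{
			DnsPrefix: ptr.To("example"),
		},
	}
	// The GVK must be set so the scheme knows what the unstructured object is.
	mc.SetGroupVersionKind(asocontainerservicev1.GroupVersion.WithKind("ManagedCluster"))

	// Typed -> unstructured: the shape the mutators operate on.
	u := &unstructured.Unstructured{}
	if err := s.Convert(mc, u, nil); err != nil {
		panic(err)
	}

	// Unstructured -> typed: how the tests read back mutated fields.
	out := &asocontainerservicev1.ManagedCluster{}
	if err := s.Convert(u, out, nil); err != nil {
		panic(err)
	}
	fmt.Println(u.GetKind(), *out.Spec.DnsPrefix) // ManagedCluster example
}
```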
diff --git a/main.go b/main.go
index 534ad202209..11833afc738 100644
--- a/main.go
+++ b/main.go
@@ -26,9 +26,12 @@ import (
 	"time"
 
 	// +kubebuilder:scaffold:imports
+	asocontainerservicev1api20210501 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20210501"
+	asocontainerservicev1api20230201 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230201"
 	asocontainerservicev1api20230202preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
 	asocontainerservicev1api20230315preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230315preview"
-	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
+	asocontainerservicev1api20231001 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
+	asocontainerservicev1api20231102preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231102preview"
 	asokubernetesconfigurationv1 "github.com/Azure/azure-service-operator/v2/api/kubernetesconfiguration/v1api20230501"
 	asonetworkv1api20201101 "github.com/Azure/azure-service-operator/v2/api/network/v1api20201101"
 	asonetworkv1api20220701 "github.com/Azure/azure-service-operator/v2/api/network/v1api20220701"
@@ -82,11 +85,14 @@ func init() {
 	_ = expv1.AddToScheme(scheme)
 	_ = kubeadmv1.AddToScheme(scheme)
 	_ = asoresourcesv1.AddToScheme(scheme)
-	_ = asocontainerservicev1.AddToScheme(scheme)
+	_ = asocontainerservicev1api20210501.AddToScheme(scheme)
+	_ = asocontainerservicev1api20230201.AddToScheme(scheme)
+	_ = asocontainerservicev1api20231001.AddToScheme(scheme)
 	_ = asonetworkv1api20220701.AddToScheme(scheme)
 	_ = asonetworkv1api20201101.AddToScheme(scheme)
 	_ = asocontainerservicev1api20230202preview.AddToScheme(scheme)
 	_ = asocontainerservicev1api20230315preview.AddToScheme(scheme)
+	_ = asocontainerservicev1api20231102preview.AddToScheme(scheme)
 	_ = asokubernetesconfigurationv1.AddToScheme(scheme)
 	// +kubebuilder:scaffold:scheme
 }
diff --git a/templates/cluster-template-aks-aso-clusterclass.yaml b/templates/cluster-template-aks-aso-clusterclass.yaml
index 27aa069c625..fb0db4839cc 100644
--- a/templates/cluster-template-aks-aso-clusterclass.yaml
+++ b/templates/cluster-template-aks-aso-clusterclass.yaml
@@ -66,13 +66,6 @@ spec:
               adminCredentials:
                 name: "{{ .builtin.cluster.name }}-kubeconfig"
                 key: value
-          # This agent pool only exists to avoid having to aggregate definitions
-          # from AzureASOManagedMachinePools. That will be done soon.
- agentPoolProfiles: - - name: stub - vmSize: ${AZURE_NODE_MACHINE_TYPE} - mode: System - count: 1 selector: apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 kind: AzureASOManagedControlPlaneTemplate diff --git a/templates/cluster-template-aks-aso.yaml b/templates/cluster-template-aks-aso.yaml index e8c53069469..d3862da75c8 100644 --- a/templates/cluster-template-aks-aso.yaml +++ b/templates/cluster-template-aks-aso.yaml @@ -27,11 +27,6 @@ spec: serviceoperator.azure.com/credential-from: ${ASO_CREDENTIAL_SECRET_NAME} name: ${CLUSTER_NAME} spec: - agentPoolProfiles: - - count: 1 - mode: System - name: stub - vmSize: ${AZURE_NODE_MACHINE_TYPE} dnsPrefix: ${CLUSTER_NAME} identity: type: SystemAssigned diff --git a/templates/flavors/aks-aso-clusterclass/clusterclass.yaml b/templates/flavors/aks-aso-clusterclass/clusterclass.yaml index 0062ed9730f..778ad1eb442 100644 --- a/templates/flavors/aks-aso-clusterclass/clusterclass.yaml +++ b/templates/flavors/aks-aso-clusterclass/clusterclass.yaml @@ -101,13 +101,6 @@ spec: adminCredentials: name: "{{ .builtin.cluster.name }}-kubeconfig" key: value - # This agent pool only exists to avoid having to aggregate definitions - # from AzureASOManagedMachinePools. That will be done soon. - agentPoolProfiles: - - name: stub - vmSize: ${AZURE_NODE_MACHINE_TYPE} - mode: System - count: 1 - name: azureasomanagedmachinepool-pool0-spec definitions: - selector: diff --git a/templates/flavors/aks-aso/cluster-template.yaml b/templates/flavors/aks-aso/cluster-template.yaml index f8d72bd7bf3..8d059145936 100644 --- a/templates/flavors/aks-aso/cluster-template.yaml +++ b/templates/flavors/aks-aso/cluster-template.yaml @@ -43,13 +43,6 @@ spec: adminCredentials: name: ${CLUSTER_NAME}-kubeconfig key: value - # This agent pool only exists to avoid having to aggregate definitions - # from AzureASOManagedMachinePools. That will be done soon. - agentPoolProfiles: - - name: stub - vmSize: ${AZURE_NODE_MACHINE_TYPE} - mode: System - count: 1 --- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 kind: AzureASOManagedCluster diff --git a/templates/test/ci/cluster-template-prow-aks-aso.yaml b/templates/test/ci/cluster-template-prow-aks-aso.yaml index 45a7134b752..c0ea2df3fe2 100644 --- a/templates/test/ci/cluster-template-prow-aks-aso.yaml +++ b/templates/test/ci/cluster-template-prow-aks-aso.yaml @@ -27,11 +27,6 @@ spec: serviceoperator.azure.com/credential-from: ${ASO_CREDENTIAL_SECRET_NAME} name: ${CLUSTER_NAME} spec: - agentPoolProfiles: - - count: 1 - mode: System - name: stub - vmSize: ${AZURE_NODE_MACHINE_TYPE} dnsPrefix: ${CLUSTER_NAME} identity: type: SystemAssigned
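With the stub pools deleted from every template, agent pool definitions now come solely from AzureASOManagedMachinePools, which the mutator discovers by the cluster-name label before the initial ManagedCluster create. The fake-client sketch below is a simplified stand-in for that discovery step in agentPoolsFromManagedMachinePools; the namespace and cluster name are illustrative.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
	s := runtime.NewScheme()
	if err := infrav1exp.AddToScheme(s); err != nil {
		panic(err)
	}

	// A machine pool carrying the label that ties it to its Cluster.
	pool := &infrav1exp.AzureASOManagedMachinePool{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pool0",
			Namespace: "default",
			Labels:    map[string]string{clusterv1.ClusterNameLabel: "my-cluster"},
		},
	}
	c := fakeclient.NewClientBuilder().WithScheme(s).WithObjects(pool).Build()

	// The same namespace plus cluster-name label selector the mutator uses to
	// gather pools for the ManagedCluster create call.
	pools := &infrav1exp.AzureASOManagedMachinePoolList{}
	err := c.List(context.Background(), pools,
		client.InNamespace("default"),
		client.MatchingLabels{clusterv1.ClusterNameLabel: "my-cluster"},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(pools.Items)) // 1
}
```

Pools that match the label but live in another namespace are ignored, which is what the wrong-namespace fixture in the tests above pins down.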