From ae1e38def01a4436d849d0b19682d25dc828e255 Mon Sep 17 00:00:00 2001 From: Jon Huhn Date: Wed, 28 Feb 2024 20:01:30 -0600 Subject: [PATCH] Expose AKS Preview Features Co-authored-by: Jon Huhn --- .../azuremanagedcontrolplane_webhook.go | 1 + .../azuremanagedcontrolplane_webhook_test.go | 5 + ...zuremanagedcontrolplanetemplate_default.go | 1 + ...anagedcontrolplanetemplate_webhook_test.go | 3 + api/v1beta1/types_class.go | 4 + api/v1beta1/zz_generated.deepcopy.go | 5 + azure/converters/managedagentpool.go | 42 +++++ azure/converters/managedagentpool_test.go | 84 ++++++++++ azure/scope/managedcontrolplane.go | 14 +- azure/scope/managedcontrolplane_test.go | 12 +- azure/scope/managedmachinepool.go | 12 +- azure/scope/managedmachinepool_test.go | 63 +++++++- azure/services/agentpools/agentpools.go | 31 +++- azure/services/agentpools/agentpools_test.go | 22 +++ .../mock_agentpools/agentpools_mock.go | 20 ++- azure/services/agentpools/spec.go | 54 ++++++- azure/services/agentpools/spec_test.go | 144 +++++++++++++++++- azure/services/aso/aso.go | 26 ++-- azure/services/aso/service.go | 5 +- .../managedclusters/managedclusters.go | 34 ++++- .../managedclusters/managedclusters_test.go | 124 ++++++++++----- .../managedclusters_mock.go | 20 ++- azure/services/managedclusters/spec.go | 110 ++++++++++--- azure/services/managedclusters/spec_test.go | 37 ++++- ...er.x-k8s.io_azuremanagedcontrolplanes.yaml | 4 + ....io_azuremanagedcontrolplanetemplates.yaml | 4 + ...azuremanagedmachinepool_controller_test.go | 1 + .../azuremanagedmachinepool_reconciler.go | 11 +- docs/book/src/topics/managedcluster.md | 30 ++++ main.go | 6 +- test/e2e/aks_clusterclass.go | 10 +- 31 files changed, 807 insertions(+), 132 deletions(-) diff --git a/api/v1beta1/azuremanagedcontrolplane_webhook.go b/api/v1beta1/azuremanagedcontrolplane_webhook.go index b020a752f09..0d206dfed0c 100644 --- a/api/v1beta1/azuremanagedcontrolplane_webhook.go +++ b/api/v1beta1/azuremanagedcontrolplane_webhook.go @@ -81,6 +81,7 @@ func (mw *azureManagedControlPlaneWebhook) Default(ctx context.Context, obj runt setDefault[*Identity](&m.Spec.Identity, &Identity{ Type: ManagedControlPlaneIdentityTypeSystemAssigned, }) + setDefault[*bool](&m.Spec.EnablePreviewFeatures, ptr.To(false)) m.Spec.Version = setDefaultVersion(m.Spec.Version) m.Spec.SKU = setDefaultSku(m.Spec.SKU) m.Spec.AutoScalerProfile = setDefaultAutoScalerProfile(m.Spec.AutoScalerProfile) diff --git a/api/v1beta1/azuremanagedcontrolplane_webhook_test.go b/api/v1beta1/azuremanagedcontrolplane_webhook_test.go index 82ea6ba1dbd..86cba627645 100644 --- a/api/v1beta1/azuremanagedcontrolplane_webhook_test.go +++ b/api/v1beta1/azuremanagedcontrolplane_webhook_test.go @@ -77,6 +77,8 @@ func TestDefaultingWebhook(t *testing.T) { g.Expect(amcp.Spec.DNSPrefix).NotTo(BeNil()) g.Expect(*amcp.Spec.DNSPrefix).To(Equal(amcp.Name)) g.Expect(amcp.Spec.Extensions[0].Plan.Name).To(Equal("fooName-test-product")) + g.Expect(amcp.Spec.EnablePreviewFeatures).NotTo(BeNil()) + g.Expect(*amcp.Spec.EnablePreviewFeatures).To(BeFalse()) t.Logf("Testing amcp defaulting webhook with baseline") netPlug := "kubenet" @@ -106,6 +108,7 @@ func TestDefaultingWebhook(t *testing.T) { IntervalHours: ptr.To(48), }, } + amcp.Spec.EnablePreviewFeatures = ptr.To(true) err = mcpw.Default(context.Background(), amcp) g.Expect(err).NotTo(HaveOccurred()) @@ -129,6 +132,8 @@ func TestDefaultingWebhook(t *testing.T) { g.Expect(amcp.Spec.SecurityProfile.ImageCleaner).NotTo(BeNil()) 
g.Expect(amcp.Spec.SecurityProfile.ImageCleaner.IntervalHours).NotTo(BeNil()) g.Expect(*amcp.Spec.SecurityProfile.ImageCleaner.IntervalHours).To(Equal(48)) + g.Expect(amcp.Spec.EnablePreviewFeatures).NotTo(BeNil()) + g.Expect(*amcp.Spec.EnablePreviewFeatures).To(BeTrue()) t.Logf("Testing amcp defaulting webhook with overlay") amcp = &AzureManagedControlPlane{ diff --git a/api/v1beta1/azuremanagedcontrolplanetemplate_default.go b/api/v1beta1/azuremanagedcontrolplanetemplate_default.go index fce6a8182d0..e7fd7f394ce 100644 --- a/api/v1beta1/azuremanagedcontrolplanetemplate_default.go +++ b/api/v1beta1/azuremanagedcontrolplanetemplate_default.go @@ -25,6 +25,7 @@ import ( func (mcp *AzureManagedControlPlaneTemplate) setDefaults() { setDefault[*string](&mcp.Spec.Template.Spec.NetworkPlugin, ptr.To(AzureNetworkPluginName)) setDefault[*string](&mcp.Spec.Template.Spec.LoadBalancerSKU, ptr.To("Standard")) + setDefault[*bool](&mcp.Spec.Template.Spec.EnablePreviewFeatures, ptr.To(false)) if mcp.Spec.Template.Spec.Version != "" && !strings.HasPrefix(mcp.Spec.Template.Spec.Version, "v") { mcp.Spec.Template.Spec.Version = setDefaultVersion(mcp.Spec.Template.Spec.Version) diff --git a/api/v1beta1/azuremanagedcontrolplanetemplate_webhook_test.go b/api/v1beta1/azuremanagedcontrolplanetemplate_webhook_test.go index b33c4d10d0d..2657f61d420 100644 --- a/api/v1beta1/azuremanagedcontrolplanetemplate_webhook_test.go +++ b/api/v1beta1/azuremanagedcontrolplanetemplate_webhook_test.go @@ -41,6 +41,7 @@ func TestControlPlaneTemplateDefaultingWebhook(t *testing.T) { g.Expect(amcpt.Spec.Template.Spec.VirtualNetwork.Subnet.Name).To(Equal("fooName")) g.Expect(amcpt.Spec.Template.Spec.VirtualNetwork.Subnet.CIDRBlock).To(Equal(defaultAKSNodeSubnetCIDR)) g.Expect(amcpt.Spec.Template.Spec.SKU.Tier).To(Equal(FreeManagedControlPlaneTier)) + g.Expect(*amcpt.Spec.Template.Spec.EnablePreviewFeatures).To(BeFalse()) t.Logf("Testing amcp defaulting webhook with baseline") netPlug := "kubenet" @@ -53,6 +54,7 @@ func TestControlPlaneTemplateDefaultingWebhook(t *testing.T) { amcpt.Spec.Template.Spec.VirtualNetwork.Name = "fooVnetName" amcpt.Spec.Template.Spec.VirtualNetwork.Subnet.Name = "fooSubnetName" amcpt.Spec.Template.Spec.SKU.Tier = PaidManagedControlPlaneTier + amcpt.Spec.Template.Spec.EnablePreviewFeatures = ptr.To(true) err = mcptw.Default(context.Background(), amcpt) g.Expect(err).NotTo(HaveOccurred()) @@ -63,6 +65,7 @@ func TestControlPlaneTemplateDefaultingWebhook(t *testing.T) { g.Expect(amcpt.Spec.Template.Spec.VirtualNetwork.Name).To(Equal("fooVnetName")) g.Expect(amcpt.Spec.Template.Spec.VirtualNetwork.Subnet.Name).To(Equal("fooSubnetName")) g.Expect(amcpt.Spec.Template.Spec.SKU.Tier).To(Equal(StandardManagedControlPlaneTier)) + g.Expect(*amcpt.Spec.Template.Spec.EnablePreviewFeatures).To(BeTrue()) } func TestControlPlaneTemplateUpdateWebhook(t *testing.T) { diff --git a/api/v1beta1/types_class.go b/api/v1beta1/types_class.go index 629a925f6d9..58ffb9ef5d9 100644 --- a/api/v1beta1/types_class.go +++ b/api/v1beta1/types_class.go @@ -242,6 +242,10 @@ type AzureManagedControlPlaneClassSpec struct { // operation is possible. // +optional ASOManagedClusterPatches []string `json:"asoManagedClusterPatches,omitempty"` + + // EnablePreviewFeatures enables preview features for the cluster. + // +optional + EnablePreviewFeatures *bool `json:"enablePreviewFeatures,omitempty"` } // ManagedClusterAutoUpgradeProfile defines the auto upgrade profile for a managed cluster. 
diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index ee778ef4b24..a849a4bfbaf 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -1455,6 +1455,11 @@ func (in *AzureManagedControlPlaneClassSpec) DeepCopyInto(out *AzureManagedContr *out = make([]string, len(*in)) copy(*out, *in) } + if in.EnablePreviewFeatures != nil { + in, out := &in.EnablePreviewFeatures, &out.EnablePreviewFeatures + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedControlPlaneClassSpec. diff --git a/azure/converters/managedagentpool.go b/azure/converters/managedagentpool.go index 13a642bb559..3b95838c6ef 100644 --- a/azure/converters/managedagentpool.go +++ b/azure/converters/managedagentpool.go @@ -17,6 +17,7 @@ limitations under the License. package converters import ( + asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview" asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" "k8s.io/utils/ptr" ) @@ -59,3 +60,44 @@ func AgentPoolToManagedClusterAgentPoolProfile(pool *asocontainerservicev1.Manag } return agentPool } + +// AgentPoolToManagedClusterAgentPoolPreviewProfile converts an AgentPoolSpec to an Azure SDK ManagedClusterAgentPoolPreviewProfile used in managedcluster reconcile. +func AgentPoolToManagedClusterAgentPoolPreviewProfile(pool *asocontainerservicev1preview.ManagedClustersAgentPool) asocontainerservicev1preview.ManagedClusterAgentPoolProfile { + properties := pool.Spec + + // Populate the same properties as the stable version since the patcher will handle the preview-only fields. + agentPool := asocontainerservicev1preview.ManagedClusterAgentPoolProfile{ + Name: ptr.To(pool.AzureName()), + VmSize: properties.VmSize, + OsType: properties.OsType, + OsDiskSizeGB: properties.OsDiskSizeGB, + Count: properties.Count, + Type: properties.Type, + OrchestratorVersion: properties.OrchestratorVersion, + VnetSubnetReference: properties.VnetSubnetReference, + Mode: properties.Mode, + EnableAutoScaling: properties.EnableAutoScaling, + MaxCount: properties.MaxCount, + MinCount: properties.MinCount, + NodeTaints: properties.NodeTaints, + AvailabilityZones: properties.AvailabilityZones, + MaxPods: properties.MaxPods, + OsDiskType: properties.OsDiskType, + NodeLabels: properties.NodeLabels, + EnableUltraSSD: properties.EnableUltraSSD, + EnableNodePublicIP: properties.EnableNodePublicIP, + NodePublicIPPrefixReference: properties.NodePublicIPPrefixReference, + ScaleSetPriority: properties.ScaleSetPriority, + ScaleDownMode: properties.ScaleDownMode, + SpotMaxPrice: properties.SpotMaxPrice, + Tags: properties.Tags, + KubeletDiskType: properties.KubeletDiskType, + LinuxOSConfig: properties.LinuxOSConfig, + EnableFIPS: properties.EnableFIPS, + EnableEncryptionAtHost: properties.EnableEncryptionAtHost, + } + if properties.KubeletConfig != nil { + agentPool.KubeletConfig = properties.KubeletConfig + } + return agentPool +} diff --git a/azure/converters/managedagentpool_test.go b/azure/converters/managedagentpool_test.go index da19f15763c..9a0670faaae 100644 --- a/azure/converters/managedagentpool_test.go +++ b/azure/converters/managedagentpool_test.go @@ -19,6 +19,7 @@ package converters import ( "testing" + asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview" asocontainerservicev1 
"github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" "github.com/Azure/azure-service-operator/v2/pkg/genruntime" . "github.com/onsi/gomega" @@ -107,3 +108,86 @@ func Test_AgentPoolToManagedClusterAgentPoolProfile(t *testing.T) { }) } } + +func Test_AgentPoolToManagedClusterAgentPoolPreviewProfile(t *testing.T) { + cases := []struct { + name string + pool *asocontainerservicev1preview.ManagedClustersAgentPool + expect func(*GomegaWithT, asocontainerservicev1preview.ManagedClusterAgentPoolProfile) + }{ + { + name: "Should set all values correctly", + pool: &asocontainerservicev1preview.ManagedClustersAgentPool{ + Spec: asocontainerservicev1preview.ManagedClusters_AgentPool_Spec{ + AzureName: "agentpool1", + VmSize: ptr.To("Standard_D2s_v3"), + OsType: ptr.To(asocontainerservicev1preview.OSType_Linux), + OsDiskSizeGB: ptr.To[asocontainerservicev1preview.ContainerServiceOSDisk](100), + Count: ptr.To(2), + Type: ptr.To(asocontainerservicev1preview.AgentPoolType_VirtualMachineScaleSets), + OrchestratorVersion: ptr.To("1.22.6"), + VnetSubnetReference: &genruntime.ResourceReference{ + ARMID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-123/providers/Microsoft.Network/virtualNetworks/vnet-123/subnets/subnet-123", + }, + Mode: ptr.To(asocontainerservicev1preview.AgentPoolMode_User), + EnableAutoScaling: ptr.To(true), + MaxCount: ptr.To(5), + MinCount: ptr.To(2), + NodeTaints: []string{"key1=value1:NoSchedule"}, + AvailabilityZones: []string{"zone1"}, + MaxPods: ptr.To(60), + OsDiskType: ptr.To(asocontainerservicev1preview.OSDiskType_Managed), + NodeLabels: map[string]string{ + "custom": "default", + }, + Tags: map[string]string{ + "custom": "default", + }, + EnableFIPS: ptr.To(true), + EnableEncryptionAtHost: ptr.To(true), + }, + }, + + expect: func(g *GomegaWithT, result asocontainerservicev1preview.ManagedClusterAgentPoolProfile) { + g.Expect(result).To(Equal(asocontainerservicev1preview.ManagedClusterAgentPoolProfile{ + Name: ptr.To("agentpool1"), + VmSize: ptr.To("Standard_D2s_v3"), + OsType: ptr.To(asocontainerservicev1preview.OSType_Linux), + OsDiskSizeGB: ptr.To[asocontainerservicev1preview.ContainerServiceOSDisk](100), + Count: ptr.To(2), + Type: ptr.To(asocontainerservicev1preview.AgentPoolType_VirtualMachineScaleSets), + OrchestratorVersion: ptr.To("1.22.6"), + VnetSubnetReference: &genruntime.ResourceReference{ + ARMID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-123/providers/Microsoft.Network/virtualNetworks/vnet-123/subnets/subnet-123", + }, + Mode: ptr.To(asocontainerservicev1preview.AgentPoolMode_User), + EnableAutoScaling: ptr.To(true), + MaxCount: ptr.To(5), + MinCount: ptr.To(2), + NodeTaints: []string{"key1=value1:NoSchedule"}, + AvailabilityZones: []string{"zone1"}, + MaxPods: ptr.To(60), + OsDiskType: ptr.To(asocontainerservicev1preview.OSDiskType_Managed), + NodeLabels: map[string]string{ + "custom": "default", + }, + Tags: map[string]string{ + "custom": "default", + }, + EnableFIPS: ptr.To(true), + EnableEncryptionAtHost: ptr.To(true), + })) + }, + }, + } + + for _, c := range cases { + c := c + t.Run(c.name, func(t *testing.T) { + t.Parallel() + g := NewGomegaWithT(t) + result := AgentPoolToManagedClusterAgentPoolPreviewProfile(c.pool) + c.expect(g, result) + }) + } +} diff --git a/azure/scope/managedcontrolplane.go b/azure/scope/managedcontrolplane.go index 3f64e8c01c0..58c8633ce88 100644 --- a/azure/scope/managedcontrolplane.go +++ b/azure/scope/managedcontrolplane.go @@ -24,11 +24,11 @@ import 
( "time" asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230315preview" - asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" asokubernetesconfigurationv1 "github.com/Azure/azure-service-operator/v2/api/kubernetesconfiguration/v1api20230501" asonetworkv1api20201101 "github.com/Azure/azure-service-operator/v2/api/network/v1api20201101" asonetworkv1api20220701 "github.com/Azure/azure-service-operator/v2/api/network/v1api20220701" asoresourcesv1 "github.com/Azure/azure-service-operator/v2/api/resources/v1api20200601" + "github.com/Azure/azure-service-operator/v2/pkg/genruntime" "github.com/pkg/errors" "golang.org/x/mod/semver" "gopkg.in/yaml.v3" @@ -528,6 +528,11 @@ func (s *ManagedControlPlaneScope) IsManagedVersionUpgrade() bool { return isManagedVersionUpgrade(s.ControlPlane) } +// IsPreviewEnabled checks if the preview feature is enabled. +func (s *ManagedControlPlaneScope) IsPreviewEnabled() bool { + return ptr.Deref(s.ControlPlane.Spec.EnablePreviewFeatures, false) +} + func isManagedVersionUpgrade(managedControlPlane *infrav1.AzureManagedControlPlane) bool { return managedControlPlane.Spec.AutoUpgradeProfile != nil && managedControlPlane.Spec.AutoUpgradeProfile.UpgradeChannel != nil && @@ -536,7 +541,7 @@ func isManagedVersionUpgrade(managedControlPlane *infrav1.AzureManagedControlPla } // ManagedClusterSpec returns the managed cluster spec. -func (s *ManagedControlPlaneScope) ManagedClusterSpec() azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedCluster] { +func (s *ManagedControlPlaneScope) ManagedClusterSpec() azure.ASOResourceSpecGetter[genruntime.MetaObject] { managedClusterSpec := managedclusters.ManagedClusterSpec{ Name: s.ControlPlane.Name, ResourceGroup: s.ControlPlane.Spec.ResourceGroupName, @@ -559,6 +564,7 @@ func (s *ManagedControlPlaneScope) ManagedClusterSpec() azure.ASOResourceSpecGet NetworkPluginMode: s.ControlPlane.Spec.NetworkPluginMode, DNSPrefix: s.ControlPlane.Spec.DNSPrefix, Patches: s.ControlPlane.Spec.ASOManagedClusterPatches, + Preview: ptr.Deref(s.ControlPlane.Spec.EnablePreviewFeatures, false), } if s.ControlPlane.Spec.SSHPublicKey != nil { @@ -726,9 +732,9 @@ func (s *ManagedControlPlaneScope) getManagedClusterSecurityProfile() *managedcl } // GetAllAgentPoolSpecs gets a slice of azure.AgentPoolSpec for the list of agent pools. 
-func (s *ManagedControlPlaneScope) GetAllAgentPoolSpecs() ([]azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool], error) { +func (s *ManagedControlPlaneScope) GetAllAgentPoolSpecs() ([]azure.ASOResourceSpecGetter[genruntime.MetaObject], error) { var ( - ammps = make([]azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool], 0, len(s.ManagedMachinePools)) + ammps = make([]azure.ASOResourceSpecGetter[genruntime.MetaObject], 0, len(s.ManagedMachinePools)) foundSystemPool = false ) for _, pool := range s.ManagedMachinePools { diff --git a/azure/scope/managedcontrolplane_test.go b/azure/scope/managedcontrolplane_test.go index 6ed596527b4..b1905acf77f 100644 --- a/azure/scope/managedcontrolplane_test.go +++ b/azure/scope/managedcontrolplane_test.go @@ -21,10 +21,10 @@ import ( "reflect" "testing" - asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" asokubernetesconfigurationv1 "github.com/Azure/azure-service-operator/v2/api/kubernetesconfiguration/v1api20230501" asonetworkv1 "github.com/Azure/azure-service-operator/v2/api/network/v1api20220701" asoresourcesv1 "github.com/Azure/azure-service-operator/v2/api/resources/v1api20200601" + "github.com/Azure/azure-service-operator/v2/pkg/genruntime" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -141,7 +141,7 @@ func TestManagedControlPlaneScope_PoolVersion(t *testing.T) { cases := []struct { Name string Input ManagedControlPlaneScopeParams - Expected []azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool] + Expected []azure.ASOResourceSpecGetter[genruntime.MetaObject] Err string }{ { @@ -176,7 +176,7 @@ func TestManagedControlPlaneScope_PoolVersion(t *testing.T) { }, }, }, - Expected: []azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool]{ + Expected: []azure.ASOResourceSpecGetter[genruntime.MetaObject]{ &agentpools.AgentPoolSpec{ Name: "pool0", AzureName: "pool0", @@ -221,7 +221,7 @@ func TestManagedControlPlaneScope_PoolVersion(t *testing.T) { }, }, }, - Expected: []azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool]{ + Expected: []azure.ASOResourceSpecGetter[genruntime.MetaObject]{ &agentpools.AgentPoolSpec{ Name: "pool0", AzureName: "pool0", @@ -428,7 +428,7 @@ func TestManagedControlPlaneScope_OSType(t *testing.T) { cases := []struct { Name string Input ManagedControlPlaneScopeParams - Expected []azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool] + Expected []azure.ASOResourceSpecGetter[genruntime.MetaObject] Err string }{ { @@ -472,7 +472,7 @@ func TestManagedControlPlaneScope_OSType(t *testing.T) { }, }, }, - Expected: []azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool]{ + Expected: []azure.ASOResourceSpecGetter[genruntime.MetaObject]{ &agentpools.AgentPoolSpec{ Name: "pool0", AzureName: "pool0", diff --git a/azure/scope/managedmachinepool.go b/azure/scope/managedmachinepool.go index 548ebbd588f..d25b21ecaad 100644 --- a/azure/scope/managedmachinepool.go +++ b/azure/scope/managedmachinepool.go @@ -21,7 +21,7 @@ import ( "fmt" "strings" - asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" + "github.com/Azure/azure-service-operator/v2/pkg/genruntime" "github.com/pkg/errors" "k8s.io/utils/ptr" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -146,7 +146,7 @@ func (s *ManagedMachinePoolScope) SetSubnetName() { 
} // AgentPoolSpec returns an azure.ResourceSpecGetter for currently reconciled AzureManagedMachinePool. -func (s *ManagedMachinePoolScope) AgentPoolSpec() azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool] { +func (s *ManagedMachinePoolScope) AgentPoolSpec() azure.ASOResourceSpecGetter[genruntime.MetaObject] { return buildAgentPoolSpec(s.ControlPlane, s.MachinePool, s.InfraMachinePool) } @@ -159,7 +159,7 @@ func getAgentPoolSubnet(controlPlane *infrav1.AzureManagedControlPlane, infraMac func buildAgentPoolSpec(managedControlPlane *infrav1.AzureManagedControlPlane, machinePool *expv1.MachinePool, - managedMachinePool *infrav1.AzureManagedMachinePool) azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool] { + managedMachinePool *infrav1.AzureManagedMachinePool) azure.ASOResourceSpecGetter[genruntime.MetaObject] { normalizedVersion := getManagedMachinePoolVersion(managedControlPlane, machinePool) replicas := int32(1) @@ -198,6 +198,7 @@ func buildAgentPoolSpec(managedControlPlane *infrav1.AzureManagedControlPlane, EnableFIPS: managedMachinePool.Spec.EnableFIPS, EnableEncryptionAtHost: managedMachinePool.Spec.EnableEncryptionAtHost, Patches: managedMachinePool.Spec.ASOManagedClustersAgentPoolPatches, + Preview: ptr.Deref(managedControlPlane.Spec.EnablePreviewFeatures, false), } if managedMachinePool.Spec.OSDiskSizeGB != nil { @@ -243,6 +244,11 @@ func buildAgentPoolSpec(managedControlPlane *infrav1.AzureManagedControlPlane, return agentPoolSpec } +// IsPreviewEnabled returns the value of the EnablePreviewFeatures field from the AzureManagedControlPlane. +func (s *ManagedMachinePoolScope) IsPreviewEnabled() bool { + return ptr.Deref(s.ControlPlane.Spec.EnablePreviewFeatures, false) +} + // SetAgentPoolProviderIDList sets a list of agent pool's Azure VM IDs. func (s *ManagedMachinePoolScope) SetAgentPoolProviderIDList(providerIDs []string) { s.InfraMachinePool.Spec.ProviderIDList = providerIDs diff --git a/azure/scope/managedmachinepool_test.go b/azure/scope/managedmachinepool_test.go index 27b93c9779d..ea27b7a704b 100644 --- a/azure/scope/managedmachinepool_test.go +++ b/azure/scope/managedmachinepool_test.go @@ -22,6 +22,7 @@ import ( "testing" asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" + "github.com/Azure/azure-service-operator/v2/pkg/genruntime" "github.com/google/go-cmp/cmp" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -43,7 +44,7 @@ func TestManagedMachinePoolScope_Autoscaling(t *testing.T) { cases := []struct { Name string Input ManagedMachinePoolScopeParams - Expected azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool] + Expected azure.ASOResourceSpecGetter[genruntime.MetaObject] }{ { Name: "Without Autoscaling", @@ -145,7 +146,7 @@ func TestManagedMachinePoolScope_NodeLabels(t *testing.T) { cases := []struct { Name string Input ManagedMachinePoolScopeParams - Expected azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool] + Expected azure.ASOResourceSpecGetter[genruntime.MetaObject] }{ { Name: "Without node labels", @@ -248,7 +249,7 @@ func TestManagedMachinePoolScope_AdditionalTags(t *testing.T) { cases := []struct { Name string Input ManagedMachinePoolScopeParams - Expected azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool] + Expected azure.ASOResourceSpecGetter[genruntime.MetaObject] }{ { Name: "Without additional tags", @@ -351,7 +352,7 @@ func TestManagedMachinePoolScope_MaxPods(t *testing.T) { cases := []struct { Name string Input ManagedMachinePoolScopeParams - Expected azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool] + Expected azure.ASOResourceSpecGetter[genruntime.MetaObject] }{ { Name: "Without MaxPods", @@ -450,7 +451,7 @@ func TestManagedMachinePoolScope_Taints(t *testing.T) { cases := []struct { Name string Input ManagedMachinePoolScopeParams - Expected azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool] + Expected azure.ASOResourceSpecGetter[genruntime.MetaObject] }{ { Name: "Without taints", @@ -556,7 +557,7 @@ func TestManagedMachinePoolScope_OSDiskType(t *testing.T) { cases := []struct { Name string Input ManagedMachinePoolScopeParams - Expected azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool] + Expected azure.ASOResourceSpecGetter[genruntime.MetaObject] }{ { Name: "Without OsDiskType", @@ -655,7 +656,7 @@ func TestManagedMachinePoolScope_SubnetName(t *testing.T) { cases := []struct { Name string Input ManagedMachinePoolScopeParams - Expected azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool] + Expected azure.ASOResourceSpecGetter[genruntime.MetaObject] }{ { Name: "Without Vnet and SubnetName", @@ -807,7 +808,7 @@ func TestManagedMachinePoolScope_KubeletDiskType(t *testing.T) { cases := []struct { Name string Input ManagedMachinePoolScopeParams - Expected azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool] + Expected azure.ASOResourceSpecGetter[genruntime.MetaObject] }{ { Name: "Without KubeletDiskType", @@ -898,6 +899,52 @@ func TestManagedMachinePoolScope_KubeletDiskType(t *testing.T) { } } +func TestManagedMachinePoolScope_EnablePreviewFeatures(t *testing.T) { + cases := []struct { + Name string + previewEnabled *bool + Expected bool + }{ + { + Name: "Without EnablePreviewFeatures", + previewEnabled: nil, + Expected: false, + }, + { + Name: "With EnablePreviewFeatures false", + previewEnabled: ptr.To(false), + Expected: false, + }, + { + Name: "With EnablePreviewFeatures true", + previewEnabled: ptr.To(true), + Expected: true, + }, + } + for _, c := range cases { + c := c + t.Run(c.Name, func(t *testing.T) { + g := NewWithT(t) + s := &ManagedMachinePoolScope{ + ControlPlane: &infrav1.AzureManagedControlPlane{ + Spec: infrav1.AzureManagedControlPlaneSpec{ + AzureManagedControlPlaneClassSpec: 
infrav1.AzureManagedControlPlaneClassSpec{ + EnablePreviewFeatures: c.previewEnabled, + }, + }, + }, + MachinePool: &expv1.MachinePool{}, + InfraMachinePool: &infrav1.AzureManagedMachinePool{}, + } + agentPoolGetter := s.AgentPoolSpec() + agentPool, ok := agentPoolGetter.(*agentpools.AgentPoolSpec) + g.Expect(ok).To(BeTrue()) + g.Expect(agentPool.Preview).To(Equal(c.Expected)) + g.Expect(s.IsPreviewEnabled()).To(Equal(c.Expected)) + }) + } +} + func Test_getManagedMachinePoolVersion(t *testing.T) { cases := []struct { name string diff --git a/azure/services/agentpools/agentpools.go b/azure/services/agentpools/agentpools.go index 0876833ee54..649003f4796 100644 --- a/azure/services/agentpools/agentpools.go +++ b/azure/services/agentpools/agentpools.go @@ -19,7 +19,10 @@ package agentpools import ( "context" + asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview" asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" + asocontainerservicev1hub "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001/storage" + "github.com/Azure/azure-service-operator/v2/pkg/genruntime" "k8s.io/utils/ptr" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -35,7 +38,7 @@ type AgentPoolScope interface { Name() string NodeResourceGroup() string - AgentPoolSpec() azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool] + AgentPoolSpec() azure.ASOResourceSpecGetter[genruntime.MetaObject] SetAgentPoolProviderIDList([]string) SetAgentPoolReplicas(int32) SetAgentPoolReady(bool) @@ -43,21 +46,39 @@ type AgentPoolScope interface { SetCAPIMachinePoolAnnotation(key, value string) RemoveCAPIMachinePoolAnnotation(key string) SetSubnetName() + IsPreviewEnabled() bool } // New creates a new service. 
-func New(scope AgentPoolScope) *aso.Service[*asocontainerservicev1.ManagedClustersAgentPool, AgentPoolScope] { - svc := aso.NewService[*asocontainerservicev1.ManagedClustersAgentPool](serviceName, scope) - svc.Specs = []azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool]{scope.AgentPoolSpec()} +func New(scope AgentPoolScope) *aso.Service[genruntime.MetaObject, AgentPoolScope] { + svc := aso.NewService[genruntime.MetaObject](serviceName, scope) + svc.Specs = []azure.ASOResourceSpecGetter[genruntime.MetaObject]{scope.AgentPoolSpec()} svc.ConditionType = infrav1.AgentPoolsReadyCondition svc.PostCreateOrUpdateResourceHook = postCreateOrUpdateResourceHook return svc } -func postCreateOrUpdateResourceHook(ctx context.Context, scope AgentPoolScope, agentPool *asocontainerservicev1.ManagedClustersAgentPool, err error) error { +func postCreateOrUpdateResourceHook(ctx context.Context, scope AgentPoolScope, obj genruntime.MetaObject, err error) error { if err != nil { return err } + var existing *asocontainerservicev1.ManagedClustersAgentPool + if scope.IsPreviewEnabled() { + existingPreview := obj.(*asocontainerservicev1preview.ManagedClustersAgentPool) + hub := &asocontainerservicev1hub.ManagedClustersAgentPool{} + if err := existingPreview.ConvertTo(hub); err != nil { + return err + } + stable := &asocontainerservicev1.ManagedClustersAgentPool{} + if err := stable.ConvertFrom(hub); err != nil { + return err + } + existing = stable + } else { + existing = obj.(*asocontainerservicev1.ManagedClustersAgentPool) + } + agentPool := existing + // When autoscaling is set, add the annotation to the machine pool and update the replica count. if ptr.Deref(agentPool.Status.EnableAutoScaling, false) { scope.SetCAPIMachinePoolAnnotation(clusterv1.ReplicasManagedByAnnotation, "true") diff --git a/azure/services/agentpools/agentpools_test.go b/azure/services/agentpools/agentpools_test.go index 0fb2e32c181..aab1217b12c 100644 --- a/azure/services/agentpools/agentpools_test.go +++ b/azure/services/agentpools/agentpools_test.go @@ -20,6 +20,7 @@ import ( "context" "testing" + asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview" asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" . 
"github.com/onsi/gomega" "github.com/pkg/errors" @@ -45,6 +46,7 @@ func TestPostCreateOrUpdateResourceHook(t *testing.T) { scope := mock_agentpools.NewMockAgentPoolScope(mockCtrl) scope.EXPECT().RemoveCAPIMachinePoolAnnotation(clusterv1.ReplicasManagedByAnnotation) + scope.EXPECT().IsPreviewEnabled().Return(false) managedCluster := &asocontainerservicev1.ManagedClustersAgentPool{ Status: asocontainerservicev1.ManagedClusters_AgentPool_STATUS{ @@ -63,6 +65,7 @@ func TestPostCreateOrUpdateResourceHook(t *testing.T) { scope.EXPECT().SetCAPIMachinePoolAnnotation(clusterv1.ReplicasManagedByAnnotation, "true") scope.EXPECT().SetCAPIMachinePoolReplicas(ptr.To(1234)) + scope.EXPECT().IsPreviewEnabled().Return(false) managedCluster := &asocontainerservicev1.ManagedClustersAgentPool{ Status: asocontainerservicev1.ManagedClusters_AgentPool_STATUS{ @@ -74,4 +77,23 @@ func TestPostCreateOrUpdateResourceHook(t *testing.T) { err := postCreateOrUpdateResourceHook(context.Background(), scope, managedCluster, nil) g.Expect(err).NotTo(HaveOccurred()) }) + + t.Run("successful create or update, preview enabled", func(t *testing.T) { + g := NewGomegaWithT(t) + mockCtrl := gomock.NewController(t) + scope := mock_agentpools.NewMockAgentPoolScope(mockCtrl) + + scope.EXPECT().SetCAPIMachinePoolAnnotation(clusterv1.ReplicasManagedByAnnotation, "true") + scope.EXPECT().SetCAPIMachinePoolReplicas(ptr.To(1234)) + scope.EXPECT().IsPreviewEnabled().Return(true) + + agentPool := &asocontainerservicev1preview.ManagedClustersAgentPool{ + Status: asocontainerservicev1preview.ManagedClusters_AgentPool_STATUS{ + EnableAutoScaling: ptr.To(true), + Count: ptr.To(1234), + }, + } + + g.Expect(postCreateOrUpdateResourceHook(context.Background(), scope, agentPool, nil)).To(Succeed()) + }) } diff --git a/azure/services/agentpools/mock_agentpools/agentpools_mock.go b/azure/services/agentpools/mock_agentpools/agentpools_mock.go index acc2255784c..14a17a10f9c 100644 --- a/azure/services/agentpools/mock_agentpools/agentpools_mock.go +++ b/azure/services/agentpools/mock_agentpools/agentpools_mock.go @@ -29,7 +29,7 @@ import ( reflect "reflect" time "time" - v1api20231001 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" + genruntime "github.com/Azure/azure-service-operator/v2/pkg/genruntime" gomock "go.uber.org/mock/gomock" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" azure "sigs.k8s.io/cluster-api-provider-azure/azure" @@ -75,10 +75,10 @@ func (mr *MockAgentPoolScopeMockRecorder) ASOOwner() *gomock.Call { } // AgentPoolSpec mocks base method. -func (m *MockAgentPoolScope) AgentPoolSpec() azure.ASOResourceSpecGetter[*v1api20231001.ManagedClustersAgentPool] { +func (m *MockAgentPoolScope) AgentPoolSpec() azure.ASOResourceSpecGetter[genruntime.MetaObject] { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AgentPoolSpec") - ret0, _ := ret[0].(azure.ASOResourceSpecGetter[*v1api20231001.ManagedClustersAgentPool]) + ret0, _ := ret[0].(azure.ASOResourceSpecGetter[genruntime.MetaObject]) return ret0 } @@ -184,6 +184,20 @@ func (mr *MockAgentPoolScopeMockRecorder) GetLongRunningOperationState(arg0, arg return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLongRunningOperationState", reflect.TypeOf((*MockAgentPoolScope)(nil).GetLongRunningOperationState), arg0, arg1, arg2) } +// IsPreviewEnabled mocks base method. 
+func (m *MockAgentPoolScope) IsPreviewEnabled() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsPreviewEnabled") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsPreviewEnabled indicates an expected call of IsPreviewEnabled. +func (mr *MockAgentPoolScopeMockRecorder) IsPreviewEnabled() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsPreviewEnabled", reflect.TypeOf((*MockAgentPoolScope)(nil).IsPreviewEnabled)) +} + // Name mocks base method. func (m *MockAgentPoolScope) Name() string { m.ctrl.T.Helper() diff --git a/azure/services/agentpools/spec.go b/azure/services/agentpools/spec.go index 91977598447..e17993d6cff 100644 --- a/azure/services/agentpools/spec.go +++ b/azure/services/agentpools/spec.go @@ -19,7 +19,9 @@ package agentpools import ( "context" + asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview" asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" + asocontainerservicev1hub "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001/storage" "github.com/Azure/azure-service-operator/v2/pkg/genruntime" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -154,10 +156,20 @@ type AgentPoolSpec struct { // Patches are extra patches to be applied to the ASO resource. Patches []string + + // Preview indicates whether the agent pool is using a preview version of ASO. + Preview bool } // ResourceRef implements azure.ASOResourceSpecGetter. -func (s *AgentPoolSpec) ResourceRef() *asocontainerservicev1.ManagedClustersAgentPool { +func (s *AgentPoolSpec) ResourceRef() genruntime.MetaObject { + if s.Preview { + return &asocontainerservicev1preview.ManagedClustersAgentPool{ + ObjectMeta: metav1.ObjectMeta{ + Name: s.Name, + }, + } + } return &asocontainerservicev1.ManagedClustersAgentPool{ ObjectMeta: metav1.ObjectMeta{ Name: s.Name, @@ -181,10 +193,31 @@ func (s *AgentPoolSpec) getManagedMachinePoolVersion(existing *asocontainerservi } // Parameters returns the parameters for the agent pool. -func (s *AgentPoolSpec) Parameters(ctx context.Context, existing *asocontainerservicev1.ManagedClustersAgentPool) (params *asocontainerservicev1.ManagedClustersAgentPool, err error) { +func (s *AgentPoolSpec) Parameters(ctx context.Context, existingObj genruntime.MetaObject) (params genruntime.MetaObject, err error) { _, _, done := tele.StartSpanWithLogger(ctx, "agentpools.Service.Parameters") defer done() + // If existing is preview, convert to stable then back to preview at the end of the function. 
+ var existing *asocontainerservicev1.ManagedClustersAgentPool + var existingStatus asocontainerservicev1preview.ManagedClusters_AgentPool_STATUS + if existingObj != nil { + if s.Preview { + existingPreview := existingObj.(*asocontainerservicev1preview.ManagedClustersAgentPool) + existingStatus = existingPreview.Status + hub := &asocontainerservicev1hub.ManagedClustersAgentPool{} + if err := existingPreview.ConvertTo(hub); err != nil { + return nil, err + } + stable := &asocontainerservicev1.ManagedClustersAgentPool{} + if err := stable.ConvertFrom(hub); err != nil { + return nil, err + } + existing = stable + } else { + existing = existingObj.(*asocontainerservicev1.ManagedClustersAgentPool) + } + } + agentPool := existing if agentPool == nil { agentPool = &asocontainerservicev1.ManagedClustersAgentPool{} @@ -302,11 +335,26 @@ func (s *AgentPoolSpec) Parameters(ctx context.Context, existing *asocontainerse agentPool.Spec.Count = agentPool.Status.Count } + if s.Preview { + hub := &asocontainerservicev1hub.ManagedClustersAgentPool{} + if err := agentPool.ConvertTo(hub); err != nil { + return nil, err + } + prev := &asocontainerservicev1preview.ManagedClustersAgentPool{} + if err := prev.ConvertFrom(hub); err != nil { + return nil, err + } + if existing != nil { + prev.Status = existingStatus + } + return prev, nil + } + return agentPool, nil } // WasManaged implements azure.ASOResourceSpecGetter. -func (s *AgentPoolSpec) WasManaged(resource *asocontainerservicev1.ManagedClustersAgentPool) bool { +func (s *AgentPoolSpec) WasManaged(resource genruntime.MetaObject) bool { // CAPZ has never supported BYO agent pools. return true } diff --git a/azure/services/agentpools/spec_test.go b/azure/services/agentpools/spec_test.go index bb38a8f8972..c75c639c8d3 100644 --- a/azure/services/agentpools/spec_test.go +++ b/azure/services/agentpools/spec_test.go @@ -20,6 +20,7 @@ import ( "context" "testing" + asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview" asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" "github.com/Azure/azure-service-operator/v2/pkg/genruntime" "github.com/google/go-cmp/cmp" @@ -125,6 +126,102 @@ func TestParameters(t *testing.T) { g.Expect(cmp.Diff(actual, expected)).To(BeEmpty()) }) + t.Run("no existing preview agent pool", func(t *testing.T) { + g := NewGomegaWithT(t) + + spec := &AgentPoolSpec{ + Preview: true, + Name: "name", + AzureName: "azure name", + ResourceGroup: "rg", + Cluster: "cluster", + Version: ptr.To("1.26.6"), + SKU: "sku", + Replicas: 1, + OSDiskSizeGB: 2, + VnetSubnetID: "vnet subnet id", + Mode: "mode", + MaxCount: ptr.To(3), + MinCount: ptr.To(4), + NodeLabels: map[string]string{"node": "labels"}, + NodeTaints: []string{"node taints"}, + EnableAutoScaling: true, + AvailabilityZones: []string{"zones"}, + MaxPods: ptr.To(5), + OsDiskType: ptr.To("disk type"), + EnableUltraSSD: ptr.To(false), + OSType: ptr.To("os type"), + EnableNodePublicIP: ptr.To(true), + NodePublicIPPrefixID: "public IP prefix ID", + ScaleSetPriority: ptr.To("scaleset priority"), + ScaleDownMode: ptr.To("scale down mode"), + SpotMaxPrice: ptr.To(resource.MustParse("123")), + KubeletConfig: &KubeletConfig{ + CPUManagerPolicy: ptr.To("cpu manager policy"), + }, + KubeletDiskType: ptr.To(infrav1.KubeletDiskType("kubelet disk type")), + AdditionalTags: map[string]string{"additional": "tags"}, + LinuxOSConfig: &infrav1.LinuxOSConfig{ + Sysctls: &infrav1.SysctlConfig{ + FsNrOpen: 
ptr.To(6), + }, + }, + EnableFIPS: ptr.To(true), + EnableEncryptionAtHost: ptr.To(false), + } + expected := &asocontainerservicev1preview.ManagedClustersAgentPool{ + Spec: asocontainerservicev1preview.ManagedClusters_AgentPool_Spec{ + AzureName: "azure name", + Owner: &genruntime.KnownResourceReference{ + Name: "cluster", + }, + AvailabilityZones: []string{"zones"}, + Count: ptr.To(1), + EnableAutoScaling: ptr.To(true), + EnableUltraSSD: ptr.To(false), + EnableEncryptionAtHost: ptr.To(false), + KubeletDiskType: ptr.To(asocontainerservicev1preview.KubeletDiskType("kubelet disk type")), + MaxCount: ptr.To(3), + MaxPods: ptr.To(5), + MinCount: ptr.To(4), + Mode: ptr.To(asocontainerservicev1preview.AgentPoolMode("mode")), + NodeLabels: map[string]string{"node": "labels"}, + NodeTaints: []string{"node taints"}, + OrchestratorVersion: ptr.To("1.26.6"), + OsDiskSizeGB: ptr.To(asocontainerservicev1preview.ContainerServiceOSDisk(2)), + OsDiskType: ptr.To(asocontainerservicev1preview.OSDiskType("disk type")), + OsType: ptr.To(asocontainerservicev1preview.OSType("os type")), + ScaleSetPriority: ptr.To(asocontainerservicev1preview.ScaleSetPriority("scaleset priority")), + ScaleDownMode: ptr.To(asocontainerservicev1preview.ScaleDownMode("scale down mode")), + Type: ptr.To(asocontainerservicev1preview.AgentPoolType_VirtualMachineScaleSets), + EnableNodePublicIP: ptr.To(true), + Tags: map[string]string{"additional": "tags"}, + EnableFIPS: ptr.To(true), + KubeletConfig: &asocontainerservicev1preview.KubeletConfig{ + CpuManagerPolicy: ptr.To("cpu manager policy"), + }, + VmSize: ptr.To("sku"), + SpotMaxPrice: ptr.To(ptr.To(resource.MustParse("123")).AsApproximateFloat64()), + VnetSubnetReference: &genruntime.ResourceReference{ + ARMID: "vnet subnet id", + }, + NodePublicIPPrefixReference: &genruntime.ResourceReference{ + ARMID: "public IP prefix ID", + }, + LinuxOSConfig: &asocontainerservicev1preview.LinuxOSConfig{ + Sysctls: &asocontainerservicev1preview.SysctlConfig{ + FsNrOpen: ptr.To(6), + }, + }, + }, + } + + actual, err := spec.Parameters(context.Background(), nil) + + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(cmp.Diff(actual, expected)).To(BeEmpty()) + }) + t.Run("with existing agent pool", func(t *testing.T) { g := NewGomegaWithT(t) @@ -148,12 +245,49 @@ func TestParameters(t *testing.T) { } actual, err := spec.Parameters(context.Background(), existing) + actualTyped, ok := actual.(*asocontainerservicev1.ManagedClustersAgentPool) + g.Expect(ok).To(BeTrue()) + + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(actualTyped.Spec.AzureName).To(Equal("managed by CAPZ")) + g.Expect(actualTyped.Spec.Count).To(Equal(ptr.To(1212))) + g.Expect(actualTyped.Spec.PowerState.Code).To(Equal(ptr.To(asocontainerservicev1.PowerState_Code("set by the user")))) + g.Expect(actualTyped.Spec.OrchestratorVersion).NotTo(BeNil()) + g.Expect(*actualTyped.Spec.OrchestratorVersion).To(Equal("1.27.2")) + }) + + t.Run("with existing preview agent pool", func(t *testing.T) { + g := NewGomegaWithT(t) + + spec := &AgentPoolSpec{ + AzureName: "managed by CAPZ", + Replicas: 3, + EnableAutoScaling: true, + Version: ptr.To("1.26.6"), + Preview: true, + } + existing := &asocontainerservicev1preview.ManagedClustersAgentPool{ + Spec: asocontainerservicev1preview.ManagedClusters_AgentPool_Spec{ + AzureName: "set by the user", + PowerState: &asocontainerservicev1preview.PowerState{ + Code: ptr.To(asocontainerservicev1preview.PowerState_Code("set by the user")), + }, + OrchestratorVersion: ptr.To("1.27.2"), + }, + Status: 
asocontainerservicev1preview.ManagedClusters_AgentPool_STATUS{ + Count: ptr.To(1212), + }, + } + + actual, err := spec.Parameters(context.Background(), existing) + actualTyped, ok := actual.(*asocontainerservicev1preview.ManagedClustersAgentPool) + g.Expect(ok).To(BeTrue()) g.Expect(err).NotTo(HaveOccurred()) - g.Expect(actual.Spec.AzureName).To(Equal("managed by CAPZ")) - g.Expect(actual.Spec.Count).To(Equal(ptr.To(1212))) - g.Expect(actual.Spec.PowerState.Code).To(Equal(ptr.To(asocontainerservicev1.PowerState_Code("set by the user")))) - g.Expect(actual.Spec.OrchestratorVersion).NotTo(BeNil()) - g.Expect(*actual.Spec.OrchestratorVersion).To(Equal("1.27.2")) + g.Expect(actualTyped.Spec.AzureName).To(Equal("managed by CAPZ")) + g.Expect(actualTyped.Spec.Count).To(Equal(ptr.To(1212))) + g.Expect(actualTyped.Spec.PowerState.Code).To(Equal(ptr.To(asocontainerservicev1preview.PowerState_Code("set by the user")))) + g.Expect(actualTyped.Spec.OrchestratorVersion).NotTo(BeNil()) + g.Expect(*actualTyped.Spec.OrchestratorVersion).To(Equal("1.27.2")) }) } diff --git a/azure/services/aso/aso.go b/azure/services/aso/aso.go index 96c0d7a391c..53a4d410961 100644 --- a/azure/services/aso/aso.go +++ b/azure/services/aso/aso.go @@ -54,16 +54,9 @@ const ( deleteFutureType = "ASODelete" ) -// deepCopier is a genruntime.MetaObject with a typed DeepCopy method, usually generated by kubebuilder. -type deepCopier[T any] interface { - genruntime.MetaObject - DeepCopy() T - SetGroupVersionKind(schema.GroupVersionKind) -} - // reconciler is an implementation of the Reconciler interface. It handles creation // and deletion of resources using ASO. -type reconciler[T deepCopier[T]] struct { +type reconciler[T genruntime.MetaObject] struct { client.Client clusterName string @@ -71,7 +64,7 @@ type reconciler[T deepCopier[T]] struct { } // New creates a new ASO reconciler. -func New[T deepCopier[T]](ctrlClient client.Client, clusterName string, owner client.Object) Reconciler[T] { +func New[T genruntime.MetaObject](ctrlClient client.Client, clusterName string, owner client.Object) Reconciler[T] { return &reconciler[T]{ Client: ctrlClient, clusterName: clusterName, @@ -156,7 +149,11 @@ func (r *reconciler[T]) CreateOrUpdateResource(ctx context.Context, spec azure.A } // Construct parameters using the resource spec and information from the existing resource, if there is one. - parameters, err := PatchedParameters(ctx, r.Scheme(), spec, existing.DeepCopy()) + var existingCopy T + if resourceExists { + existingCopy = existing.DeepCopyObject().(T) + } + parameters, err := PatchedParameters(ctx, r.Scheme(), spec, existingCopy) if err != nil { return zero, errors.Wrapf(err, "failed to get desired parameters for resource %s/%s (service: %s)", resourceNamespace, resourceName, serviceName) } @@ -231,7 +228,7 @@ func (r *reconciler[T]) CreateOrUpdateResource(ctx context.Context, spec azure.A } // PatchedParameters returns the Parameters of spec with patches applied. 
-func PatchedParameters[T deepCopier[T]](ctx context.Context, scheme *runtime.Scheme, spec azure.ASOResourceSpecGetter[T], existing T) (T, error) { +func PatchedParameters[T genruntime.MetaObject](ctx context.Context, scheme *runtime.Scheme, spec azure.ASOResourceSpecGetter[T], existing T) (T, error) { var zero T // to be returned with non-nil errors parameters, err := spec.Parameters(ctx, existing) if err != nil { @@ -240,7 +237,7 @@ func PatchedParameters[T deepCopier[T]](ctx context.Context, scheme *runtime.Sch return applyPatches(scheme, spec, parameters) } -func applyPatches[T deepCopier[T]](scheme *runtime.Scheme, spec azure.ASOResourceSpecGetter[T], parameters T) (T, error) { +func applyPatches[T genruntime.MetaObject](scheme *runtime.Scheme, spec azure.ASOResourceSpecGetter[T], parameters T) (T, error) { p, ok := spec.(Patcher) if !ok { return parameters, nil @@ -252,7 +249,8 @@ func applyPatches[T deepCopier[T]](scheme *runtime.Scheme, spec azure.ASOResourc if err != nil { return zero, errors.Wrap(err, "failed to get GroupVersionKind for object") } - parameters.SetGroupVersionKind(gvk) + + (genruntime.MetaObject)(parameters).(interface{ SetGroupVersionKind(schema.GroupVersionKind) }).SetGroupVersionKind(gvk) paramData, err := json.Marshal(parameters) if err != nil { return zero, errors.Wrap(err, "failed to marshal JSON for patch") @@ -407,7 +405,7 @@ func (r *reconciler[T]) PauseResource(ctx context.Context, resource T, serviceNa } log.V(4).Info("Pausing resource") - before := resource.DeepCopy() + before := resource.DeepCopyObject().(genruntime.MetaObject) if annotations == nil { annotations = make(map[string]string, 2) diff --git a/azure/services/aso/service.go b/azure/services/aso/service.go index aa6fa5d399d..7eaf4639124 100644 --- a/azure/services/aso/service.go +++ b/azure/services/aso/service.go @@ -19,6 +19,7 @@ package aso import ( "context" + "github.com/Azure/azure-service-operator/v2/pkg/genruntime" "github.com/pkg/errors" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/util/tele" @@ -27,7 +28,7 @@ import ( ) // Service provides operations on Azure resources. -type Service[T deepCopier[T], S Scope] struct { +type Service[T genruntime.MetaObject, S Scope] struct { Reconciler[T] Scope S @@ -46,7 +47,7 @@ type Service[T deepCopier[T], S Scope] struct { } // NewService creates a new Service. 
-func NewService[T deepCopier[T], S Scope](name string, scope S) *Service[T, S] { +func NewService[T genruntime.MetaObject, S Scope](name string, scope S) *Service[T, S] { return &Service[T, S]{ Reconciler: New[T](scope.GetClient(), scope.ClusterName(), scope.ASOOwner()), Scope: scope, diff --git a/azure/services/managedclusters/managedclusters.go b/azure/services/managedclusters/managedclusters.go index fe0626d2df0..092e33daacf 100644 --- a/azure/services/managedclusters/managedclusters.go +++ b/azure/services/managedclusters/managedclusters.go @@ -20,7 +20,10 @@ import ( "context" "fmt" + asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview" asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" + asocontainerservicev1hub "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001/storage" + "github.com/Azure/azure-service-operator/v2/pkg/genruntime" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/clientcmd" @@ -50,7 +53,7 @@ const ( type ManagedClusterScope interface { aso.Scope azure.Authorizer - ManagedClusterSpec() azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedCluster] + ManagedClusterSpec() azure.ASOResourceSpecGetter[genruntime.MetaObject] SetControlPlaneEndpoint(clusterv1.APIEndpoint) MakeEmptyKubeConfigSecret() corev1.Secret GetAdminKubeconfigData() []byte @@ -65,22 +68,43 @@ type ManagedClusterScope interface { SetAutoUpgradeVersionStatus(version string) SetVersionStatus(version string) IsManagedVersionUpgrade() bool + IsPreviewEnabled() bool } // New creates a new service. -func New(scope ManagedClusterScope) *aso.Service[*asocontainerservicev1.ManagedCluster, ManagedClusterScope] { - svc := aso.NewService[*asocontainerservicev1.ManagedCluster](serviceName, scope) - svc.Specs = []azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedCluster]{scope.ManagedClusterSpec()} +func New(scope ManagedClusterScope) *aso.Service[genruntime.MetaObject, ManagedClusterScope] { + // genruntime.MetaObject is used here instead of an *asocontainerservicev1.ManagedCluster to better + // facilitate returning different API versions. + svc := aso.NewService[genruntime.MetaObject](serviceName, scope) + svc.Specs = []azure.ASOResourceSpecGetter[genruntime.MetaObject]{scope.ManagedClusterSpec()} svc.ConditionType = infrav1.ManagedClusterRunningCondition svc.PostCreateOrUpdateResourceHook = postCreateOrUpdateResourceHook return svc } -func postCreateOrUpdateResourceHook(ctx context.Context, scope ManagedClusterScope, managedCluster *asocontainerservicev1.ManagedCluster, err error) error { +func postCreateOrUpdateResourceHook(ctx context.Context, scope ManagedClusterScope, obj genruntime.MetaObject, err error) error { if err != nil { return err } + // If existing is preview, convert to stable for this function. + var existing *asocontainerservicev1.ManagedCluster + if scope.IsPreviewEnabled() { + existingPreview := obj.(*asocontainerservicev1preview.ManagedCluster) + hub := &asocontainerservicev1hub.ManagedCluster{} + if err := existingPreview.ConvertTo(hub); err != nil { + return err + } + prev := &asocontainerservicev1.ManagedCluster{} + if err := prev.ConvertFrom(hub); err != nil { + return err + } + existing = prev + } else { + existing = obj.(*asocontainerservicev1.ManagedCluster) + } + managedCluster := existing + // Update control plane endpoint. 
endpoint := clusterv1.APIEndpoint{ Host: ptr.Deref(managedCluster.Status.Fqdn, ""), diff --git a/azure/services/managedclusters/managedclusters_test.go b/azure/services/managedclusters/managedclusters_test.go index 27fa93b5023..78c5333b201 100644 --- a/azure/services/managedclusters/managedclusters_test.go +++ b/azure/services/managedclusters/managedclusters_test.go @@ -21,6 +21,7 @@ import ( "errors" "testing" + asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview" asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" . "github.com/onsi/gomega" "go.uber.org/mock/gomock" @@ -46,65 +47,55 @@ func TestPostCreateOrUpdateResourceHook(t *testing.T) { t.Run("successful create or update", func(t *testing.T) { g := NewGomegaWithT(t) - mockCtrl := gomock.NewController(t) - scope := mock_managedclusters.NewMockManagedClusterScope(mockCtrl) namespace := "default" - clusterName := "cluster" + scope := setupMockScope(t) + scope.EXPECT().IsPreviewEnabled().Return(false) - adminASOKubeconfig := &corev1.Secret{ + managedCluster := &asocontainerservicev1.ManagedCluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, - Name: adminKubeconfigSecretName(clusterName), - }, - Data: map[string][]byte{ - secret.KubeconfigDataName: []byte("admin credentials"), }, - } - userASOKubeconfig := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: userKubeconfigSecretName(clusterName), + Spec: asocontainerservicev1.ManagedCluster_Spec{ + KubernetesVersion: ptr.To("1.19.0"), + AutoUpgradeProfile: &asocontainerservicev1.ManagedClusterAutoUpgradeProfile{ + UpgradeChannel: ptr.To(asocontainerservicev1.ManagedClusterAutoUpgradeProfile_UpgradeChannel_Stable), + }, }, - Data: map[string][]byte{ - secret.KubeconfigDataName: []byte("user credentials"), + Status: asocontainerservicev1.ManagedCluster_STATUS{ + Fqdn: ptr.To("fdqn"), + PrivateFQDN: ptr.To("private fqdn"), + OidcIssuerProfile: &asocontainerservicev1.ManagedClusterOIDCIssuerProfile_STATUS{ + IssuerURL: ptr.To("oidc"), + }, + CurrentKubernetesVersion: ptr.To("1.19.0"), }, } - kclient := fakeclient.NewClientBuilder(). - WithObjects(adminASOKubeconfig, userASOKubeconfig). 
- Build() - scope.EXPECT().GetClient().Return(kclient).AnyTimes() - scope.EXPECT().SetControlPlaneEndpoint(clusterv1.APIEndpoint{ - Host: "fdqn", - Port: 443, - }) - scope.EXPECT().ClusterName().Return(clusterName).AnyTimes() - scope.EXPECT().IsAADEnabled().Return(true) - scope.EXPECT().AreLocalAccountsDisabled().Return(false) - scope.EXPECT().SetAdminKubeconfigData([]byte("admin credentials")) - scope.EXPECT().SetUserKubeconfigData([]byte("user credentials")) - scope.EXPECT().SetOIDCIssuerProfileStatus(gomock.Nil()) - scope.EXPECT().SetOIDCIssuerProfileStatus(&infrav1.OIDCIssuerProfileStatus{ - IssuerURL: ptr.To("oidc"), - }) - scope.EXPECT().SetVersionStatus("v1.19.0") - scope.EXPECT().IsManagedVersionUpgrade().Return(true) - scope.EXPECT().SetAutoUpgradeVersionStatus("v1.19.0") + err := postCreateOrUpdateResourceHook(context.Background(), scope, managedCluster, nil) + g.Expect(err).NotTo(HaveOccurred()) + }) - managedCluster := &asocontainerservicev1.ManagedCluster{ + t.Run("successful create or update, preview enabled", func(t *testing.T) { + g := NewGomegaWithT(t) + namespace := "default" + scope := setupMockScope(t) + + scope.EXPECT().IsPreviewEnabled().Return(true) + + managedCluster := &asocontainerservicev1preview.ManagedCluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, }, - Spec: asocontainerservicev1.ManagedCluster_Spec{ + Spec: asocontainerservicev1preview.ManagedCluster_Spec{ KubernetesVersion: ptr.To("1.19.0"), - AutoUpgradeProfile: &asocontainerservicev1.ManagedClusterAutoUpgradeProfile{ - UpgradeChannel: ptr.To(asocontainerservicev1.ManagedClusterAutoUpgradeProfile_UpgradeChannel_Stable), + AutoUpgradeProfile: &asocontainerservicev1preview.ManagedClusterAutoUpgradeProfile{ + UpgradeChannel: ptr.To(asocontainerservicev1preview.ManagedClusterAutoUpgradeProfile_UpgradeChannel_Stable), }, }, - Status: asocontainerservicev1.ManagedCluster_STATUS{ + Status: asocontainerservicev1preview.ManagedCluster_STATUS{ Fqdn: ptr.To("fdqn"), PrivateFQDN: ptr.To("private fqdn"), - OidcIssuerProfile: &asocontainerservicev1.ManagedClusterOIDCIssuerProfile_STATUS{ + OidcIssuerProfile: &asocontainerservicev1preview.ManagedClusterOIDCIssuerProfile_STATUS{ IssuerURL: ptr.To("oidc"), }, CurrentKubernetesVersion: ptr.To("1.19.0"), @@ -132,6 +123,7 @@ func TestPostCreateOrUpdateResourceHook(t *testing.T) { }) scope.EXPECT().ClusterName().Return(clusterName).AnyTimes() scope.EXPECT().IsAADEnabled().Return(true) + scope.EXPECT().IsPreviewEnabled().Return(false) managedCluster := &asocontainerservicev1.ManagedCluster{ ObjectMeta: metav1.ObjectMeta{ @@ -151,3 +143,53 @@ func TestPostCreateOrUpdateResourceHook(t *testing.T) { g.Expect(err).To(HaveOccurred()) }) } + +func setupMockScope(t *testing.T) *mock_managedclusters.MockManagedClusterScope { + t.Helper() + mockCtrl := gomock.NewController(t) + scope := mock_managedclusters.NewMockManagedClusterScope(mockCtrl) + namespace := "default" + clusterName := "cluster" + + adminASOKubeconfig := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: adminKubeconfigSecretName(clusterName), + }, + Data: map[string][]byte{ + secret.KubeconfigDataName: []byte("admin credentials"), + }, + } + userASOKubeconfig := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: userKubeconfigSecretName(clusterName), + }, + Data: map[string][]byte{ + secret.KubeconfigDataName: []byte("user credentials"), + }, + } + kclient := fakeclient.NewClientBuilder(). + WithObjects(adminASOKubeconfig, userASOKubeconfig). 
+ Build() + scope.EXPECT().GetClient().Return(kclient).AnyTimes() + + scope.EXPECT().SetControlPlaneEndpoint(clusterv1.APIEndpoint{ + Host: "fdqn", + Port: 443, + }) + scope.EXPECT().ClusterName().Return(clusterName).AnyTimes() + scope.EXPECT().IsAADEnabled().Return(true) + scope.EXPECT().AreLocalAccountsDisabled().Return(false) + scope.EXPECT().SetAdminKubeconfigData([]byte("admin credentials")) + scope.EXPECT().SetUserKubeconfigData([]byte("user credentials")) + scope.EXPECT().SetOIDCIssuerProfileStatus(gomock.Nil()) + scope.EXPECT().SetOIDCIssuerProfileStatus(&infrav1.OIDCIssuerProfileStatus{ + IssuerURL: ptr.To("oidc"), + }) + scope.EXPECT().SetVersionStatus("v1.19.0") + scope.EXPECT().IsManagedVersionUpgrade().Return(true) + scope.EXPECT().SetAutoUpgradeVersionStatus("v1.19.0") + + return scope +} diff --git a/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go b/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go index a0e7171e303..ca0649c1b2e 100644 --- a/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go +++ b/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go @@ -31,7 +31,7 @@ import ( time "time" azcore "github.com/Azure/azure-sdk-for-go/sdk/azcore" - v1api20231001 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" + genruntime "github.com/Azure/azure-service-operator/v2/pkg/genruntime" gomock "go.uber.org/mock/gomock" v1 "k8s.io/api/core/v1" v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" @@ -313,6 +313,20 @@ func (mr *MockManagedClusterScopeMockRecorder) IsManagedVersionUpgrade() *gomock return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsManagedVersionUpgrade", reflect.TypeOf((*MockManagedClusterScope)(nil).IsManagedVersionUpgrade)) } +// IsPreviewEnabled mocks base method. +func (m *MockManagedClusterScope) IsPreviewEnabled() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsPreviewEnabled") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsPreviewEnabled indicates an expected call of IsPreviewEnabled. +func (mr *MockManagedClusterScopeMockRecorder) IsPreviewEnabled() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsPreviewEnabled", reflect.TypeOf((*MockManagedClusterScope)(nil).IsPreviewEnabled)) +} + // MakeClusterCA mocks base method. func (m *MockManagedClusterScope) MakeClusterCA() *v1.Secret { m.ctrl.T.Helper() @@ -342,10 +356,10 @@ func (mr *MockManagedClusterScopeMockRecorder) MakeEmptyKubeConfigSecret() *gomo } // ManagedClusterSpec mocks base method. 
-func (m *MockManagedClusterScope) ManagedClusterSpec() azure.ASOResourceSpecGetter[*v1api20231001.ManagedCluster] { +func (m *MockManagedClusterScope) ManagedClusterSpec() azure.ASOResourceSpecGetter[genruntime.MetaObject] { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ManagedClusterSpec") - ret0, _ := ret[0].(azure.ASOResourceSpecGetter[*v1api20231001.ManagedCluster]) + ret0, _ := ret[0].(azure.ASOResourceSpecGetter[genruntime.MetaObject]) return ret0 } diff --git a/azure/services/managedclusters/spec.go b/azure/services/managedclusters/spec.go index 5a31a6694eb..6cee11078fd 100644 --- a/azure/services/managedclusters/spec.go +++ b/azure/services/managedclusters/spec.go @@ -22,7 +22,9 @@ import ( "fmt" "net" + asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview" asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" + asocontainerservicev1hub "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001/storage" "github.com/Azure/azure-service-operator/v2/pkg/genruntime" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -86,7 +88,7 @@ type ManagedClusterSpec struct { SSHPublicKey string // GetAllAgentPools is a function that returns the list of agent pool specifications in this cluster. - GetAllAgentPools func() ([]azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool], error) + GetAllAgentPools func() ([]azure.ASOResourceSpecGetter[genruntime.MetaObject], error) // PodCIDR is the CIDR block for IP addresses distributed to pods PodCIDR string @@ -141,6 +143,9 @@ type ManagedClusterSpec struct { // Patches are extra patches to be applied to the ASO resource. Patches []string + + // Preview enables the preview API version. + Preview bool } // ManagedClusterAutoUpgradeProfile auto upgrade profile for a managed cluster. @@ -268,15 +273,6 @@ type OIDCIssuerProfile struct { Enabled *bool } -// ResourceRef implements azure.ASOResourceSpecGetter. -func (s *ManagedClusterSpec) ResourceRef() *asocontainerservicev1.ManagedCluster { - return &asocontainerservicev1.ManagedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: s.Name, - }, - } -} - // ManagedClusterSecurityProfile defines the security profile for the cluster. type ManagedClusterSecurityProfile struct { // AzureKeyVaultKms defines Azure Key Vault key management service settings for the security profile. @@ -384,13 +380,50 @@ func (s *ManagedClusterSpec) getManagedClusterVersion(existing *asocontainerserv return versions.GetHigherK8sVersion(s.Version, *existing.Status.CurrentKubernetesVersion) } +// ResourceRef implements azure.ASOResourceSpecGetter. +func (s *ManagedClusterSpec) ResourceRef() genruntime.MetaObject { + if s.Preview { + return &asocontainerservicev1preview.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: s.Name, + }, + } + } + return &asocontainerservicev1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: s.Name, + }, + } +} + // Parameters returns the parameters for the managed clusters. // //nolint:gocyclo // Function requires a lot of nil checks that raise complexity. 
-func (s *ManagedClusterSpec) Parameters(ctx context.Context, existing *asocontainerservicev1.ManagedCluster) (params *asocontainerservicev1.ManagedCluster, err error) { +func (s *ManagedClusterSpec) Parameters(ctx context.Context, existingObj genruntime.MetaObject) (params genruntime.MetaObject, err error) { ctx, _, done := tele.StartSpanWithLogger(ctx, "managedclusters.Service.Parameters") defer done() + // If existing is preview, convert to stable then back to preview at the end of the function. + var existing *asocontainerservicev1.ManagedCluster + var existingStatus asocontainerservicev1preview.ManagedCluster_STATUS + if existingObj != nil { + if s.Preview { + existingPreview := existingObj.(*asocontainerservicev1preview.ManagedCluster) + existingStatus = existingPreview.Status + hub := &asocontainerservicev1hub.ManagedCluster{} + if err := existingPreview.ConvertTo(hub); err != nil { + return nil, err + } + stable := &asocontainerservicev1.ManagedCluster{} + if err := stable.ConvertFrom(hub); err != nil { + return nil, err + } + existing = stable.DeepCopy() + } else { + existing = existingObj.(*asocontainerservicev1.ManagedCluster) + } + } + managedCluster := existing if managedCluster == nil { managedCluster = &asocontainerservicev1.ManagedCluster{ @@ -651,6 +684,7 @@ func (s *ManagedClusterSpec) Parameters(ctx context.Context, existing *asocontai // Only include AgentPoolProfiles during initial cluster creation. Agent pools are managed solely by the // AzureManagedMachinePool controller thereafter. + var prevAgentPoolProfiles []asocontainerservicev1preview.ManagedClusterAgentPoolProfile managedCluster.Spec.AgentPoolProfiles = nil if managedCluster.Status.AgentPoolProfiles == nil { // Add all agent pools to cluster spec that will be submitted to the API @@ -663,18 +697,47 @@ func (s *ManagedClusterSpec) Parameters(ctx context.Context, existing *asocontai if err := asocontainerservicev1.AddToScheme(scheme); err != nil { return nil, errors.Wrap(err, "error constructing scheme") } + if err := asocontainerservicev1preview.AddToScheme(scheme); err != nil { + return nil, errors.Wrap(err, "error constructing scheme") + } for _, agentPoolSpec := range agentPoolSpecs { agentPool, err := aso.PatchedParameters(ctx, scheme, agentPoolSpec, nil) if err != nil { return nil, errors.Wrapf(err, "failed to get agent pool parameters for managed cluster %s", s.Name) } agentPoolSpecTyped := agentPoolSpec.(*agentpools.AgentPoolSpec) - agentPool.Spec.AzureName = agentPoolSpecTyped.AzureName - profile := converters.AgentPoolToManagedClusterAgentPoolProfile(agentPool) - managedCluster.Spec.AgentPoolProfiles = append(managedCluster.Spec.AgentPoolProfiles, profile) + if s.Preview { + agentPoolTyped := agentPool.(*asocontainerservicev1preview.ManagedClustersAgentPool) + agentPoolTyped.Spec.AzureName = agentPoolSpecTyped.AzureName + profile := converters.AgentPoolToManagedClusterAgentPoolPreviewProfile(agentPoolTyped) + prevAgentPoolProfiles = append(prevAgentPoolProfiles, profile) + } else { + agentPoolTyped := agentPool.(*asocontainerservicev1.ManagedClustersAgentPool) + agentPoolTyped.Spec.AzureName = agentPoolSpecTyped.AzureName + profile := converters.AgentPoolToManagedClusterAgentPoolProfile(agentPoolTyped) + managedCluster.Spec.AgentPoolProfiles = append(managedCluster.Spec.AgentPoolProfiles, profile) + } } } + if s.Preview { + hub := &asocontainerservicev1hub.ManagedCluster{} + if err := managedCluster.ConvertTo(hub); err != nil { + return nil, err + } + prev := 
&asocontainerservicev1preview.ManagedCluster{} + if err := prev.ConvertFrom(hub); err != nil { + return nil, err + } + if existing != nil { + prev.Status = existingStatus + } + if prevAgentPoolProfiles != nil { + prev.Spec.AgentPoolProfiles = prevAgentPoolProfiles + } + return prev, nil + } + return managedCluster, nil } @@ -750,12 +813,12 @@ func userKubeconfigSecretName(clusterName string) string { } // WasManaged implements azure.ASOResourceSpecGetter. -func (s *ManagedClusterSpec) WasManaged(resource *asocontainerservicev1.ManagedCluster) bool { +func (s *ManagedClusterSpec) WasManaged(resource genruntime.MetaObject) bool { // CAPZ has never supported BYO managed clusters. return true } -var _ aso.TagsGetterSetter[*asocontainerservicev1.ManagedCluster] = (*ManagedClusterSpec)(nil) +var _ aso.TagsGetterSetter[genruntime.MetaObject] = (*ManagedClusterSpec)(nil) // GetAdditionalTags implements aso.TagsGetterSetter. func (s *ManagedClusterSpec) GetAdditionalTags() infrav1.Tags { @@ -763,13 +826,20 @@ func (s *ManagedClusterSpec) GetAdditionalTags() infrav1.Tags { } // GetDesiredTags implements aso.TagsGetterSetter. -func (*ManagedClusterSpec) GetDesiredTags(resource *asocontainerservicev1.ManagedCluster) infrav1.Tags { - return resource.Spec.Tags +func (s *ManagedClusterSpec) GetDesiredTags(resource genruntime.MetaObject) infrav1.Tags { + if s.Preview { + return resource.(*asocontainerservicev1preview.ManagedCluster).Spec.Tags + } + return resource.(*asocontainerservicev1.ManagedCluster).Spec.Tags } // SetTags implements aso.TagsGetterSetter. -func (*ManagedClusterSpec) SetTags(resource *asocontainerservicev1.ManagedCluster, tags infrav1.Tags) { - resource.Spec.Tags = tags +func (s *ManagedClusterSpec) SetTags(resource genruntime.MetaObject, tags infrav1.Tags) { + if s.Preview { + resource.(*asocontainerservicev1preview.ManagedCluster).Spec.Tags = tags + return + } + resource.(*asocontainerservicev1.ManagedCluster).Spec.Tags = tags } var _ aso.Patcher = (*ManagedClusterSpec)(nil) diff --git a/azure/services/managedclusters/spec_test.go b/azure/services/managedclusters/spec_test.go index bde266dcf55..de4e54259d1 100644 --- a/azure/services/managedclusters/spec_test.go +++ b/azure/services/managedclusters/spec_test.go @@ -21,6 +21,7 @@ import ( "encoding/base64" "testing" + asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview" asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" "github.com/Azure/azure-service-operator/v2/pkg/genruntime" "github.com/google/go-cmp/cmp" @@ -51,8 +52,8 @@ func TestParameters(t *testing.T) { NetworkPolicy: "network policy", OutboundType: ptr.To(infrav1.ManagedControlPlaneOutboundType("outbound type")), SSHPublicKey: base64.StdEncoding.EncodeToString([]byte("ssh")), - GetAllAgentPools: func() ([]azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool], error) { - return []azure.ASOResourceSpecGetter[*asocontainerservicev1.ManagedClustersAgentPool]{ + GetAllAgentPools: func() ([]azure.ASOResourceSpecGetter[genruntime.MetaObject], error) { + return []azure.ASOResourceSpecGetter[genruntime.MetaObject]{ &agentpools.AgentPoolSpec{ Replicas: 5, Mode: "mode", @@ -291,6 +292,31 @@ func TestParameters(t *testing.T) { g.Expect(cmp.Diff(actual, expected)).To(BeEmpty()) }) + t.Run("no existing preview managed cluster", func(t *testing.T) { + g := NewGomegaWithT(t) + + spec := &ManagedClusterSpec{ + Name: "name", + Preview: true, + 
GetAllAgentPools: func() ([]azure.ASOResourceSpecGetter[genruntime.MetaObject], error) { + return []azure.ASOResourceSpecGetter[genruntime.MetaObject]{ + &agentpools.AgentPoolSpec{ + Replicas: 5, + Mode: "mode", + AzureName: "agentpool", + Patches: []string{`{"spec": {"tags": {"from": "patches"}}}`}, + Preview: true, + }, + }, nil + }, + } + + actual, err := spec.Parameters(context.Background(), nil) + g.Expect(err).NotTo(HaveOccurred()) + _, ok := actual.(*asocontainerservicev1preview.ManagedCluster) + g.Expect(ok).To(BeTrue()) + }) + t.Run("with existing managed cluster", func(t *testing.T) { g := NewGomegaWithT(t) @@ -311,7 +337,8 @@ func TestParameters(t *testing.T) { }, } - actual, err := spec.Parameters(context.Background(), existing) + actualObj, err := spec.Parameters(context.Background(), existing) + actual := actualObj.(*asocontainerservicev1.ManagedCluster) g.Expect(err).NotTo(HaveOccurred()) g.Expect(actual.Spec.AgentPoolProfiles).To(BeNil()) @@ -321,6 +348,7 @@ func TestParameters(t *testing.T) { g.Expect(actual.Spec.KubernetesVersion).NotTo(BeNil()) g.Expect(*actual.Spec.KubernetesVersion).To(Equal("1.26.6")) }) + t.Run("updating existing managed cluster to a non nil DNS Service IP", func(t *testing.T) { g := NewGomegaWithT(t) @@ -342,7 +370,8 @@ func TestParameters(t *testing.T) { }, } - actual, err := spec.Parameters(context.Background(), existing) + actualObj, err := spec.Parameters(context.Background(), existing) + actual := actualObj.(*asocontainerservicev1.ManagedCluster) g.Expect(err).NotTo(HaveOccurred()) g.Expect(actual.Spec.AgentPoolProfiles).To(BeNil()) diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml index 250b9f0aaef..6c5cb617335 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml @@ -300,6 +300,10 @@ spec: DNS service. It must be within the Kubernetes service address range specified in serviceCidr. Immutable. type: string + enablePreviewFeatures: + description: EnablePreviewFeatures enables preview features for the + cluster. + type: boolean extensions: description: Extensions is a list of AKS extensions to be installed on the cluster. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanetemplates.yaml index c38ab17415a..154b62d0051 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanetemplates.yaml @@ -282,6 +282,10 @@ spec: Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr. Immutable. type: string + enablePreviewFeatures: + description: EnablePreviewFeatures enables preview features + for the cluster. + type: boolean extensions: description: Extensions is a list of AKS extensions to be installed on the cluster. 
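The preview handling added to `Parameters` above does not convert directly between the preview and stable API versions; it round-trips objects through ASO's storage ("hub") version. A minimal sketch of that pattern, assuming only the ASO `containerservice` packages already imported in `spec.go` (the package and helper names here are illustrative and not part of this patch):

```go
package previewconversion

import (
	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
	asocontainerservicev1hub "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001/storage"
)

// previewToStable mirrors the start of Parameters when preview features are enabled:
// the existing preview object is converted to the hub (storage) version and then to
// the stable version, so the shared reconcile logic only has to deal with one type.
func previewToStable(preview *asocontainerservicev1preview.ManagedCluster) (*asocontainerservicev1.ManagedCluster, error) {
	hub := &asocontainerservicev1hub.ManagedCluster{}
	if err := preview.ConvertTo(hub); err != nil {
		return nil, err
	}
	stable := &asocontainerservicev1.ManagedCluster{}
	if err := stable.ConvertFrom(hub); err != nil {
		return nil, err
	}
	return stable, nil
}

// stableToPreview mirrors the end of Parameters: the fully built stable object is
// converted back so the resource that is ultimately applied uses the preview API version.
func stableToPreview(stable *asocontainerservicev1.ManagedCluster) (*asocontainerservicev1preview.ManagedCluster, error) {
	hub := &asocontainerservicev1hub.ManagedCluster{}
	if err := stable.ConvertTo(hub); err != nil {
		return nil, err
	}
	preview := &asocontainerservicev1preview.ManagedCluster{}
	if err := preview.ConvertFrom(hub); err != nil {
		return nil, err
	}
	return preview, nil
}
```

Funneling both directions through the hub type keeps most of the reconcile logic working against the stable `ManagedCluster` type; the pieces built directly against the preview types, the existing preview `Status` and the preview agent pool profiles, are carried across the round trip explicitly via `existingStatus` and `prevAgentPoolProfiles` in the patch above.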
diff --git a/controllers/azuremanagedmachinepool_controller_test.go b/controllers/azuremanagedmachinepool_controller_test.go
index 95ce11a13ef..5f1c6d48055 100644
--- a/controllers/azuremanagedmachinepool_controller_test.go
+++ b/controllers/azuremanagedmachinepool_controller_test.go
@@ -72,6 +72,7 @@ func TestAzureManagedMachinePoolReconcile(t *testing.T) {
 				agentpools.SetAgentPoolProviderIDList(providerIDs)
 				agentpools.SetAgentPoolReplicas(int32(len(providerIDs))).Return()
 				agentpools.SetAgentPoolReady(true).Return()
+				agentpools.IsPreviewEnabled().Return(false)
 
 				nodelister.List(gomock2.AContext(), "fake-rg").Return(fakeVirtualMachineScaleSet, nil)
 				nodelister.ListInstances(gomock2.AContext(), "fake-rg", "vmssName").Return(fakeVirtualMachineScaleSetVM, nil)
diff --git a/controllers/azuremanagedmachinepool_reconciler.go b/controllers/azuremanagedmachinepool_reconciler.go
index 0e081a0b588..c315f821eef 100644
--- a/controllers/azuremanagedmachinepool_reconciler.go
+++ b/controllers/azuremanagedmachinepool_reconciler.go
@@ -22,6 +22,8 @@ import (
 	"time"
 
 	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
+	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
 	"github.com/pkg/errors"
 	azprovider "sigs.k8s.io/cloud-provider-azure/pkg/provider"
 	"sigs.k8s.io/cluster-api-provider-azure/azure"
@@ -112,7 +114,14 @@ func (s *azureManagedMachinePoolService) Reconcile(ctx context.Context) error {
 	if err != nil {
 		return errors.Wrap(err, "failed to get agent pool parameters")
 	}
-	agentPoolName := agentPool.AzureName()
+	var agentPoolName string
+	if s.scope.IsPreviewEnabled() {
+		agentPoolTyped := agentPool.(*asocontainerservicev1preview.ManagedClustersAgentPool)
+		agentPoolName = agentPoolTyped.AzureName()
+	} else {
+		agentPoolTyped := agentPool.(*asocontainerservicev1.ManagedClustersAgentPool)
+		agentPoolName = agentPoolTyped.AzureName()
+	}
 
 	if err := s.agentPoolsSvc.Reconcile(ctx); err != nil {
 		return errors.Wrapf(err, "failed to reconcile machine pool %s", agentPoolName)
diff --git a/docs/book/src/topics/managedcluster.md b/docs/book/src/topics/managedcluster.md
index 6954f36db6c..63d13395041 100644
--- a/docs/book/src/topics/managedcluster.md
+++ b/docs/book/src/topics/managedcluster.md
@@ -364,6 +364,36 @@ spec:
     enabled: true
 ```
 
+### Enabling Preview API Features for ManagedClusters
+
+#### :warning: WARNING: This is meant to be used sparingly to enable features for development and testing that are not otherwise represented in the CAPZ API. Misconfiguration that conflicts with CAPZ's normal mode of operation is possible.
+
+To enable preview API features for managed clusters, set the `enablePreviewFeatures` field in the `AzureManagedControlPlane` resource spec to `true`. To set any of the new fields included in the preview API version, patch them in with the `asoManagedClusterPatches` field in the `AzureManagedControlPlane` resource spec and the `asoManagedClustersAgentPoolPatches` field in the `AzureManagedMachinePool` resource spec.
+
+Please refer to the [ASO Docs](https://azure.github.io/azure-service-operator/reference/containerservice/) for the ContainerService API reference, which documents the latest preview fields and their usage.
+ +Example for enabling preview features for managed clusters: + +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedControlPlane +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + enablePreviewFeatures: true + asoManagedClusterPatches: + - '{"spec": {"enableNamespaceResources": true}}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureManagedMachinePool +metadata: + ... +spec: + asoManagedClustersAgentPoolPatches: + - '{"spec": {"enableCustomCATrust": true}}' +``` + #### OIDC Issuer on AKS Setting `AzureManagedControlPlane.Spec.oidcIssuerProfile.enabled` to `true` will enable OIDC issuer profile for the `AzureManagedControlPlane`. Once enabled, you will see a configmap named `-aso-oidc-issuer-profile` in the same namespace as the `AzureManagedControlPlane` resource. This configmap will contain the OIDC issuer profile url under the `oidc-issuer-profile-url` key. diff --git a/main.go b/main.go index 0195b39bead..160f0330d50 100644 --- a/main.go +++ b/main.go @@ -26,7 +26,8 @@ import ( "time" // +kubebuilder:scaffold:imports - asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230315preview" + asocontainerservicev1api20230202preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview" + asocontainerservicev1api20230315preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230315preview" asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" asokubernetesconfigurationv1 "github.com/Azure/azure-service-operator/v2/api/kubernetesconfiguration/v1api20230501" asonetworkv1api20201101 "github.com/Azure/azure-service-operator/v2/api/network/v1api20201101" @@ -81,7 +82,8 @@ func init() { _ = asocontainerservicev1.AddToScheme(scheme) _ = asonetworkv1api20220701.AddToScheme(scheme) _ = asonetworkv1api20201101.AddToScheme(scheme) - _ = asocontainerservicev1preview.AddToScheme(scheme) + _ = asocontainerservicev1api20230202preview.AddToScheme(scheme) + _ = asocontainerservicev1api20230315preview.AddToScheme(scheme) _ = asokubernetesconfigurationv1.AddToScheme(scheme) // +kubebuilder:scaffold:scheme } diff --git a/test/e2e/aks_clusterclass.go b/test/e2e/aks_clusterclass.go index ef7eebce46f..e85e1594375 100644 --- a/test/e2e/aks_clusterclass.go +++ b/test/e2e/aks_clusterclass.go @@ -80,8 +80,10 @@ func AKSClusterClassSpec(ctx context.Context, inputGetter func() AKSClusterClass Name: clusterClass.Spec.Workers.MachinePools[i].Template.Infrastructure.Ref.Name, }, ammpt) Expect(err).NotTo(HaveOccurred()) - ammpt.Spec.Template.Spec.ScaleDownMode = ptr.To("Deallocate") - g.Expect(mgmtClient.Update(ctx, ammpt)).To(Succeed()) + if ammpt.Spec.Template.Spec.OsDiskType != nil && *ammpt.Spec.Template.Spec.OsDiskType != "Ephemeral" { + ammpt.Spec.Template.Spec.ScaleDownMode = ptr.To("Deallocate") + g.Expect(mgmtClient.Update(ctx, ammpt)).To(Succeed()) + } } }, inputGetter().WaitIntervals...).Should(Succeed()) @@ -93,7 +95,9 @@ func AKSClusterClassSpec(ctx context.Context, inputGetter func() AKSClusterClass Name: input.MachinePool.Spec.Template.Spec.InfrastructureRef.Name, }, ammp) Expect(err).NotTo(HaveOccurred()) - g.Expect(ammp.Spec.ScaleDownMode).To(Equal(ptr.To("Deallocate"))) + if ammp.Spec.OsDiskType != nil && *ammp.Spec.OsDiskType != "Ephemeral" { + g.Expect(ammp.Spec.ScaleDownMode).To(Equal(ptr.To("Deallocate"))) + } }, inputGetter().WaitIntervals...).Should(Succeed()) Eventually(func(g Gomega) {