From 99bd11506b4694cc4a80afe890a32c56df577e35 Mon Sep 17 00:00:00 2001 From: LochanRn Date: Fri, 13 Oct 2023 21:47:58 +0530 Subject: [PATCH] support for auto upgrade channel --- api/v1beta1/azuremanagedcontrolplane_types.go | 68 +++++++++- .../azuremanagedcontrolplane_webhook.go | 39 ++++++ .../azuremanagedcontrolplane_webhook_test.go | 111 +++++++++++++++++ api/v1beta1/zz_generated.deepcopy.go | 30 +++++ azure/scope/managedcontrolplane.go | 27 ++++ azure/scope/managedcontrolplane_test.go | 92 ++++++++++++++ azure/scope/managedmachinepool.go | 28 ++++- azure/scope/managedmachinepool_test.go | 116 ++++++++++++++++++ azure/services/agentpools/spec.go | 27 ++++ azure/services/agentpools/spec_test.go | 62 +++++++++- .../managedclusters/managedclusters.go | 14 ++- .../managedclusters/managedclusters_test.go | 43 +++++++ .../managedclusters_mock.go | 26 ++++ azure/services/managedclusters/spec.go | 98 +++++++++------ ...er.x-k8s.io_azuremanagedcontrolplanes.yaml | 28 +++++ util/versions/version.go | 40 ++++++ util/versions/version_test.go | 100 +++++++++++++++ 17 files changed, 898 insertions(+), 51 deletions(-) create mode 100644 util/versions/version.go create mode 100644 util/versions/version_test.go diff --git a/api/v1beta1/azuremanagedcontrolplane_types.go b/api/v1beta1/azuremanagedcontrolplane_types.go index 545deffdb8ae..6651fae2a906 100644 --- a/api/v1beta1/azuremanagedcontrolplane_types.go +++ b/api/v1beta1/azuremanagedcontrolplane_types.go @@ -34,6 +34,50 @@ const ( PrivateDNSZoneModeNone string = "None" ) +// UpgradeChannel determines the type of upgrade channel for automatically upgrading the cluster. +type UpgradeChannel string + +const ( + // UpgradeChannelNodeImage automatically upgrades the node image to the latest version available. + // Consider using nodeOSUpgradeChannel instead as that allows you to configure node OS patching separate from Kubernetes version patching. + UpgradeChannelNodeImage UpgradeChannel = "node-image" + // UpgradeChannelNone disables auto-upgrades and keeps the cluster at its current version of Kubernetes. + UpgradeChannelNone UpgradeChannel = "none" + // UpgradeChannelPatch automatically upgrade the cluster to the latest supported patch version when it becomes available + // while keeping the minor version the same. For example, if a cluster is running version 1.17.7 and versions 1.17.9, 1.18.4, + // 1.18.6, and 1.19.1 are available, your cluster is upgraded to 1.17.9. + UpgradeChannelPatch UpgradeChannel = "patch" + // UpgradeChannelRapid automatically upgrade the cluster to the latest supported patch release on the latest supported minor + // version. In cases where the cluster is at a version of Kubernetes that is at an N-2 minor version where N is the latest + // supported minor version, the cluster first upgrades to the latest supported patch version on N-1 minor version. For example, + // if a cluster is running version 1.17.7 and versions 1.17.9, 1.18.4, 1.18.6, and 1.19.1 are available, your cluster first + // is upgraded to 1.18.6, then is upgraded to 1.19.1. + UpgradeChannelRapid UpgradeChannel = "rapid" + // UpgradeChannelStable automatically upgrade the cluster to the latest supported patch release on minor version N-1, where + // N is the latest supported minor version. For example, if a cluster is running version 1.17.7 and versions 1.17.9, 1.18.4, + // 1.18.6, and 1.19.1 are available, your cluster is upgraded to 1.18.6. 
+ UpgradeChannelStable UpgradeChannel = "stable" +) + +// NodeOSUpgradeChannel determines the manner in which the OS on your nodes is updated. The default is NodeImage. +type NodeOSUpgradeChannel string + +const ( + // NodeOSUpgradeChannelNodeImage channel instructs AKS to update the nodes with a newly patched VHD containing security fixes and bugfixes + // on a weekly cadence. With the VHD update machines will be rolling reimaged to that VHD following maintenance windows and + // surge settings. No extra VHD cost is incurred when choosing this option as AKS hosts the images. + NodeOSUpgradeChannelNodeImage NodeOSUpgradeChannel = "NodeImage" + // NodeOSUpgradeChannelNone channel instructs AKS to not perform update on your machines OS, either by OS or by rolling VHDs. This means + // you are responsible for your security updates. + NodeOSUpgradeChannelNone NodeOSUpgradeChannel = "None" + // NodeOSUpgradeChannelUnmanaged channel instructs AKS to apply OS updates automatically through the OS built-in patching infrastructure. + // Newly scaled in machines will be unpatched initially and will be patched at some point by the OS's infrastructure. Behavior + // of this option depends on the OS in question. Ubuntu and Mariner apply security patches through unattended upgrade roughly + // once a day around 06:00 UTC. Windows does not apply security patches automatically and so for them this option is equivalent + // to None till further notice. + NodeOSUpgradeChannelUnmanaged NodeOSUpgradeChannel = "Unmanaged" +) + // ManagedControlPlaneOutboundType enumerates the values for the managed control plane OutboundType. type ManagedControlPlaneOutboundType string @@ -77,7 +121,7 @@ const ( // AzureManagedControlPlaneSpec defines the desired state of AzureManagedControlPlane. type AzureManagedControlPlaneSpec struct { // Version defines the desired Kubernetes version. - // +kubebuilder:validation:MinLength:=2 + // +kubebuilder:validation:MinLength=2 Version string `json:"version"` // ResourceGroupName is the name of the Azure resource group for this AKS Cluster. @@ -221,6 +265,23 @@ type AzureManagedControlPlaneSpec struct { // DisableLocalAccounts disables getting static credentials for this cluster when set. Expected to only be used for AAD clusters. // +optional DisableLocalAccounts *bool `json:"disableLocalAccounts,omitempty"` + + // AutoUpgradeProfile - Profile of auto upgrade configuration. + // +optional + AutoUpgradeProfile *ManagedClusterAutoUpgradeProfile `json:"autoUpgradeProfile,omitempty"` +} + +// ManagedClusterAutoUpgradeProfile - Auto upgrade profile for a managed cluster. +type ManagedClusterAutoUpgradeProfile struct { + // NodeOSUpgradeChannel is a manner in which the OS on your nodes is updated. The default is NodeImage. Possible values include: NodeImage,Unmanaged,None + // +kubebuilder:validation:Enum=NodeImage;Unmanaged;None + // +optional + NodeOSUpgradeChannel *NodeOSUpgradeChannel `json:"nodeOSUpgradeChannel,omitempty"` + + // UpgradeChannel upgrade channel for auto upgrade. Possible values include: 'node-image','none','patch','rapid','stable' + // +kubebuilder:validation:Enum=node-image;none;patch;rapid;stable + // +optional + UpgradeChannel *UpgradeChannel `json:"upgradeChannel,omitempty"` } // HTTPProxyConfig is the HTTP proxy configuration for the cluster. @@ -364,6 +425,11 @@ type ManagedControlPlaneSubnet struct { // AzureManagedControlPlaneStatus defines the observed state of AzureManagedControlPlane. 
type AzureManagedControlPlaneStatus struct { + // AutoUpgradeVersion is the Kubernetes version populated after autoupgrade based on the upgrade channel. + // +kubebuilder:validation:MinLength=2 + // +optional + AutoUpgradeVersion string `json:"autoUpgradeVersion,omitempty"` + // Ready is true when the provider resource is ready. // +optional Ready bool `json:"ready,omitempty"` diff --git a/api/v1beta1/azuremanagedcontrolplane_webhook.go b/api/v1beta1/azuremanagedcontrolplane_webhook.go index 5637c32e3a3e..bbe203fff97a 100644 --- a/api/v1beta1/azuremanagedcontrolplane_webhook.go +++ b/api/v1beta1/azuremanagedcontrolplane_webhook.go @@ -27,12 +27,14 @@ import ( "strings" "time" + semverv4 "github.com/blang/semver" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api-provider-azure/feature" + "sigs.k8s.io/cluster-api-provider-azure/util/versions" webhookutils "sigs.k8s.io/cluster-api-provider-azure/util/webhook" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" capifeature "sigs.k8s.io/cluster-api/feature" @@ -256,6 +258,10 @@ func (mw *azureManagedControlPlaneWebhook) ValidateUpdate(ctx context.Context, o allErrs = append(allErrs, errs...) } + if errs := m.validateAutoUpgradeProfile(old); len(errs) > 0 { + allErrs = append(allErrs, errs...) + } + if errs := m.validateOIDCIssuerProfileUpdate(old); len(errs) > 0 { allErrs = append(allErrs, errs...) } @@ -332,6 +338,10 @@ func (m *AzureManagedControlPlane) validateVersion(_ client.Client) error { return errors.New("must be a valid semantic version") } + if _, err := semverv4.ParseTolerant(m.Spec.Version); err != nil { + return errors.Join(err, errors.New("must be a valid semantic version")) + } + return nil } @@ -492,6 +502,35 @@ func (m *AzureManagedControlPlane) validateManagedClusterNetwork(cli client.Clie return nil } +// validateAutoUpgradeProfile validates auto upgrade profile. +func (m *AzureManagedControlPlane) validateAutoUpgradeProfile(old *AzureManagedControlPlane) field.ErrorList { + var allErrs field.ErrorList + if old.Spec.AutoUpgradeProfile != nil && m.Spec.AutoUpgradeProfile == nil { + // Prevent AutoUpgradeProfile to be set to nil. + // Unsetting the field is not allowed. + allErrs = append(allErrs, + field.Invalid( + field.NewPath("Spec", "AutoUpgradeProfile"), + m.Spec.AutoUpgradeProfile, + "field cannot be set to nil, to disable auto upgrades set the channel to none.")) + } + + if hv := versions.GetHigherK8sVersion(m.Spec.Version, old.Spec.Version); hv != m.Spec.Version { + allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "Version"), + m.Spec.Version, "field version cannot be downgraded"), + ) + } + + if old.Status.AutoUpgradeVersion != "" && m.Spec.Version != old.Spec.Version { + if hv := versions.GetHigherK8sVersion(m.Spec.Version, old.Status.AutoUpgradeVersion); hv != m.Spec.Version { + allErrs = append(allErrs, field.Invalid(field.NewPath("Spec", "Version"), + m.Spec.Version, "version is auto-upgraded to "+old.Status.AutoUpgradeVersion+",cannot be downgraded"), + ) + } + } + return allErrs +} + // validateAPIServerAccessProfileUpdate validates update to APIServerAccessProfile. 
func (m *AzureManagedControlPlane) validateAPIServerAccessProfileUpdate(old *AzureManagedControlPlane) field.ErrorList { var allErrs field.ErrorList diff --git a/api/v1beta1/azuremanagedcontrolplane_webhook_test.go b/api/v1beta1/azuremanagedcontrolplane_webhook_test.go index 904780e3e3df..7d5ba4a9cb51 100644 --- a/api/v1beta1/azuremanagedcontrolplane_webhook_test.go +++ b/api/v1beta1/azuremanagedcontrolplane_webhook_test.go @@ -1037,6 +1037,117 @@ func TestAzureManagedControlPlane_ValidateUpdate(t *testing.T) { amcp: createAzureManagedControlPlane("192.168.0.10", "1.999.9", generateSSHPublicKey(true)), wantErr: true, }, + { + name: "AzureManagedControlPlane invalid version downgrade change", + oldAMCP: &AzureManagedControlPlane{ + Spec: AzureManagedControlPlaneSpec{ + DNSServiceIP: ptr.To("192.168.0.0"), + Version: "v1.18.0", + }, + }, + amcp: &AzureManagedControlPlane{ + Spec: AzureManagedControlPlaneSpec{ + DNSServiceIP: ptr.To("192.168.0.0"), + Version: "v1.17.0", + }, + }, + wantErr: true, + }, + { + name: "AzureManagedControlPlane invalid version downgrade change", + oldAMCP: &AzureManagedControlPlane{ + Spec: AzureManagedControlPlaneSpec{ + DNSServiceIP: ptr.To("192.168.0.0"), + Version: "v1.18.0", + }, + Status: AzureManagedControlPlaneStatus{ + AutoUpgradeVersion: "v1.18.3", + }, + }, + amcp: &AzureManagedControlPlane{ + Spec: AzureManagedControlPlaneSpec{ + DNSServiceIP: ptr.To("192.168.0.0"), + Version: "v1.18.1", + }, + }, + wantErr: true, + }, + { + name: "AzureManagedControlPlane invalid version downgrade change", + oldAMCP: &AzureManagedControlPlane{ + Spec: AzureManagedControlPlaneSpec{ + DNSServiceIP: ptr.To("192.168.0.0"), + Version: "v1.18.0", + }, + Status: AzureManagedControlPlaneStatus{ + AutoUpgradeVersion: "1.19.3", + }, + }, + amcp: &AzureManagedControlPlane{ + Spec: AzureManagedControlPlaneSpec{ + DNSServiceIP: ptr.To("192.168.0.0"), + Version: "v1.18.6", + }, + }, + wantErr: true, + }, + { + name: "AzureManagedControlPlane no version change", + oldAMCP: &AzureManagedControlPlane{ + Spec: AzureManagedControlPlaneSpec{ + DNSServiceIP: ptr.To("192.168.0.0"), + Version: "v1.18.0", + }, + Status: AzureManagedControlPlaneStatus{ + AutoUpgradeVersion: "1.19.3", + }, + }, + amcp: &AzureManagedControlPlane{ + Spec: AzureManagedControlPlaneSpec{ + DNSServiceIP: ptr.To("192.168.0.0"), + Version: "v1.18.0", + }, + }, + wantErr: false, + }, + { + name: "AzureManagedControlPlane valid version upgrade change", + oldAMCP: &AzureManagedControlPlane{ + Spec: AzureManagedControlPlaneSpec{ + DNSServiceIP: ptr.To("192.168.0.0"), + Version: "v1.18.0", + }, + Status: AzureManagedControlPlaneStatus{ + AutoUpgradeVersion: "1.19.3", + }, + }, + amcp: &AzureManagedControlPlane{ + Spec: AzureManagedControlPlaneSpec{ + DNSServiceIP: ptr.To("192.168.0.0"), + Version: "v1.19.5", + }, + }, + wantErr: false, + }, + { + name: "AzureManagedControlPlane valid version change", + oldAMCP: &AzureManagedControlPlane{ + Spec: AzureManagedControlPlaneSpec{ + DNSServiceIP: ptr.To("192.168.0.0"), + Version: "v1.18.0", + }, + Status: AzureManagedControlPlaneStatus{ + AutoUpgradeVersion: "1.19.3", + }, + }, + amcp: &AzureManagedControlPlane{ + Spec: AzureManagedControlPlaneSpec{ + DNSServiceIP: ptr.To("192.168.0.0"), + Version: "v1.19.3", + }, + }, + wantErr: false, + }, { name: "AzureManagedControlPlane SubscriptionID is immutable", oldAMCP: &AzureManagedControlPlane{ diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 6e83abe36b1b..a665defeeca8 100644 --- 
a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -1257,6 +1257,11 @@ func (in *AzureManagedControlPlaneSpec) DeepCopyInto(out *AzureManagedControlPla *out = new(bool) **out = **in } + if in.AutoUpgradeProfile != nil { + in, out := &in.AutoUpgradeProfile, &out.AutoUpgradeProfile + *out = new(ManagedClusterAutoUpgradeProfile) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureManagedControlPlaneSpec. @@ -2196,6 +2201,31 @@ func (in *LoadBalancerSpec) DeepCopy() *LoadBalancerSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedClusterAutoUpgradeProfile) DeepCopyInto(out *ManagedClusterAutoUpgradeProfile) { + *out = *in + if in.NodeOSUpgradeChannel != nil { + in, out := &in.NodeOSUpgradeChannel, &out.NodeOSUpgradeChannel + *out = new(NodeOSUpgradeChannel) + **out = **in + } + if in.UpgradeChannel != nil { + in, out := &in.UpgradeChannel, &out.UpgradeChannel + *out = new(UpgradeChannel) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedClusterAutoUpgradeProfile. +func (in *ManagedClusterAutoUpgradeProfile) DeepCopy() *ManagedClusterAutoUpgradeProfile { + if in == nil { + return nil + } + out := new(ManagedClusterAutoUpgradeProfile) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManagedControlPlaneSubnet) DeepCopyInto(out *ManagedControlPlaneSubnet) { *out = *in diff --git a/azure/scope/managedcontrolplane.go b/azure/scope/managedcontrolplane.go index c52a4c5c9e91..c350e7192b8d 100644 --- a/azure/scope/managedcontrolplane.go +++ b/azure/scope/managedcontrolplane.go @@ -478,6 +478,23 @@ func (s *ManagedControlPlaneScope) IsAADEnabled() bool { return false } +// SetAutoUpgradeVersionStatus sets the auto upgrade version in status. +func (s *ManagedControlPlaneScope) SetAutoUpgradeVersionStatus(version string) { + s.ControlPlane.Status.AutoUpgradeVersion = version +} + +// IsManagedVersionUpgrade checks if version is auto managed by AKS. +func (s *ManagedControlPlaneScope) IsManagedVersionUpgrade() bool { + return isManagedVersionUpgrade(s.ControlPlane) +} + +func isManagedVersionUpgrade(managedControlPlane *infrav1.AzureManagedControlPlane) bool { + return managedControlPlane.Spec.AutoUpgradeProfile != nil && + managedControlPlane.Spec.AutoUpgradeProfile.UpgradeChannel != nil && + (*managedControlPlane.Spec.AutoUpgradeProfile.UpgradeChannel != infrav1.UpgradeChannelNone && + *managedControlPlane.Spec.AutoUpgradeProfile.UpgradeChannel != infrav1.UpgradeChannelNodeImage) +} + // ManagedClusterSpec returns the managed cluster spec. 
func (s *ManagedControlPlaneScope) ManagedClusterSpec() azure.ResourceSpecGetter { managedClusterSpec := managedclusters.ManagedClusterSpec{ @@ -609,6 +626,16 @@ func (s *ManagedControlPlaneScope) ManagedClusterSpec() azure.ResourceSpecGetter } } + if s.ControlPlane.Spec.AutoUpgradeProfile != nil { + managedClusterSpec.AutoUpgradeProfile = &managedclusters.ManagedClusterAutoUpgradeProfile{} + if s.ControlPlane.Spec.AutoUpgradeProfile.UpgradeChannel != nil { + managedClusterSpec.AutoUpgradeProfile.UpgradeChannel = s.ControlPlane.Spec.AutoUpgradeProfile.UpgradeChannel + } + if s.ControlPlane.Spec.AutoUpgradeProfile.NodeOSUpgradeChannel != nil { + managedClusterSpec.AutoUpgradeProfile.NodeOSUpgradeChannel = s.ControlPlane.Spec.AutoUpgradeProfile.NodeOSUpgradeChannel + } + } + return &managedClusterSpec } diff --git a/azure/scope/managedcontrolplane_test.go b/azure/scope/managedcontrolplane_test.go index 48f5668a3b6d..f91ca0654765 100644 --- a/azure/scope/managedcontrolplane_test.go +++ b/azure/scope/managedcontrolplane_test.go @@ -1038,3 +1038,95 @@ func TestAreLocalAccountsDisabled(t *testing.T) { }) } } + +func TestManagedControlPlaneScope_AutoUpgradeProfile(t *testing.T) { + scheme := runtime.NewScheme() + _ = clusterv1.AddToScheme(scheme) + _ = infrav1.AddToScheme(scheme) + + cases := []struct { + Name string + Input ManagedControlPlaneScopeParams + Expected *managedclusters.ManagedClusterAutoUpgradeProfile + }{ + { + Name: "Without AutoUpgradeProfile", + Input: ManagedControlPlaneScopeParams{ + AzureClients: AzureClients{ + Authorizer: autorest.NullAuthorizer{}, + }, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: "default", + }, + }, + ControlPlane: &infrav1.AzureManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: "default", + }, + Spec: infrav1.AzureManagedControlPlaneSpec{ + SubscriptionID: "00000000-0000-0000-0000-000000000000", + }, + }, + ManagedMachinePools: []ManagedMachinePool{ + { + MachinePool: getMachinePool("pool0"), + InfraMachinePool: getAzureMachinePool("pool0", infrav1.NodePoolModeSystem), + }, + }, + }, + Expected: nil, + }, + { + Name: "With AutoUpgradeProfile UpgradeChannelNodeImage", + Input: ManagedControlPlaneScopeParams{ + AzureClients: AzureClients{ + Authorizer: autorest.NullAuthorizer{}, + }, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: "default", + }, + }, + ControlPlane: &infrav1.AzureManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: "default", + }, + Spec: infrav1.AzureManagedControlPlaneSpec{ + SubscriptionID: "00000000-0000-0000-0000-000000000000", + AutoUpgradeProfile: &infrav1.ManagedClusterAutoUpgradeProfile{ + UpgradeChannel: ptr.To(infrav1.UpgradeChannelNodeImage), + }, + }, + }, + ManagedMachinePools: []ManagedMachinePool{ + { + MachinePool: getMachinePool("pool0"), + InfraMachinePool: getAzureMachinePool("pool0", infrav1.NodePoolModeSystem), + }, + }, + }, + Expected: &managedclusters.ManagedClusterAutoUpgradeProfile{ + UpgradeChannel: ptr.To(infrav1.UpgradeChannelNodeImage), + }, + }, + } + for _, c := range cases { + c := c + t.Run(c.Name, func(t *testing.T) { + g := NewWithT(t) + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(c.Input.ControlPlane).Build() + c.Input.Client = fakeClient + s, err := NewManagedControlPlaneScope(context.TODO(), c.Input) + g.Expect(err).To(Succeed()) + managedClusterGetter := s.ManagedClusterSpec() + managedCluster, ok := 
managedClusterGetter.(*managedclusters.ManagedClusterSpec) + g.Expect(ok).To(BeTrue()) + g.Expect(managedCluster.AutoUpgradeProfile).To(Equal(c.Expected)) + }) + } +} diff --git a/azure/scope/managedmachinepool.go b/azure/scope/managedmachinepool.go index 2208db235a38..0645a0f01d74 100644 --- a/azure/scope/managedmachinepool.go +++ b/azure/scope/managedmachinepool.go @@ -29,6 +29,7 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/util/futures" "sigs.k8s.io/cluster-api-provider-azure/util/maps" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + "sigs.k8s.io/cluster-api-provider-azure/util/versions" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" @@ -155,11 +156,7 @@ func buildAgentPoolSpec(managedControlPlane *infrav1.AzureManagedControlPlane, machinePool *expv1.MachinePool, managedMachinePool *infrav1.AzureManagedMachinePool, agentPoolAnnotations map[string]string) azure.ResourceSpecGetter { - var normalizedVersion *string - if machinePool.Spec.Template.Spec.Version != nil { - v := strings.TrimPrefix(*machinePool.Spec.Template.Spec.Version, "v") - normalizedVersion = &v - } + normalizedVersion := getManagedMachinePoolVersion(managedControlPlane, machinePool) replicas := int32(1) if machinePool.Spec.Replicas != nil { @@ -341,3 +338,24 @@ func (s *ManagedMachinePoolScope) GetCAPIMachinePoolAnnotation(key string) (succ val, ok := s.MachinePool.Annotations[key] return ok, val } + +// IsManagedAutoUpgrade checks if version is auto managed by AKS. +func (s *ManagedMachinePoolScope) IsManagedAutoUpgrade() bool { + return isManagedVersionUpgrade(s.ControlPlane) +} + +func getManagedMachinePoolVersion(managedControlPlane *infrav1.AzureManagedControlPlane, machinePool *expv1.MachinePool) *string { + var v, av string + if machinePool != nil { + v = ptr.Deref(machinePool.Spec.Template.Spec.Version, "") + } + if managedControlPlane != nil { + av = managedControlPlane.Status.AutoUpgradeVersion + } + higherVersion := versions.GetHigherK8sVersion(v, av) + if higherVersion == "" { + // When both mp.Version and mcp.Status.AutoUpgradeVersion are not set we return nil + return nil + } + return ptr.To(strings.TrimPrefix(higherVersion, "v")) +} diff --git a/azure/scope/managedmachinepool_test.go b/azure/scope/managedmachinepool_test.go index 59f912feda23..3f2ca8e48f9e 100644 --- a/azure/scope/managedmachinepool_test.go +++ b/azure/scope/managedmachinepool_test.go @@ -860,6 +860,122 @@ func TestManagedMachinePoolScope_KubeletDiskType(t *testing.T) { } } +func Test_getManagedMachinePoolVersion(t *testing.T) { + cases := []struct { + Name string + managedControlPlane *infrav1.AzureManagedControlPlane + machinePool *expv1.MachinePool + Expected *string + }{ + { + Name: "Empty configs", + managedControlPlane: nil, + machinePool: nil, + Expected: nil, + }, + { + Name: "Empty mp", + managedControlPlane: &infrav1.AzureManagedControlPlane{}, + machinePool: nil, + Expected: nil, + }, + { + Name: "Only machine pool is available", + managedControlPlane: nil, + machinePool: &expv1.MachinePool{ + Spec: expv1.MachinePoolSpec{ + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + Version: ptr.To("v1.15.0"), + }, + }, + }, + }, + Expected: ptr.To("1.15.0"), + }, + { + Name: "Only machine pool is available and cp is nil", + managedControlPlane: nil, + machinePool: &expv1.MachinePool{ + Spec: expv1.MachinePoolSpec{ + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + Version: 
ptr.To("v1.15.0"), + }, + }, + }, + }, + Expected: ptr.To("1.15.0"), + }, + { + Name: "mcp.status.autoUpgradeVersion > mp.spec.template.spec.version", + managedControlPlane: &infrav1.AzureManagedControlPlane{ + Status: infrav1.AzureManagedControlPlaneStatus{ + AutoUpgradeVersion: "1.20.3", + }, + }, + machinePool: &expv1.MachinePool{ + Spec: expv1.MachinePoolSpec{ + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + Version: ptr.To("v1.15.0"), + }, + }, + }, + }, + Expected: ptr.To("1.20.3"), + }, + { + Name: "suffix + mcp.status.autoUpgradeVersion > mp.spec.template.spec.version", + managedControlPlane: &infrav1.AzureManagedControlPlane{ + Status: infrav1.AzureManagedControlPlaneStatus{ + AutoUpgradeVersion: "v1.20.3", + }, + }, + machinePool: &expv1.MachinePool{ + Spec: expv1.MachinePoolSpec{ + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + Version: ptr.To("v1.15.0"), + }, + }, + }, + }, + Expected: ptr.To("1.20.3"), + }, + { + Name: "mcp.status.autoUpgradeVersion < mp.spec.template.spec.version", + managedControlPlane: &infrav1.AzureManagedControlPlane{ + Status: infrav1.AzureManagedControlPlaneStatus{ + AutoUpgradeVersion: "v1.20.3", + }, + }, + machinePool: &expv1.MachinePool{ + Spec: expv1.MachinePoolSpec{ + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + Version: ptr.To("v1.21.0"), + }, + }, + }, + }, + Expected: ptr.To("1.21.0"), + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + g := NewWithT(t) + v := getManagedMachinePoolVersion(c.managedControlPlane, c.machinePool) + if c.Expected != nil { + g.Expect(*v).To(Equal(*c.Expected)) + } else { + g.Expect(v).To(Equal(c.Expected)) + } + }) + } +} + func getAzureMachinePool(name string, mode infrav1.NodePoolMode) *infrav1.AzureManagedMachinePool { return &infrav1.AzureManagedMachinePool{ ObjectMeta: metav1.ObjectMeta{ diff --git a/azure/services/agentpools/spec.go b/azure/services/agentpools/spec.go index b4a3b3c5f25d..619f96d60a11 100644 --- a/azure/services/agentpools/spec.go +++ b/azure/services/agentpools/spec.go @@ -31,6 +31,7 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/azure/converters" azureutil "sigs.k8s.io/cluster-api-provider-azure/util/azure" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + "sigs.k8s.io/cluster-api-provider-azure/util/versions" ) // KubeletConfig defines the set of kubelet configurations for nodes in pools. @@ -172,6 +173,25 @@ func (s *AgentPoolSpec) CustomHeaders() map[string]string { return s.Headers } +// GetManagedMachinePoolVersion gets the desired managed k8s version. +// If autoupgrade channels is set to patch, stable or rapid, clusters can be upgraded to higher version by AKS. +// If autoupgrade is triggered, existing kubernetes version will be higher than the user desired kubernetes version. +// CAPZ should honour the upgrade and it should not downgrade to the lower desired version. +func (s *AgentPoolSpec) GetManagedMachinePoolVersion(existing interface{}) (*string, error) { + version := s.Version + if existing != nil && version != nil { + existingPool, ok := existing.(armcontainerservice.AgentPool) + if !ok { + return version, fmt.Errorf("%T is not a containerservice.ManagedCluster", existing) + } + v := versions.GetHigherK8sVersion( + *version, + ptr.Deref(existingPool.Properties.OrchestratorVersion, *version)) + version = ptr.To(v) + } + return version, nil +} + // Parameters returns the parameters for the agent pool. 
func (s *AgentPoolSpec) Parameters(ctx context.Context, existing interface{}) (params interface{}, err error) { _, log, done := tele.StartSpanWithLogger(ctx, "agentpools.Service.Parameters") @@ -224,6 +244,13 @@ func (s *AgentPoolSpec) Parameters(ctx context.Context, existing interface{}) (p Tags: converters.TagsToMap(s.AdditionalTags), }, } + + if kubernetesVersion, err := s.GetManagedMachinePoolVersion(existing); err != nil { + return nil, err + } else { + normalizedProfile.Properties.OrchestratorVersion = kubernetesVersion + } + if len(normalizedProfile.Properties.NodeTaints) == 0 { normalizedProfile.Properties.NodeTaints = nil } diff --git a/azure/services/agentpools/spec_test.go b/azure/services/agentpools/spec_test.go index 68d53d016c5a..7c9cafa6665a 100644 --- a/azure/services/agentpools/spec_test.go +++ b/azure/services/agentpools/spec_test.go @@ -53,7 +53,7 @@ func fakeAgentPool(changes ...func(*AgentPoolSpec)) AgentPoolSpec { OSType: ptr.To("fake-os-type"), Replicas: 1, SKU: "fake-sku", - Version: ptr.To("fake-version"), + Version: ptr.To("1.25.11"), VnetSubnetID: "fake-vnet-subnet-id", Headers: map[string]string{"fake-header": "fake-value"}, AdditionalTags: infrav1.Tags{"fake": "tag"}, @@ -98,7 +98,7 @@ func sdkFakeAgentPool(changes ...func(*armcontainerservice.AgentPool)) armcontai Mode: ptr.To(armcontainerservice.AgentPoolMode("fake-mode")), // updates if changed NodeLabels: map[string]*string{"fake-label": ptr.To("fake-value")}, // updates if changed NodeTaints: []*string{ptr.To("fake-taint")}, // updates if changed - OrchestratorVersion: ptr.To("fake-version"), // updates if changed + OrchestratorVersion: ptr.To("1.25.11"), // updates if changed OSDiskSizeGB: ptr.To[int32](2), OSDiskType: ptr.To(armcontainerservice.OSDiskType("fake-os-disk-type")), OSType: ptr.To(armcontainerservice.OSType("fake-os-type")), @@ -495,6 +495,64 @@ func TestParameters(t *testing.T) { expected: nil, expectedError: nil, }, + { + name: "existing kubernetes version is nil", + spec: fakeAgentPool(), + existing: sdkFakeAgentPool( + func(ap *armcontainerservice.AgentPool) { + ap.Properties.OrchestratorVersion = nil + }, + sdkWithProvisioningState("Succeeded"), + ), + expected: sdkFakeAgentPool( + func(ap *armcontainerservice.AgentPool) { + ap.Properties.OrchestratorVersion = ptr.To("1.25.11") + }, + ), + expectedError: nil, + }, + { + name: "existing kubernetes version is higher", + spec: fakeAgentPool(), + existing: sdkFakeAgentPool( + func(ap *armcontainerservice.AgentPool) { + ap.Properties.OrchestratorVersion = ptr.To("1.26.6") + }, + sdkWithProvisioningState("Succeeded"), + ), + expected: nil, + expectedError: nil, + }, + { + name: "desired kubernetes version is higher", + spec: fakeAgentPool( + func(aps *AgentPoolSpec) { + aps.Version = ptr.To("1.27.3") + }, + ), + existing: sdkFakeAgentPool( + sdkWithProvisioningState("Succeeded"), + ), + expected: sdkFakeAgentPool( + func(ap *armcontainerservice.AgentPool) { + ap.Properties.OrchestratorVersion = ptr.To("1.27.3") + }, + ), + expectedError: nil, + }, + { + name: "kubernetes version did not change", + spec: fakeAgentPool( + func(aps *AgentPoolSpec) { + aps.Version = ptr.To("1.25.11") + }, + ), + existing: sdkFakeAgentPool( + sdkWithProvisioningState("Succeeded"), + ), + expected: nil, + expectedError: nil, + }, } for _, tc := range testcases { tc := tc diff --git a/azure/services/managedclusters/managedclusters.go b/azure/services/managedclusters/managedclusters.go index c66c8989a4ed..7dfb89725ec9 100644 --- 
a/azure/services/managedclusters/managedclusters.go +++ b/azure/services/managedclusters/managedclusters.go @@ -18,6 +18,7 @@ package managedclusters import ( "context" + "fmt" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4" "github.com/pkg/errors" @@ -57,6 +58,8 @@ type ManagedClusterScope interface { IsAADEnabled() bool AreLocalAccountsDisabled() bool SetOIDCIssuerProfileStatus(*infrav1.OIDCIssuerProfileStatus) + SetAutoUpgradeVersionStatus(version string) + IsManagedVersionUpgrade() bool } // Service provides operations on azure resources. @@ -133,6 +136,11 @@ func (s *Service) Reconcile(ctx context.Context) error { IssuerURL: managedCluster.Properties.OidcIssuerProfile.IssuerURL, }) } + + if s.Scope.IsManagedVersionUpgrade() && managedCluster.Properties.KubernetesVersion != nil { + kubernetesVersion := fmt.Sprintf("v%s", *managedCluster.Properties.KubernetesVersion) + s.Scope.SetAutoUpgradeVersionStatus(kubernetesVersion) + } } s.Scope.UpdatePutStatus(infrav1.ManagedClusterRunningCondition, serviceName, resultErr) return resultErr @@ -164,11 +172,11 @@ func (s *Service) IsManaged(ctx context.Context) (bool, error) { // ReconcileKubeconfig will reconcile admin kubeconfig and user kubeconfig. /* Returns the admin kubeconfig and user kubeconfig - If aad is enabled a user kubeconfig will also get generated and stored in the secret -kubeconfig-user - If we disable local accounts for aad clusters we do not have access to admin kubeconfig, hence we need to create + If AAD is enabled a user kubeconfig will also get generated and stored in the secret -kubeconfig-user + If we disable local accounts for AAD clusters we do not have access to admin kubeconfig, hence we need to create the admin kubeconfig by authenticating with the user credentials and retrieving the token for kubeconfig. The token is used to create the admin kubeconfig. - The user needs to ensure to provide service principle with admin aad privileges. + The user needs to ensure to provide service principal with admin AAD privileges. 
*/ func (s *Service) ReconcileKubeconfig(ctx context.Context, managedClusterSpec azure.ResourceSpecGetter) (userKubeConfigData []byte, adminKubeConfigData []byte, err error) { if s.Scope.IsAADEnabled() { diff --git a/azure/services/managedclusters/managedclusters_test.go b/azure/services/managedclusters/managedclusters_test.go index 25eb36badfb2..ea5226c2b83b 100644 --- a/azure/services/managedclusters/managedclusters_test.go +++ b/azure/services/managedclusters/managedclusters_test.go @@ -99,6 +99,7 @@ func TestReconcile(t *testing.T) { s.SetOIDCIssuerProfileStatus(&infrav1.OIDCIssuerProfileStatus{ IssuerURL: ptr.To("oidc issuer url"), }) + s.IsManagedVersionUpgrade().Return(false) s.UpdatePutStatus(infrav1.ManagedClusterRunningCondition, serviceName, nil) }, }, @@ -137,6 +138,48 @@ func TestReconcile(t *testing.T) { s.SetOIDCIssuerProfileStatus(&infrav1.OIDCIssuerProfileStatus{ IssuerURL: ptr.To("oidc issuer url"), }) + s.IsManagedVersionUpgrade().Return(false) + s.UpdatePutStatus(infrav1.ManagedClusterRunningCondition, serviceName, nil) + }, + }, + { + name: "create managed cluster succeeds, update autoupgrade status", + expectedError: "", + expect: func(m *mock_managedclusters.MockCredentialGetterMockRecorder, s *mock_managedclusters.MockManagedClusterScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { + var userKubeConfigData []byte + s.ManagedClusterSpec().Return(fakeManagedClusterSpec) + r.CreateOrUpdateResource(gomockinternal.AContext(), fakeManagedClusterSpec, serviceName).Return(armcontainerservice.ManagedCluster{ + Properties: &armcontainerservice.ManagedClusterProperties{ + Fqdn: ptr.To("my-managedcluster-fqdn"), + ProvisioningState: ptr.To("Succeeded"), + KubernetesVersion: ptr.To("1.27.3"), + IdentityProfile: map[string]*armcontainerservice.UserAssignedIdentity{ + kubeletIdentityKey: { + ResourceID: ptr.To("kubelet-id"), + }, + }, + OidcIssuerProfile: &armcontainerservice.ManagedClusterOIDCIssuerProfile{ + Enabled: ptr.To(true), + IssuerURL: ptr.To("oidc issuer url"), + }, + }, + }, nil) + s.SetControlPlaneEndpoint(clusterv1.APIEndpoint{ + Host: "my-managedcluster-fqdn", + Port: 443, + }) + s.IsAADEnabled().Return(false) + s.AreLocalAccountsDisabled().Return(false) + m.GetCredentials(gomockinternal.AContext(), "my-rg", "my-managedcluster").Return([]byte("credentials"), nil) + s.SetAdminKubeconfigData([]byte("credentials")) + s.SetUserKubeconfigData(userKubeConfigData) + s.SetKubeletIdentity("kubelet-id") + s.SetOIDCIssuerProfileStatus(nil) + s.SetOIDCIssuerProfileStatus(&infrav1.OIDCIssuerProfileStatus{ + IssuerURL: ptr.To("oidc issuer url"), + }) + s.IsManagedVersionUpgrade().Return(true) + s.SetAutoUpgradeVersionStatus("v1.27.3") s.UpdatePutStatus(infrav1.ManagedClusterRunningCondition, serviceName, nil) }, }, diff --git a/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go b/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go index 671e2b18aad3..24943e786ee6 100644 --- a/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go +++ b/azure/services/managedclusters/mock_managedclusters/managedclusters_mock.go @@ -210,6 +210,20 @@ func (mr *MockManagedClusterScopeMockRecorder) IsAADEnabled() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsAADEnabled", reflect.TypeOf((*MockManagedClusterScope)(nil).IsAADEnabled)) } +// IsManagedVersionUpgrade mocks base method. 
+func (m *MockManagedClusterScope) IsManagedVersionUpgrade() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsManagedVersionUpgrade") + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsManagedVersionUpgrade indicates an expected call of IsManagedVersionUpgrade. +func (mr *MockManagedClusterScopeMockRecorder) IsManagedVersionUpgrade() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsManagedVersionUpgrade", reflect.TypeOf((*MockManagedClusterScope)(nil).IsManagedVersionUpgrade)) +} + // MakeEmptyKubeConfigSecret mocks base method. func (m *MockManagedClusterScope) MakeEmptyKubeConfigSecret() v1.Secret { m.ctrl.T.Helper() @@ -250,6 +264,18 @@ func (mr *MockManagedClusterScopeMockRecorder) SetAdminKubeconfigData(arg0 any) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAdminKubeconfigData", reflect.TypeOf((*MockManagedClusterScope)(nil).SetAdminKubeconfigData), arg0) } +// SetAutoUpgradeVersionStatus mocks base method. +func (m *MockManagedClusterScope) SetAutoUpgradeVersionStatus(version string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetAutoUpgradeVersionStatus", version) +} + +// SetAutoUpgradeVersionStatus indicates an expected call of SetAutoUpgradeVersionStatus. +func (mr *MockManagedClusterScopeMockRecorder) SetAutoUpgradeVersionStatus(version any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAutoUpgradeVersionStatus", reflect.TypeOf((*MockManagedClusterScope)(nil).SetAutoUpgradeVersionStatus), version) +} + // SetControlPlaneEndpoint mocks base method. func (m *MockManagedClusterScope) SetControlPlaneEndpoint(arg0 v1beta10.APIEndpoint) { m.ctrl.T.Helper() diff --git a/azure/services/managedclusters/spec.go b/azure/services/managedclusters/spec.go index ac723ee0e721..7c99f011d5cc 100644 --- a/azure/services/managedclusters/spec.go +++ b/azure/services/managedclusters/spec.go @@ -24,6 +24,7 @@ import ( "reflect" "time" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4" "github.com/google/go-cmp/cmp" "github.com/pkg/errors" @@ -32,6 +33,7 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/converters" "sigs.k8s.io/cluster-api-provider-azure/util/tele" + "sigs.k8s.io/cluster-api-provider-azure/util/versions" ) // ManagedClusterSpec contains properties to create a managed cluster. @@ -125,8 +127,20 @@ type ManagedClusterSpec struct { // DNSPrefix allows the user to customize dns prefix. DNSPrefix *string + // DisableLocalAccounts disables getting static credentials for this cluster when set. Expected to only be used for AAD clusters. DisableLocalAccounts *bool + + // AutoUpgradeProfile - Profile of auto upgrade configuration. + AutoUpgradeProfile *ManagedClusterAutoUpgradeProfile +} + +// ManagedClusterAutoUpgradeProfile auto upgrade profile for a managed cluster. +type ManagedClusterAutoUpgradeProfile struct { + // NodeOSUpgradeChannel is a manner in which the OS on your nodes is updated. The default is NodeImage. Possible values include: NodeImage,Unmanaged,None + NodeOSUpgradeChannel *infrav1.NodeOSUpgradeChannel + // UpgradeChannel - upgrade channel for auto upgrade. Possible values include: 'UpgradeChannelRapid', 'UpgradeChannelStable', 'UpgradeChannelPatch', 'UpgradeChannelNodeImage', 'UpgradeChannelNone' + UpgradeChannel *infrav1.UpgradeChannel } // HTTPProxyConfig is the HTTP proxy configuration for the cluster. 
@@ -301,6 +315,24 @@ func buildAutoScalerProfile(autoScalerProfile *AutoScalerProfile) *armcontainers return mcAutoScalerProfile } +// GetManagedClusterVersion gets the desired managed k8s version. +// If autoupgrade channels is set to patch, stable or rapid, clusters can be upgraded to higher version by AKS. +// If autoupgrade is triggered, existing kubernetes version will be higher than the user desired kubernetes version. +// CAPZ should honour the upgrade and it should not downgrade to the lower desired version. +func (s *ManagedClusterSpec) GetManagedClusterVersion(existing interface{}) (string, error) { + version := s.Version + if existing != nil && version != "" { + existingMC, ok := existing.(armcontainerservice.ManagedCluster) + if !ok { + return version, fmt.Errorf("%T is not a containerservice.ManagedCluster", existing) + } + version = versions.GetHigherK8sVersion( + version, + ptr.Deref(existingMC.Properties.KubernetesVersion, version)) + } + return version, nil +} + // Parameters returns the parameters for the managed clusters. // //nolint:gocyclo // Function requires a lot of nil checks that raise complexity. @@ -332,8 +364,6 @@ func (s *ManagedClusterSpec) Parameters(ctx context.Context, existing interface{ NodeResourceGroup: &s.NodeResourceGroup, EnableRBAC: ptr.To(true), DNSPrefix: s.DNSPrefix, - KubernetesVersion: &s.Version, - ServicePrincipalProfile: &armcontainerservice.ManagedClusterServicePrincipalProfile{ ClientID: ptr.To("msi"), }, @@ -346,6 +376,12 @@ func (s *ManagedClusterSpec) Parameters(ctx context.Context, existing interface{ }, } + if kubernetesVersion, err := s.GetManagedClusterVersion(existing); err != nil { + return nil, err + } else { + managedCluster.Properties.KubernetesVersion = &kubernetesVersion + } + if decodedSSHPublicKey != nil { managedCluster.Properties.LinuxProfile = &armcontainerservice.LinuxProfile{ AdminUsername: ptr.To(azure.DefaultAKSUserName), @@ -474,6 +510,16 @@ func (s *ManagedClusterSpec) Parameters(ctx context.Context, existing interface{ } } + if s.AutoUpgradeProfile != nil { + managedCluster.Properties.AutoUpgradeProfile = &armcontainerservice.ManagedClusterAutoUpgradeProfile{} + if s.AutoUpgradeProfile.UpgradeChannel != nil { + managedCluster.Properties.AutoUpgradeProfile.UpgradeChannel = to.Ptr(armcontainerservice.UpgradeChannel(*s.AutoUpgradeProfile.UpgradeChannel)) + } + if s.AutoUpgradeProfile.NodeOSUpgradeChannel != nil { + managedCluster.Properties.AutoUpgradeProfile.NodeOSUpgradeChannel = to.Ptr(armcontainerservice.NodeOSUpgradeChannel(*s.AutoUpgradeProfile.NodeOSUpgradeChannel)) + } + } + if existing != nil { existingMC, ok := existing.(armcontainerservice.ManagedCluster) if !ok { @@ -632,47 +678,11 @@ func computeDiffOfNormalizedClusters(managedCluster armcontainerservice.ManagedC } if managedCluster.Properties.AutoScalerProfile != nil { - propertiesNormalized.AutoScalerProfile = &armcontainerservice.ManagedClusterPropertiesAutoScalerProfile{ - BalanceSimilarNodeGroups: managedCluster.Properties.AutoScalerProfile.BalanceSimilarNodeGroups, - Expander: managedCluster.Properties.AutoScalerProfile.Expander, - MaxEmptyBulkDelete: managedCluster.Properties.AutoScalerProfile.MaxEmptyBulkDelete, - MaxGracefulTerminationSec: managedCluster.Properties.AutoScalerProfile.MaxGracefulTerminationSec, - MaxNodeProvisionTime: managedCluster.Properties.AutoScalerProfile.MaxNodeProvisionTime, - MaxTotalUnreadyPercentage: managedCluster.Properties.AutoScalerProfile.MaxTotalUnreadyPercentage, - NewPodScaleUpDelay: 
managedCluster.Properties.AutoScalerProfile.NewPodScaleUpDelay, - OkTotalUnreadyCount: managedCluster.Properties.AutoScalerProfile.OkTotalUnreadyCount, - ScanInterval: managedCluster.Properties.AutoScalerProfile.ScanInterval, - ScaleDownDelayAfterAdd: managedCluster.Properties.AutoScalerProfile.ScaleDownDelayAfterAdd, - ScaleDownDelayAfterDelete: managedCluster.Properties.AutoScalerProfile.ScaleDownDelayAfterDelete, - ScaleDownDelayAfterFailure: managedCluster.Properties.AutoScalerProfile.ScaleDownDelayAfterFailure, - ScaleDownUnneededTime: managedCluster.Properties.AutoScalerProfile.ScaleDownUnneededTime, - ScaleDownUnreadyTime: managedCluster.Properties.AutoScalerProfile.ScaleDownUnreadyTime, - ScaleDownUtilizationThreshold: managedCluster.Properties.AutoScalerProfile.ScaleDownUtilizationThreshold, - SkipNodesWithLocalStorage: managedCluster.Properties.AutoScalerProfile.SkipNodesWithLocalStorage, - SkipNodesWithSystemPods: managedCluster.Properties.AutoScalerProfile.SkipNodesWithSystemPods, - } + propertiesNormalized.AutoScalerProfile = managedCluster.Properties.AutoScalerProfile } if existingMC.Properties.AutoScalerProfile != nil { - existingMCPropertiesNormalized.AutoScalerProfile = &armcontainerservice.ManagedClusterPropertiesAutoScalerProfile{ - BalanceSimilarNodeGroups: existingMC.Properties.AutoScalerProfile.BalanceSimilarNodeGroups, - Expander: existingMC.Properties.AutoScalerProfile.Expander, - MaxEmptyBulkDelete: existingMC.Properties.AutoScalerProfile.MaxEmptyBulkDelete, - MaxGracefulTerminationSec: existingMC.Properties.AutoScalerProfile.MaxGracefulTerminationSec, - MaxNodeProvisionTime: existingMC.Properties.AutoScalerProfile.MaxNodeProvisionTime, - MaxTotalUnreadyPercentage: existingMC.Properties.AutoScalerProfile.MaxTotalUnreadyPercentage, - NewPodScaleUpDelay: existingMC.Properties.AutoScalerProfile.NewPodScaleUpDelay, - OkTotalUnreadyCount: existingMC.Properties.AutoScalerProfile.OkTotalUnreadyCount, - ScanInterval: existingMC.Properties.AutoScalerProfile.ScanInterval, - ScaleDownDelayAfterAdd: existingMC.Properties.AutoScalerProfile.ScaleDownDelayAfterAdd, - ScaleDownDelayAfterDelete: existingMC.Properties.AutoScalerProfile.ScaleDownDelayAfterDelete, - ScaleDownDelayAfterFailure: existingMC.Properties.AutoScalerProfile.ScaleDownDelayAfterFailure, - ScaleDownUnneededTime: existingMC.Properties.AutoScalerProfile.ScaleDownUnneededTime, - ScaleDownUnreadyTime: existingMC.Properties.AutoScalerProfile.ScaleDownUnreadyTime, - ScaleDownUtilizationThreshold: existingMC.Properties.AutoScalerProfile.ScaleDownUtilizationThreshold, - SkipNodesWithLocalStorage: existingMC.Properties.AutoScalerProfile.SkipNodesWithLocalStorage, - SkipNodesWithSystemPods: existingMC.Properties.AutoScalerProfile.SkipNodesWithSystemPods, - } + existingMCPropertiesNormalized.AutoScalerProfile = existingMC.Properties.AutoScalerProfile } if managedCluster.Properties.IdentityProfile != nil { @@ -753,6 +763,14 @@ func computeDiffOfNormalizedClusters(managedCluster armcontainerservice.ManagedC existingMCClusterNormalized.Properties.DisableLocalAccounts = existingMC.Properties.DisableLocalAccounts } + if managedCluster.Properties.AutoUpgradeProfile != nil { + clusterNormalized.Properties.AutoUpgradeProfile = managedCluster.Properties.AutoUpgradeProfile + } + + if existingMC.Properties.AutoUpgradeProfile != nil { + existingMCClusterNormalized.Properties.AutoUpgradeProfile = existingMC.Properties.AutoUpgradeProfile + } + diff := cmp.Diff(clusterNormalized, existingMCClusterNormalized) return diff } diff --git 
a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml index f0e7e06b8027..46741ab2f500 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml @@ -112,6 +112,29 @@ spec: - None type: string type: object + autoUpgradeProfile: + description: AutoUpgradeProfile - Profile of auto upgrade configuration. + properties: + nodeOSUpgradeChannel: + description: 'NodeOSUpgradeChannel is a manner in which the OS + on your nodes is updated. The default is NodeImage. Possible + values include: NodeImage,Unmanaged,None' + enum: + - NodeImage + - Unmanaged + - None + type: string + upgradeChannel: + description: 'UpgradeChannel upgrade channel for auto upgrade. + Possible values include: ''node-image'',''none'',''patch'',''rapid'',''stable''' + enum: + - node-image + - none + - patch + - rapid + - stable + type: string + type: object autoscalerProfile: description: AutoscalerProfile is the parameters to be applied to the cluster-autoscaler when enabled @@ -583,6 +606,11 @@ spec: description: AzureManagedControlPlaneStatus defines the observed state of AzureManagedControlPlane. properties: + autoUpgradeVersion: + description: AutoUpgradeVersion is the Kubernetes version populated + after autoupgrade based on the upgrade channel. + minLength: 2 + type: string conditions: description: Conditions defines current service state of the AzureManagedControlPlane. items: diff --git a/util/versions/version.go b/util/versions/version.go new file mode 100644 index 000000000000..1540a6f5ae56 --- /dev/null +++ b/util/versions/version.go @@ -0,0 +1,40 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versions + +import ( + semverv4 "github.com/blang/semver" +) + +// GetHigherK8sVersion returns the higher k8s version out of a and b. +func GetHigherK8sVersion(a, b string) string { + v1, errv1 := semverv4.ParseTolerant(a) + v2, errv2 := semverv4.ParseTolerant(b) + if errv1 != nil && errv2 != nil { + return "" + } + if errv1 != nil { + return b + } + if errv2 != nil { + return a + } + if v1.GTE(v2) { + return a + } + return b +} diff --git a/util/versions/version_test.go b/util/versions/version_test.go new file mode 100644 index 000000000000..6c8bdbf10d83 --- /dev/null +++ b/util/versions/version_test.go @@ -0,0 +1,100 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package versions
+
+import (
+	"testing"
+
+	. "github.com/onsi/gomega"
+)
+
+func TestGetHigherK8sVersion(t *testing.T) {
+	cases := []struct {
+		name   string
+		a      string
+		b      string
+		output string
+	}{
+		{
+			name:   "b is greater than a",
+			a:      "v1.17.8",
+			b:      "v1.18.8",
+			output: "v1.18.8",
+		},
+		{
+			name:   "a is greater than b",
+			a:      "v1.18.9",
+			b:      "v1.18.8",
+			output: "v1.18.9",
+		},
+		{
+			name:   "b is greater than a and a is major.minor",
+			a:      "v1.18",
+			b:      "v1.18.8",
+			output: "v1.18.8",
+		},
+		{
+			name:   "a is equal to b",
+			a:      "v1.18.8",
+			b:      "v1.18.8",
+			output: "v1.18.8",
+		},
+		{
+			name:   "a is greater than b and a is major.minor",
+			a:      "v1.18",
+			b:      "v1.17.8",
+			output: "v1.18",
+		},
+		{
+			name:   "a is greater than b and versions lack the v prefix",
+			a:      "1.18",
+			b:      "1.17.8",
+			output: "1.18",
+		},
+		{
+			name:   "a is invalid",
+			a:      "1.18.",
+			b:      "v1.17.8",
+			output: "v1.17.8",
+		},
+		{
+			name:   "b is invalid",
+			a:      "1.18.1",
+			b:      "v1.17.8.",
+			output: "1.18.1",
+		},
+		{
+			name:   "b is invalid and a is a high version",
+			a:      "9.99.9999",
+			b:      "v1.17.8.",
+			output: "9.99.9999",
+		},
+		{
+			name:   "a and b are invalid",
+			a:      "",
+			b:      "v1.17.8.",
+			output: "",
+		},
+	}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			g := NewWithT(t)
+			output := GetHigherK8sVersion(c.a, c.b)
+			g.Expect(output).To(Equal(c.output))
+		})
+	}
+}
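
For reference, a minimal, self-contained Go sketch of the version-selection behavior this patch relies on: both the managed cluster and agent pool specs keep whichever Kubernetes version is higher between the user-desired version and the version AKS reports after an auto upgrade, so an auto-upgraded cluster is never downgraded back to the spec value. The helper below mirrors util/versions.GetHigherK8sVersion introduced in this patch; the main wrapper and the example version strings are illustrative assumptions only and are not part of the change.

package main

import (
	"fmt"

	semverv4 "github.com/blang/semver"
)

// getHigherK8sVersion mirrors util/versions.GetHigherK8sVersion from this patch:
// parse both inputs tolerantly and return the higher one, falling back to
// whichever input parses (or "") when parsing fails.
func getHigherK8sVersion(a, b string) string {
	v1, errA := semverv4.ParseTolerant(a)
	v2, errB := semverv4.ParseTolerant(b)
	if errA != nil && errB != nil {
		return ""
	}
	if errA != nil {
		return b
	}
	if errB != nil {
		return a
	}
	if v1.GTE(v2) {
		return a
	}
	return b
}

func main() {
	// AKS auto-upgraded the cluster to 1.27.1 while the spec still asks for v1.26.3:
	// the higher version wins, so the reconcilers do not downgrade the cluster.
	fmt.Println(getHigherK8sVersion("v1.26.3", "1.27.1")) // prints "1.27.1"

	// No auto upgrade has happened yet (status.autoUpgradeVersion is empty), so the
	// spec version is used as-is.
	fmt.Println(getHigherK8sVersion("v1.26.3", "")) // prints "v1.26.3"
}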