From 0d39da476e6bf5f8288d25ff29e610af1665570a Mon Sep 17 00:00:00 2001 From: Jonathan Tong Date: Thu, 4 Aug 2022 20:55:59 -0400 Subject: [PATCH] Make agent pools reconcile/delete async --- azure/converters/managedagentpool.go | 66 +-- azure/converters/managedagentpool_test.go | 108 +--- azure/scope/managedcontrolplane.go | 6 +- azure/scope/managedcontrolplane_test.go | 26 +- azure/scope/managedmachinepool.go | 19 +- azure/scope/managedmachinepool_test.go | 64 ++- azure/services/agentpools/agentpools.go | 129 +---- azure/services/agentpools/agentpools_test.go | 528 +++--------------- azure/services/agentpools/client.go | 148 +++-- .../mock_agentpools/agentpools_mock.go | 409 ++++++++++++-- .../agentpools/mock_agentpools/doc.go | 2 +- azure/services/agentpools/spec.go | 204 +++++++ azure/services/agentpools/spec_test.go | 344 ++++++++++++ azure/services/managedclusters/spec.go | 15 +- azure/services/managedclusters/spec_test.go | 47 +- azure/types.go | 60 -- .../azuremanagedmachinepool_reconciler.go | 6 +- 17 files changed, 1285 insertions(+), 896 deletions(-) create mode 100644 azure/services/agentpools/spec.go create mode 100644 azure/services/agentpools/spec_test.go diff --git a/azure/converters/managedagentpool.go b/azure/converters/managedagentpool.go index ab5ee6b5e2e..3f7fae2fd95 100644 --- a/azure/converters/managedagentpool.go +++ b/azure/converters/managedagentpool.go @@ -18,55 +18,29 @@ package converters import ( "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice" - "github.com/Azure/go-autorest/autorest/to" - "sigs.k8s.io/cluster-api-provider-azure/azure" ) // AgentPoolToManagedClusterAgentPoolProfile converts a AgentPoolSpec to an Azure SDK ManagedClusterAgentPoolProfile used in managedcluster reconcile. -func AgentPoolToManagedClusterAgentPoolProfile(pool azure.AgentPoolSpec) containerservice.ManagedClusterAgentPoolProfile { +func AgentPoolToManagedClusterAgentPoolProfile(pool containerservice.AgentPool) containerservice.ManagedClusterAgentPoolProfile { + properties := pool.ManagedClusterAgentPoolProfileProperties return containerservice.ManagedClusterAgentPoolProfile{ - Name: &pool.Name, - VMSize: &pool.SKU, - OsType: containerservice.OSType(to.String(pool.OSType)), - OsDiskSizeGB: &pool.OSDiskSizeGB, - Count: &pool.Replicas, - Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, - OrchestratorVersion: pool.Version, - VnetSubnetID: &pool.VnetSubnetID, - Mode: containerservice.AgentPoolMode(pool.Mode), - EnableAutoScaling: pool.EnableAutoScaling, - MaxCount: pool.MaxCount, - MinCount: pool.MinCount, - NodeTaints: &pool.NodeTaints, - AvailabilityZones: &pool.AvailabilityZones, - MaxPods: pool.MaxPods, - OsDiskType: containerservice.OSDiskType(to.String(pool.OsDiskType)), - NodeLabels: pool.NodeLabels, - EnableUltraSSD: pool.EnableUltraSSD, - } -} - -// AgentPoolToContainerServiceAgentPool converts a AgentPoolSpec to an Azure SDK AgentPool used in agentpool reconcile. 
-func AgentPoolToContainerServiceAgentPool(pool azure.AgentPoolSpec) containerservice.AgentPool { - return containerservice.AgentPool{ - ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ - VMSize: &pool.SKU, - OsType: containerservice.OSType(to.String(pool.OSType)), - OsDiskSizeGB: &pool.OSDiskSizeGB, - Count: &pool.Replicas, - Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, - OrchestratorVersion: pool.Version, - VnetSubnetID: &pool.VnetSubnetID, - Mode: containerservice.AgentPoolMode(pool.Mode), - EnableAutoScaling: pool.EnableAutoScaling, - MaxCount: pool.MaxCount, - MinCount: pool.MinCount, - NodeTaints: &pool.NodeTaints, - AvailabilityZones: &pool.AvailabilityZones, - MaxPods: pool.MaxPods, - OsDiskType: containerservice.OSDiskType(to.String(pool.OsDiskType)), - NodeLabels: pool.NodeLabels, - EnableUltraSSD: pool.EnableUltraSSD, - }, + Name: pool.Name, // Note: if converting from agentPoolSpec.Parameters(), this field will not be set + VMSize: properties.VMSize, + OsType: properties.OsType, + OsDiskSizeGB: properties.OsDiskSizeGB, + Count: properties.Count, + Type: properties.Type, + OrchestratorVersion: properties.OrchestratorVersion, + VnetSubnetID: properties.VnetSubnetID, + Mode: properties.Mode, + EnableAutoScaling: properties.EnableAutoScaling, + MaxCount: properties.MaxCount, + MinCount: properties.MinCount, + NodeTaints: properties.NodeTaints, + AvailabilityZones: properties.AvailabilityZones, + MaxPods: properties.MaxPods, + OsDiskType: properties.OsDiskType, + NodeLabels: properties.NodeLabels, + EnableUltraSSD: properties.EnableUltraSSD, } } diff --git a/azure/converters/managedagentpool_test.go b/azure/converters/managedagentpool_test.go index 3fc42a9650a..04c51284855 100644 --- a/azure/converters/managedagentpool_test.go +++ b/azure/converters/managedagentpool_test.go @@ -28,30 +28,33 @@ import ( func Test_AgentPoolToManagedClusterAgentPoolProfile(t *testing.T) { cases := []struct { name string - pool azure.AgentPoolSpec + pool containerservice.AgentPool expect func(*GomegaWithT, containerservice.ManagedClusterAgentPoolProfile) }{ { name: "Should set all values correctly", - pool: azure.AgentPoolSpec{ - SKU: "Standard_D2s_v3", - OSDiskSizeGB: 100, - Replicas: 2, - OSType: to.StringPtr(azure.LinuxOS), - Version: to.StringPtr("1.22.6"), - VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-123/providers/Microsoft.Network/virtualNetworks/vnet-123/subnets/subnet-123", - Mode: "User", - EnableAutoScaling: to.BoolPtr(true), - MaxCount: to.Int32Ptr(5), - MinCount: to.Int32Ptr(2), - NodeTaints: []string{"key1=value1:NoSchedule"}, - AvailabilityZones: []string{"zone1"}, - MaxPods: to.Int32Ptr(60), - OsDiskType: to.StringPtr(string(containerservice.OSDiskTypeManaged)), - NodeLabels: map[string]*string{ - "custom": to.StringPtr("default"), + pool: containerservice.AgentPool{ + Name: to.StringPtr("agentpool1"), + ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ + VMSize: to.StringPtr("Standard_D2s_v3"), + OsType: azure.LinuxOS, + OsDiskSizeGB: to.Int32Ptr(100), + Count: to.Int32Ptr(2), + Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, + OrchestratorVersion: to.StringPtr("1.22.6"), + VnetSubnetID: to.StringPtr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-123/providers/Microsoft.Network/virtualNetworks/vnet-123/subnets/subnet-123"), + Mode: containerservice.AgentPoolModeUser, + EnableAutoScaling: 
to.BoolPtr(true), + MaxCount: to.Int32Ptr(5), + MinCount: to.Int32Ptr(2), + NodeTaints: to.StringSlicePtr([]string{"key1=value1:NoSchedule"}), + AvailabilityZones: to.StringSlicePtr([]string{"zone1"}), + MaxPods: to.Int32Ptr(60), + OsDiskType: containerservice.OSDiskTypeManaged, + NodeLabels: map[string]*string{ + "custom": to.StringPtr("default"), + }, }, - Name: "agentpool1", }, expect: func(g *GomegaWithT, result containerservice.ManagedClusterAgentPoolProfile) { @@ -90,70 +93,3 @@ func Test_AgentPoolToManagedClusterAgentPoolProfile(t *testing.T) { }) } } - -func Test_AgentPoolToAgentPoolToContainerServiceAgentPool(t *testing.T) { - cases := []struct { - name string - pool azure.AgentPoolSpec - expect func(*GomegaWithT, containerservice.AgentPool) - }{ - { - name: "Should set all values correctly", - pool: azure.AgentPoolSpec{ - Name: "agentpool1", - SKU: "Standard_D2s_v3", - OSDiskSizeGB: 100, - OSType: to.StringPtr(azure.LinuxOS), - Replicas: 2, - Version: to.StringPtr("1.22.6"), - VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-123/providers/Microsoft.Network/virtualNetworks/vnet-123/subnets/subnet-123", - Mode: "User", - EnableAutoScaling: to.BoolPtr(true), - MaxCount: to.Int32Ptr(5), - MinCount: to.Int32Ptr(2), - NodeTaints: []string{"key1=value1:NoSchedule"}, - AvailabilityZones: []string{"zone1"}, - MaxPods: to.Int32Ptr(60), - OsDiskType: to.StringPtr(string(containerservice.OSDiskTypeManaged)), - NodeLabels: map[string]*string{ - "custom": to.StringPtr("default"), - }, - }, - - expect: func(g *GomegaWithT, result containerservice.AgentPool) { - g.Expect(result).To(Equal(containerservice.AgentPool{ - ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ - VMSize: to.StringPtr("Standard_D2s_v3"), - OsType: azure.LinuxOS, - OsDiskSizeGB: to.Int32Ptr(100), - Count: to.Int32Ptr(2), - Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, - OrchestratorVersion: to.StringPtr("1.22.6"), - VnetSubnetID: to.StringPtr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg-123/providers/Microsoft.Network/virtualNetworks/vnet-123/subnets/subnet-123"), - Mode: containerservice.AgentPoolModeUser, - EnableAutoScaling: to.BoolPtr(true), - MaxCount: to.Int32Ptr(5), - MinCount: to.Int32Ptr(2), - NodeTaints: to.StringSlicePtr([]string{"key1=value1:NoSchedule"}), - AvailabilityZones: to.StringSlicePtr([]string{"zone1"}), - MaxPods: to.Int32Ptr(60), - OsDiskType: containerservice.OSDiskTypeManaged, - NodeLabels: map[string]*string{ - "custom": to.StringPtr("default"), - }, - }, - })) - }, - }, - } - - for _, c := range cases { - c := c - t.Run(c.name, func(t *testing.T) { - t.Parallel() - g := NewGomegaWithT(t) - result := AgentPoolToContainerServiceAgentPool(c.pool) - c.expect(g, result) - }) - } -} diff --git a/azure/scope/managedcontrolplane.go b/azure/scope/managedcontrolplane.go index a3e8cd4c2dc..c835c06e96d 100644 --- a/azure/scope/managedcontrolplane.go +++ b/azure/scope/managedcontrolplane.go @@ -470,9 +470,9 @@ func (s *ManagedControlPlaneScope) ManagedClusterSpec(ctx context.Context) azure } // GetAllAgentPoolSpecs gets a slice of azure.AgentPoolSpec for the list of agent pools. 
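+// As of this change each element is built as an *agentpools.AgentPoolSpec (see the new
+// azure/services/agentpools/spec.go) and returned through the azure.ResourceSpecGetter
+// interface so the async agent pool service can consume it.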
-func (s *ManagedControlPlaneScope) GetAllAgentPoolSpecs() ([]azure.AgentPoolSpec, error) { +func (s *ManagedControlPlaneScope) GetAllAgentPoolSpecs() ([]azure.ResourceSpecGetter, error) { var ( - ammps = make([]azure.AgentPoolSpec, 0, len(s.ManagedMachinePools)) + ammps = make([]azure.ResourceSpecGetter, 0, len(s.ManagedMachinePools)) foundSystemPool = false ) for _, pool := range s.ManagedMachinePools { @@ -488,7 +488,7 @@ func (s *ManagedControlPlaneScope) GetAllAgentPoolSpecs() ([]azure.AgentPoolSpec foundSystemPool = true } - ammp := buildAgentPoolSpec(s.ControlPlane, pool.MachinePool, pool.InfraMachinePool) + ammp := buildAgentPoolSpec(s.ControlPlane, pool.MachinePool, pool.InfraMachinePool, pool.InfraMachinePool.Annotations) ammps = append(ammps, ammp) } diff --git a/azure/scope/managedcontrolplane_test.go b/azure/scope/managedcontrolplane_test.go index 0cf3edd5e7f..969b21f44b5 100644 --- a/azure/scope/managedcontrolplane_test.go +++ b/azure/scope/managedcontrolplane_test.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/agentpools" "sigs.k8s.io/cluster-api-provider-azure/azure/services/managedclusters" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -41,7 +42,7 @@ func TestManagedControlPlaneScope_PoolVersion(t *testing.T) { cases := []struct { Name string Input ManagedControlPlaneScopeParams - Expected []azure.AgentPoolSpec + Expected []azure.ResourceSpecGetter Err string }{ { @@ -72,14 +73,15 @@ func TestManagedControlPlaneScope_PoolVersion(t *testing.T) { }, }, }, - Expected: []azure.AgentPoolSpec{ - { + Expected: []azure.ResourceSpecGetter{ + &agentpools.AgentPoolSpec{ Name: "pool0", SKU: "Standard_D2s_v3", Replicas: 1, Mode: "System", Cluster: "cluster1", VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", + Headers: map[string]string{}, }, }, }, @@ -112,8 +114,8 @@ func TestManagedControlPlaneScope_PoolVersion(t *testing.T) { }, }, }, - Expected: []azure.AgentPoolSpec{ - { + Expected: []azure.ResourceSpecGetter{ + &agentpools.AgentPoolSpec{ Name: "pool0", SKU: "Standard_D2s_v3", Mode: "System", @@ -121,6 +123,7 @@ func TestManagedControlPlaneScope_PoolVersion(t *testing.T) { Version: to.StringPtr("1.21.1"), Cluster: "cluster1", VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", + Headers: map[string]string{}, }, }, }, @@ -276,7 +279,7 @@ func TestManagedControlPlaneScope_OSType(t *testing.T) { cases := []struct { Name string Input ManagedControlPlaneScopeParams - Expected []azure.AgentPoolSpec + Expected []azure.ResourceSpecGetter Err string }{ { @@ -316,16 +319,17 @@ func TestManagedControlPlaneScope_OSType(t *testing.T) { }, }, }, - Expected: []azure.AgentPoolSpec{ - { + Expected: []azure.ResourceSpecGetter{ + &agentpools.AgentPoolSpec{ Name: "pool0", SKU: "Standard_D2s_v3", Mode: "System", Replicas: 1, Cluster: "cluster1", VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", + Headers: map[string]string{}, }, - { + &agentpools.AgentPoolSpec{ Name: "pool1", SKU: "Standard_D2s_v3", Mode: "User", @@ -333,8 +337,9 @@ func TestManagedControlPlaneScope_OSType(t *testing.T) { Cluster: 
"cluster1", VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", OSType: to.StringPtr(azure.LinuxOS), + Headers: map[string]string{}, }, - { + &agentpools.AgentPoolSpec{ Name: "pool2", SKU: "Standard_D2s_v3", Mode: "User", @@ -342,6 +347,7 @@ func TestManagedControlPlaneScope_OSType(t *testing.T) { Cluster: "cluster1", VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", OSType: to.StringPtr(azure.WindowsOS), + Headers: map[string]string{}, }, }, }, diff --git a/azure/scope/managedmachinepool.go b/azure/scope/managedmachinepool.go index 1d7b6e3fb66..a1bcb2a9a89 100644 --- a/azure/scope/managedmachinepool.go +++ b/azure/scope/managedmachinepool.go @@ -25,8 +25,10 @@ import ( "github.com/pkg/errors" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/agentpools" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/util/futures" + "sigs.k8s.io/cluster-api-provider-azure/util/maps" "sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" @@ -120,14 +122,20 @@ func (s *ManagedMachinePoolScope) AgentPoolAnnotations() map[string]string { return s.InfraMachinePool.Annotations } -// AgentPoolSpec returns an azure.AgentPoolSpec for currently reconciled AzureManagedMachinePool. -func (s *ManagedMachinePoolScope) AgentPoolSpec() azure.AgentPoolSpec { - return buildAgentPoolSpec(s.ControlPlane, s.MachinePool, s.InfraMachinePool) +// Name returns the name of the infra machine pool. +func (s *ManagedMachinePoolScope) Name() string { + return s.InfraMachinePool.Name +} + +// AgentPoolSpec returns an azure.ResourceSpecGetter for currently reconciled AzureManagedMachinePool. 
+func (s *ManagedMachinePoolScope) AgentPoolSpec() azure.ResourceSpecGetter { + return buildAgentPoolSpec(s.ControlPlane, s.MachinePool, s.InfraMachinePool, s.AgentPoolAnnotations()) } func buildAgentPoolSpec(managedControlPlane *infrav1exp.AzureManagedControlPlane, machinePool *expv1.MachinePool, - managedMachinePool *infrav1exp.AzureManagedMachinePool) azure.AgentPoolSpec { + managedMachinePool *infrav1exp.AzureManagedMachinePool, + agentPoolAnnotations map[string]string) azure.ResourceSpecGetter { var normalizedVersion *string if machinePool.Spec.Template.Spec.Version != nil { v := strings.TrimPrefix(*machinePool.Spec.Template.Spec.Version, "v") @@ -139,7 +147,7 @@ func buildAgentPoolSpec(managedControlPlane *infrav1exp.AzureManagedControlPlane replicas = *machinePool.Spec.Replicas } - agentPoolSpec := azure.AgentPoolSpec{ + agentPoolSpec := &agentpools.AgentPoolSpec{ Name: to.String(managedMachinePool.Spec.Name), ResourceGroup: managedControlPlane.Spec.ResourceGroupName, Cluster: managedControlPlane.Name, @@ -158,6 +166,7 @@ func buildAgentPoolSpec(managedControlPlane *infrav1exp.AzureManagedControlPlane AvailabilityZones: managedMachinePool.Spec.AvailabilityZones, OsDiskType: managedMachinePool.Spec.OsDiskType, EnableUltraSSD: managedMachinePool.Spec.EnableUltraSSD, + Headers: maps.FilterByKeyPrefix(agentPoolAnnotations, azure.CustomHeaderPrefix), } if managedMachinePool.Spec.OSDiskSizeGB != nil { diff --git a/azure/scope/managedmachinepool_test.go b/azure/scope/managedmachinepool_test.go index 46791c4f6a1..8a910787071 100644 --- a/azure/scope/managedmachinepool_test.go +++ b/azure/scope/managedmachinepool_test.go @@ -18,14 +18,17 @@ package scope import ( "context" + "reflect" "testing" "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice" "github.com/Azure/go-autorest/autorest/to" + "github.com/google/go-cmp/cmp" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/agentpools" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" @@ -40,7 +43,7 @@ func TestManagedMachinePoolScope_Autoscaling(t *testing.T) { cases := []struct { Name string Input ManagedMachinePoolScopeParams - Expected azure.AgentPoolSpec + Expected azure.ResourceSpecGetter }{ { Name: "Without Autoscaling", @@ -65,7 +68,7 @@ func TestManagedMachinePoolScope_Autoscaling(t *testing.T) { InfraMachinePool: getAzureMachinePool("pool0", infrav1exp.NodePoolModeSystem), }, }, - Expected: azure.AgentPoolSpec{ + Expected: &agentpools.AgentPoolSpec{ Name: "pool0", SKU: "Standard_D2s_v3", @@ -73,6 +76,7 @@ func TestManagedMachinePoolScope_Autoscaling(t *testing.T) { Mode: "System", Cluster: "cluster1", VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", + Headers: map[string]string{}, }, }, { @@ -98,7 +102,7 @@ func TestManagedMachinePoolScope_Autoscaling(t *testing.T) { InfraMachinePool: getAzureMachinePoolWithScaling("pool1", 2, 10), }, }, - Expected: azure.AgentPoolSpec{ + Expected: &agentpools.AgentPoolSpec{ Name: "pool1", SKU: "Standard_D2s_v3", Mode: "User", @@ -108,6 +112,7 @@ func TestManagedMachinePoolScope_Autoscaling(t *testing.T) { MinCount: to.Int32Ptr(2), MaxCount: to.Int32Ptr(10), VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", + Headers: map[string]string{}, }, }, } @@ -121,7 +126,9 @@ func TestManagedMachinePoolScope_Autoscaling(t *testing.T) { s, err := NewManagedMachinePoolScope(context.TODO(), c.Input) g.Expect(err).To(Succeed()) agentPool := s.AgentPoolSpec() - g.Expect(agentPool).To(Equal(c.Expected)) + if !reflect.DeepEqual(c.Expected, agentPool) { + t.Errorf("Got difference between expected result and result:\n%s", cmp.Diff(c.Expected, agentPool)) + } }) } } @@ -134,7 +141,7 @@ func TestManagedMachinePoolScope_NodeLabels(t *testing.T) { cases := []struct { Name string Input ManagedMachinePoolScopeParams - Expected azure.AgentPoolSpec + Expected azure.ResourceSpecGetter }{ { Name: "Without node labels", @@ -159,14 +166,14 @@ func TestManagedMachinePoolScope_NodeLabels(t *testing.T) { InfraMachinePool: getAzureMachinePool("pool0", infrav1exp.NodePoolModeSystem), }, }, - Expected: azure.AgentPoolSpec{ - + Expected: &agentpools.AgentPoolSpec{ Name: "pool0", SKU: "Standard_D2s_v3", Replicas: 1, Mode: "System", Cluster: "cluster1", VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", + Headers: map[string]string{}, }, }, { @@ -194,7 +201,7 @@ func TestManagedMachinePoolScope_NodeLabels(t *testing.T) { }), }, }, - Expected: azure.AgentPoolSpec{ + Expected: &agentpools.AgentPoolSpec{ Name: "pool1", SKU: "Standard_D2s_v3", Mode: "System", @@ -204,6 +211,7 @@ func TestManagedMachinePoolScope_NodeLabels(t *testing.T) { "custom": to.StringPtr("default"), }, VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", + Headers: map[string]string{}, }, }, } @@ -217,7 +225,9 @@ func TestManagedMachinePoolScope_NodeLabels(t *testing.T) { 
s, err := NewManagedMachinePoolScope(context.TODO(), c.Input) g.Expect(err).To(Succeed()) agentPool := s.AgentPoolSpec() - g.Expect(agentPool).To(Equal(c.Expected)) + if !reflect.DeepEqual(c.Expected, agentPool) { + t.Errorf("Got difference between expected result and result:\n%s", cmp.Diff(c.Expected, agentPool)) + } }) } } @@ -230,7 +240,7 @@ func TestManagedMachinePoolScope_MaxPods(t *testing.T) { cases := []struct { Name string Input ManagedMachinePoolScopeParams - Expected azure.AgentPoolSpec + Expected azure.ResourceSpecGetter }{ { Name: "Without MaxPods", @@ -255,13 +265,14 @@ func TestManagedMachinePoolScope_MaxPods(t *testing.T) { InfraMachinePool: getAzureMachinePool("pool0", infrav1exp.NodePoolModeSystem), }, }, - Expected: azure.AgentPoolSpec{ + Expected: &agentpools.AgentPoolSpec{ Name: "pool0", SKU: "Standard_D2s_v3", Replicas: 1, Mode: "System", Cluster: "cluster1", VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", + Headers: map[string]string{}, }, }, { @@ -287,7 +298,7 @@ func TestManagedMachinePoolScope_MaxPods(t *testing.T) { InfraMachinePool: getAzureMachinePoolWithMaxPods("pool1", 12), }, }, - Expected: azure.AgentPoolSpec{ + Expected: &agentpools.AgentPoolSpec{ Name: "pool1", SKU: "Standard_D2s_v3", Mode: "System", @@ -295,6 +306,7 @@ func TestManagedMachinePoolScope_MaxPods(t *testing.T) { Replicas: 1, MaxPods: to.Int32Ptr(12), VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", + Headers: map[string]string{}, }, }, } @@ -308,7 +320,9 @@ func TestManagedMachinePoolScope_MaxPods(t *testing.T) { s, err := NewManagedMachinePoolScope(context.TODO(), c.Input) g.Expect(err).To(Succeed()) agentPool := s.AgentPoolSpec() - g.Expect(agentPool).To(Equal(c.Expected)) + if !reflect.DeepEqual(c.Expected, agentPool) { + t.Errorf("Got difference between expected result and result:\n%s", cmp.Diff(c.Expected, agentPool)) + } }) } } @@ -321,7 +335,7 @@ func TestManagedMachinePoolScope_Taints(t *testing.T) { cases := []struct { Name string Input ManagedMachinePoolScopeParams - Expected azure.AgentPoolSpec + Expected azure.ResourceSpecGetter }{ { Name: "Without taints", @@ -346,7 +360,7 @@ func TestManagedMachinePoolScope_Taints(t *testing.T) { InfraMachinePool: getAzureMachinePool("pool0", infrav1exp.NodePoolModeSystem), }, }, - Expected: azure.AgentPoolSpec{ + Expected: &agentpools.AgentPoolSpec{ Name: "pool0", SKU: "Standard_D2s_v3", @@ -354,6 +368,7 @@ func TestManagedMachinePoolScope_Taints(t *testing.T) { Mode: "System", Cluster: "cluster1", VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", + Headers: map[string]string{}, }, }, { @@ -385,7 +400,7 @@ func TestManagedMachinePoolScope_Taints(t *testing.T) { }), }, }, - Expected: azure.AgentPoolSpec{ + Expected: &agentpools.AgentPoolSpec{ Name: "pool1", SKU: "Standard_D2s_v3", Mode: "User", @@ -393,6 +408,7 @@ func TestManagedMachinePoolScope_Taints(t *testing.T) { Replicas: 1, NodeTaints: []string{"key1=value1:NoSchedule"}, VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", + Headers: map[string]string{}, }, }, } @@ -406,7 +422,9 @@ func TestManagedMachinePoolScope_Taints(t *testing.T) { s, err := NewManagedMachinePoolScope(context.TODO(), c.Input) g.Expect(err).To(Succeed()) agentPool := 
s.AgentPoolSpec() - g.Expect(agentPool).To(Equal(c.Expected)) + if !reflect.DeepEqual(c.Expected, agentPool) { + t.Errorf("Got difference between expected result and result:\n%s", cmp.Diff(c.Expected, agentPool)) + } }) } } @@ -419,7 +437,7 @@ func TestManagedMachinePoolScope_OSDiskType(t *testing.T) { cases := []struct { Name string Input ManagedMachinePoolScopeParams - Expected azure.AgentPoolSpec + Expected azure.ResourceSpecGetter }{ { Name: "Without OsDiskType", @@ -444,13 +462,14 @@ func TestManagedMachinePoolScope_OSDiskType(t *testing.T) { InfraMachinePool: getAzureMachinePool("pool0", infrav1exp.NodePoolModeSystem), }, }, - Expected: azure.AgentPoolSpec{ + Expected: &agentpools.AgentPoolSpec{ Name: "pool0", SKU: "Standard_D2s_v3", Replicas: 1, Mode: "System", Cluster: "cluster1", VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", + Headers: map[string]string{}, }, }, { @@ -476,7 +495,7 @@ func TestManagedMachinePoolScope_OSDiskType(t *testing.T) { InfraMachinePool: getAzureMachinePoolWithOsDiskType("pool1", string(containerservice.OSDiskTypeEphemeral)), }, }, - Expected: azure.AgentPoolSpec{ + Expected: &agentpools.AgentPoolSpec{ Name: "pool1", SKU: "Standard_D2s_v3", Mode: "User", @@ -484,6 +503,7 @@ func TestManagedMachinePoolScope_OSDiskType(t *testing.T) { Replicas: 1, OsDiskType: to.StringPtr(string(containerservice.OSDiskTypeEphemeral)), VnetSubnetID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups//providers/Microsoft.Network/virtualNetworks//subnets/", + Headers: map[string]string{}, }, }, } @@ -497,7 +517,9 @@ func TestManagedMachinePoolScope_OSDiskType(t *testing.T) { s, err := NewManagedMachinePoolScope(context.TODO(), c.Input) g.Expect(err).To(Succeed()) agentPool := s.AgentPoolSpec() - g.Expect(agentPool).To(Equal(c.Expected)) + if !reflect.DeepEqual(c.Expected, agentPool) { + t.Errorf("Got difference between expected result and result:\n%s", cmp.Diff(c.Expected, agentPool)) + } }) } } diff --git a/azure/services/agentpools/agentpools.go b/azure/services/agentpools/agentpools.go index ba2a9ceed96..d9cb066e1c6 100644 --- a/azure/services/agentpools/agentpools.go +++ b/azure/services/agentpools/agentpools.go @@ -18,28 +18,24 @@ package agentpools import ( "context" - "fmt" - "time" - "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice" - "github.com/google/go-cmp/cmp" - "github.com/pkg/errors" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" - "sigs.k8s.io/cluster-api-provider-azure/azure/converters" - "sigs.k8s.io/cluster-api-provider-azure/util/maps" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/async" "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) const serviceName = "agentpools" -// ManagedMachinePoolScope defines the scope interface for a managed machine pool. -type ManagedMachinePoolScope interface { +// AgentPoolScope defines the scope interface for an agent pool. +type AgentPoolScope interface { azure.ClusterDescriber + azure.AsyncStatusUpdater + Name() string NodeResourceGroup() string AgentPoolAnnotations() map[string]string - AgentPoolSpec() azure.AgentPoolSpec + AgentPoolSpec() azure.ResourceSpecGetter SetAgentPoolProviderIDList([]string) SetAgentPoolReplicas(int32) SetAgentPoolReady(bool) @@ -47,15 +43,16 @@ type ManagedMachinePoolScope interface { // Service provides operations on Azure resources. 
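+// The embedded async.Reconciler supplies the generic CreateResource/DeleteResource
+// machinery: agent pool operations that outlive a single reconcile are handed back as
+// futures and completed across subsequent reconciles.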
type Service struct { - scope ManagedMachinePoolScope - Client + scope AgentPoolScope + async.Reconciler } // New creates a new service. -func New(scope ManagedMachinePoolScope) *Service { +func New(scope AgentPoolScope) *Service { + client := newClient(scope) return &Service{ - scope: scope, - Client: NewClient(scope), + scope: scope, + Reconciler: async.New(scope, client, client), } } @@ -66,104 +63,32 @@ func (s *Service) Name() string { // Reconcile idempotently creates or updates a agent pool, if possible. func (s *Service) Reconcile(ctx context.Context) error { - ctx, log, done := tele.StartSpanWithLogger( - ctx, - "agentpools.Service.Reconcile", - ) + ctx, _, done := tele.StartSpanWithLogger(ctx, "agentpools.Service.Reconcile") defer done() - agentPoolSpec := s.scope.AgentPoolSpec() - profile := converters.AgentPoolToContainerServiceAgentPool(agentPoolSpec) - - existingPool, err := s.Client.Get(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name) - if err != nil && !azure.ResourceNotFound(err) { - return errors.Wrap(err, "failed to get existing agent pool") - } - - // For updates, we want to pass whatever we find in the existing - // cluster, normalized to reflect the input we originally provided. - // AKS will populate defaults and read-only values, which we want - // to strip/clean to match what we expect. - - customHeaders := maps.FilterByKeyPrefix(s.scope.AgentPoolAnnotations(), azure.CustomHeaderPrefix) - if isCreate := azure.ResourceNotFound(err); isCreate { - err = s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, - profile, customHeaders) - if err != nil && azure.ResourceNotFound(err) { - return azure.WithTransientError(errors.Wrap(err, "agent pool dependent resource does not exist yet"), 20*time.Second) - } else if err != nil { - return errors.Wrap(err, "failed to create or update agent pool") - } + var resultingErr error + if agentPoolSpec := s.scope.AgentPoolSpec(); agentPoolSpec != nil { + _, resultingErr = s.CreateResource(ctx, agentPoolSpec, serviceName) } else { - ps := *existingPool.ManagedClusterAgentPoolProfileProperties.ProvisioningState - if ps != string(infrav1.Canceled) && ps != string(infrav1.Failed) && ps != string(infrav1.Succeeded) { - msg := fmt.Sprintf("Unable to update existing agent pool in non terminal state. Agent pool must be in one of the following provisioning states: canceled, failed, or succeeded. 
Actual state: %s", ps) - log.V(2).Info(msg) - return azure.WithTransientError(errors.New(msg), 20*time.Second) - } - - // Normalize individual agent pools to diff in case we need to update - existingProfile := containerservice.AgentPool{ - ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ - Count: existingPool.Count, - OrchestratorVersion: existingPool.OrchestratorVersion, - Mode: existingPool.Mode, - EnableAutoScaling: existingPool.EnableAutoScaling, - MinCount: existingPool.MinCount, - MaxCount: existingPool.MaxCount, - NodeLabels: existingPool.NodeLabels, - }, - } - - normalizedProfile := containerservice.AgentPool{ - ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ - Count: profile.Count, - OrchestratorVersion: profile.OrchestratorVersion, - Mode: profile.Mode, - EnableAutoScaling: profile.EnableAutoScaling, - MinCount: profile.MinCount, - MaxCount: profile.MaxCount, - NodeLabels: profile.NodeLabels, - }, - } - - // Diff and check if we require an update - diff := cmp.Diff(normalizedProfile, existingProfile) - if diff != "" { - log.V(2).Info(fmt.Sprintf("Update required (+new -old):\n%s", diff)) - err = s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, - profile, customHeaders) - if err != nil { - return errors.Wrap(err, "failed to create or update agent pool") - } - } else { - log.V(2).Info("Normalized and desired agent pool matched, no update needed") - } + return nil } - return nil + s.scope.UpdatePutStatus(infrav1.AgentPoolsReadyCondition, serviceName, resultingErr) + return resultingErr } // Delete deletes the virtual network with the provided name. func (s *Service) Delete(ctx context.Context) error { - ctx, log, done := tele.StartSpanWithLogger( - ctx, - "agentpools.Service.Delete", - ) + ctx, _, done := tele.StartSpanWithLogger(ctx, "agentpools.Service.Delete") defer done() - agentPoolSpec := s.scope.AgentPoolSpec() - - log.V(2).Info(fmt.Sprintf("deleting agent pool %s ", agentPoolSpec.Name)) - err := s.Client.Delete(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name) - if err != nil { - if azure.ResourceNotFound(err) { - // already deleted - return nil - } - return errors.Wrapf(err, "failed to delete agent pool %s in resource group %s", agentPoolSpec.Name, agentPoolSpec.ResourceGroup) + var resultingErr error + if agentPoolSpec := s.scope.AgentPoolSpec(); agentPoolSpec != nil { + resultingErr = s.DeleteResource(ctx, agentPoolSpec, serviceName) + } else { + return nil } - log.V(2).Info(fmt.Sprintf("Successfully deleted agent pool %s ", agentPoolSpec.Name)) - return nil + s.scope.UpdateDeleteStatus(infrav1.AgentPoolsReadyCondition, serviceName, resultingErr) + return resultingErr } diff --git a/azure/services/agentpools/agentpools_test.go b/azure/services/agentpools/agentpools_test.go index ce85c3fcb44..01e0777c152 100644 --- a/azure/services/agentpools/agentpools_test.go +++ b/azure/services/agentpools/agentpools_test.go @@ -21,444 +21,91 @@ import ( "net/http" "testing" - "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/to" "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/cluster-api-provider-azure/azure" - "sigs.k8s.io/cluster-api-provider-azure/azure/scope" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure/services/agentpools/mock_agentpools" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/async/mock_async" gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" ) -func TestReconcile(t *testing.T) { - provisioningstatetestcases := []struct { - name string - agentpoolSpec azure.AgentPoolSpec - provisioningStatesToTest []string - expectedError string - expect func(m *mock_agentpools.MockClientMockRecorder, provisioningstate string) - }{ - { - name: "agentpool in terminal provisioning state", - agentpoolSpec: azure.AgentPoolSpec{ - ResourceGroup: "my-rg", - Cluster: "my-cluster", - Name: "my-agentpool", - }, - provisioningStatesToTest: []string{"Canceled", "Succeeded", "Failed"}, - expectedError: "", - expect: func(m *mock_agentpools.MockClientMockRecorder, provisioningstate string) { - pv := provisioningstate - m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agentpool", gomock.Any(), gomock.Any()).Return(nil) - m.Get(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agentpool").Return(containerservice.AgentPool{ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ - ProvisioningState: &pv, - }}, nil) - }, - }, - { - name: "agentpool in nonterminal provisioning state", - agentpoolSpec: azure.AgentPoolSpec{ - ResourceGroup: "my-rg", - Cluster: "my-cluster", - Name: "my-agentpool", - }, - provisioningStatesToTest: []string{"Deleting", "InProgress", "randomStringHere"}, - expectedError: "Unable to update existing agent pool in non terminal state. Agent pool must be in one of the following provisioning states: canceled, failed, or succeeded. 
Actual state:", - expect: func(m *mock_agentpools.MockClientMockRecorder, provisioningstate string) { - m.Get(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agentpool").Return(containerservice.AgentPool{ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ - ProvisioningState: &provisioningstate, - }}, nil) - }, - }, - } - - for _, tc := range provisioningstatetestcases { - for _, provisioningstate := range tc.provisioningStatesToTest { - tc := tc - provisioningstate := provisioningstate - t.Logf("Testing agentpool provision state: " + provisioningstate) - t.Run(tc.name, func(t *testing.T) { - g := NewWithT(t) - t.Parallel() - - mockCtrl := gomock.NewController(t) - defer mockCtrl.Finish() - - agentpoolsMock := mock_agentpools.NewMockClient(mockCtrl) - machinePoolScope := &scope.ManagedMachinePoolScope{ - ControlPlane: &infrav1exp.AzureManagedControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: tc.agentpoolSpec.Cluster, - }, - Spec: infrav1exp.AzureManagedControlPlaneSpec{ - ResourceGroupName: tc.agentpoolSpec.ResourceGroup, - }, - }, - MachinePool: &expv1.MachinePool{}, - InfraMachinePool: &infrav1exp.AzureManagedMachinePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: tc.agentpoolSpec.Name, - }, - Spec: infrav1exp.AzureManagedMachinePoolSpec{ - Name: &tc.agentpoolSpec.Name, - }, - }, - } - - tc.expect(agentpoolsMock.EXPECT(), provisioningstate) - - s := &Service{ - Client: agentpoolsMock, - scope: machinePoolScope, - } - - err := s.Reconcile(context.TODO()) - if tc.expectedError != "" { - g.Expect(err.Error()).To(ContainSubstring(tc.expectedError)) - g.Expect(err.Error()).To(ContainSubstring(provisioningstate)) - } else { - g.Expect(err).NotTo(HaveOccurred()) - } - }) - } - } - - testcases := []struct { - name string - agentPoolsSpec azure.AgentPoolSpec - expectedError string - expect func(m *mock_agentpools.MockClientMockRecorder) - }{ - { - name: "no agentpool exists", - agentPoolsSpec: azure.AgentPoolSpec{ - ResourceGroup: "my-rg", - Cluster: "my-cluster", - Name: "my-agentpool", - }, - expectedError: "", - expect: func(m *mock_agentpools.MockClientMockRecorder) { - m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agentpool", gomock.Any(), gomock.Any()).Return(nil) - m.Get(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agentpool").Return(containerservice.AgentPool{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not Found")) - }, - }, - { - name: "fail to get existing agent pool", - agentPoolsSpec: azure.AgentPoolSpec{ - Name: "my-agent-pool", - ResourceGroup: "my-rg", - Cluster: "my-cluster", - SKU: "SKU123", - Version: to.StringPtr("9.99.9999"), - Replicas: 2, - OSDiskSizeGB: 100, - MaxPods: to.Int32Ptr(12), - OsDiskType: to.StringPtr(string(containerservice.OSDiskTypeManaged)), - }, - expectedError: "failed to get existing agent pool: #: Internal Server Error: StatusCode=500", - expect: func(m *mock_agentpools.MockClientMockRecorder) { - m.Get(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool").Return(containerservice.AgentPool{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 500}, "Internal Server Error")) - }, - }, - { - name: "can create an Agent Pool", - agentPoolsSpec: azure.AgentPoolSpec{ - Name: "my-agent-pool", - ResourceGroup: "my-rg", - Cluster: "my-cluster", - SKU: "SKU123", - Version: to.StringPtr("9.99.9999"), - Replicas: 2, - OSDiskSizeGB: 100, - MaxPods: to.Int32Ptr(12), - OsDiskType: 
to.StringPtr(string(containerservice.OSDiskTypeManaged)), - }, - expectedError: "", - expect: func(m *mock_agentpools.MockClientMockRecorder) { - m.Get(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool").Return(containerservice.AgentPool{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) - m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool", gomock.AssignableToTypeOf(containerservice.AgentPool{}), gomock.Any()).Return(nil) - }, - }, - { - name: "fail to create an Agent Pool", - agentPoolsSpec: azure.AgentPoolSpec{ - Name: "my-agent-pool", - ResourceGroup: "my-rg", - Cluster: "my-cluster", - SKU: "SKU123", - Version: to.StringPtr("9.99.9999"), - Replicas: 2, - OSDiskSizeGB: 100, - MaxPods: to.Int32Ptr(12), - OsDiskType: to.StringPtr(string(containerservice.OSDiskTypeManaged)), - }, - expectedError: "failed to create or update agent pool: #: Internal Server Error: StatusCode=500", - expect: func(m *mock_agentpools.MockClientMockRecorder) { - m.Get(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool").Return(containerservice.AgentPool{}, autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found")) - m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool", gomock.AssignableToTypeOf(containerservice.AgentPool{}), gomock.Any()).Return(autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 500}, "Internal Server Error")) - }, - }, - { - name: "fail to update an Agent Pool", - agentPoolsSpec: azure.AgentPoolSpec{ - Name: "my-agent-pool", - ResourceGroup: "my-rg", - Cluster: "my-cluster", - SKU: "SKU123", - Version: to.StringPtr("9.99.9999"), - Replicas: 2, - OSDiskSizeGB: 100, - MaxPods: to.Int32Ptr(12), - OsDiskType: to.StringPtr(string(containerservice.OSDiskTypeManaged)), - }, - expectedError: "failed to create or update agent pool: #: Internal Server Error: StatusCode=500", - expect: func(m *mock_agentpools.MockClientMockRecorder) { - m.Get(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool").Return(containerservice.AgentPool{ - ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ - Count: to.Int32Ptr(3), - OsDiskSizeGB: to.Int32Ptr(20), - VMSize: to.StringPtr(string(containerservice.VMSizeTypesStandardA1)), - OrchestratorVersion: to.StringPtr("9.99.9999"), - ProvisioningState: to.StringPtr("Failed"), - }, - }, nil) - m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool", gomock.AssignableToTypeOf(containerservice.AgentPool{}), gomock.Any()).Return(autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 500}, "Internal Server Error")) - }, - }, +var ( + fakeAgentPoolSpec = AgentPoolSpec{ + Name: "fake-agent-pool-name", + ResourceGroup: "fake-rg", + Cluster: "fake-cluster", + Version: to.StringPtr("fake-version"), + SKU: "fake-sku", + Replicas: 1, + OSDiskSizeGB: 2, + VnetSubnetID: "fake-vnet-subnet-id", + Mode: "fake-mode", + MaxCount: to.Int32Ptr(5), + MinCount: to.Int32Ptr(1), + NodeLabels: map[string]*string{"fake-label": to.StringPtr("fake-value")}, + NodeTaints: []string{"fake-taint"}, + EnableAutoScaling: to.BoolPtr(true), + AvailabilityZones: []string{"fake-zone"}, + MaxPods: to.Int32Ptr(10), + OsDiskType: to.StringPtr("fake-os-disk-type"), + EnableUltraSSD: to.BoolPtr(true), + OSType: to.StringPtr("fake-os-type"), + Headers: map[string]string{"fake-header": "fake-value"}, } - for _, tc := range testcases { - t.Logf("Testing " + 
tc.name) - tc := tc - t.Run(tc.name, func(t *testing.T) { - g := NewWithT(t) - t.Parallel() - mockCtrl := gomock.NewController(t) - defer mockCtrl.Finish() - - replicas := tc.agentPoolsSpec.Replicas - osDiskSizeGB := tc.agentPoolsSpec.OSDiskSizeGB - - agentpoolsMock := mock_agentpools.NewMockClient(mockCtrl) - machinePoolScope := &scope.ManagedMachinePoolScope{ - ControlPlane: &infrav1exp.AzureManagedControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: tc.agentPoolsSpec.Cluster, - }, - Spec: infrav1exp.AzureManagedControlPlaneSpec{ - ResourceGroupName: tc.agentPoolsSpec.ResourceGroup, - }, - }, - MachinePool: &expv1.MachinePool{ - Spec: expv1.MachinePoolSpec{ - Replicas: &replicas, - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ - Version: tc.agentPoolsSpec.Version, - }, - }, - }, - }, - InfraMachinePool: &infrav1exp.AzureManagedMachinePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: tc.agentPoolsSpec.Name, - }, - Spec: infrav1exp.AzureManagedMachinePoolSpec{ - Name: &tc.agentPoolsSpec.Name, - SKU: tc.agentPoolsSpec.SKU, - OSDiskSizeGB: &osDiskSizeGB, - MaxPods: to.Int32Ptr(12), - OsDiskType: to.StringPtr(string(containerservice.OSDiskTypeManaged)), - }, - }, - } - - tc.expect(agentpoolsMock.EXPECT()) - - s := &Service{ - Client: agentpoolsMock, - scope: machinePoolScope, - } - - err := s.Reconcile(context.TODO()) - if tc.expectedError != "" { - g.Expect(err).To(HaveOccurred()) - g.Expect(err).To(MatchError(tc.expectedError)) - } else { - g.Expect(err).NotTo(HaveOccurred()) - } - }) - } -} + internalError = autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 500}, "Internal Server Error") +) -func TestNormalizedDiff(t *testing.T) { +func TestReconcileAgentPools(t *testing.T) { testcases := []struct { - name string - agentPoolsSpec azure.AgentPoolSpec - expectedError string - expect func(m *mock_agentpools.MockClientMockRecorder) + name string + expectedError string + expect func(s *mock_agentpools.MockAgentPoolScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) }{ { - name: "no update needed on Agent Pool", - agentPoolsSpec: azure.AgentPoolSpec{ - Name: "my-agent-pool", - ResourceGroup: "my-rg", - Cluster: "my-cluster", - SKU: "Standard_D2s_v3", - Version: to.StringPtr("9.99.9999"), - Replicas: 2, - OSDiskSizeGB: 100, - MaxPods: to.Int32Ptr(12), - OsDiskType: to.StringPtr(string(containerservice.OSDiskTypeEphemeral)), - }, + name: "agent pool successfully created", expectedError: "", - expect: func(m *mock_agentpools.MockClientMockRecorder) { - m.Get(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool").Return(containerservice.AgentPool{ - ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ - Count: to.Int32Ptr(2), - OsDiskSizeGB: to.Int32Ptr(100), - VMSize: to.StringPtr(string(containerservice.VMSizeTypesStandardD2sV3)), - OsType: containerservice.OSTypeLinux, - OrchestratorVersion: to.StringPtr("9.99.9999"), - ProvisioningState: to.StringPtr("Succeeded"), - VnetSubnetID: to.StringPtr(""), - MaxPods: to.Int32Ptr(12), - OsDiskType: containerservice.OSDiskTypeEphemeral, - }, - }, nil) + expect: func(s *mock_agentpools.MockAgentPoolScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { + s.AgentPoolSpec().Return(&fakeAgentPoolSpec) + r.CreateResource(gomockinternal.AContext(), &fakeAgentPoolSpec, serviceName).Return(nil, nil) + s.UpdatePutStatus(infrav1.AgentPoolsReadyCondition, serviceName, nil) }, }, { - name: "update needed on autoscaler configuration change", - 
agentPoolsSpec: azure.AgentPoolSpec{ - Name: "my-agent-pool", - ResourceGroup: "my-rg", - Cluster: "my-cluster", - SKU: "Standard_D2s_v3", - Version: to.StringPtr("9.99.9999"), - EnableAutoScaling: to.BoolPtr(true), - MinCount: to.Int32Ptr(1), - MaxCount: to.Int32Ptr(3), - OSDiskSizeGB: 100, - MaxPods: to.Int32Ptr(12), - OsDiskType: to.StringPtr(string(containerservice.OSDiskTypeEphemeral)), - }, + name: "no agent pool spec found", expectedError: "", - expect: func(m *mock_agentpools.MockClientMockRecorder) { - m.Get(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool").Return(containerservice.AgentPool{ - ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ - EnableAutoScaling: to.BoolPtr(true), - MinCount: to.Int32Ptr(1), - MaxCount: to.Int32Ptr(5), - OsDiskSizeGB: to.Int32Ptr(100), - VMSize: to.StringPtr(string(containerservice.VMSizeTypesStandardD2sV3)), - OsType: containerservice.OSTypeLinux, - OrchestratorVersion: to.StringPtr("9.99.9999"), - ProvisioningState: to.StringPtr("Succeeded"), - VnetSubnetID: to.StringPtr(""), - MaxPods: to.Int32Ptr(12), - OsDiskType: containerservice.OSDiskTypeEphemeral, - }, - }, nil) - m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool", gomock.AssignableToTypeOf(containerservice.AgentPool{}), gomock.Any()).Return(nil) + expect: func(s *mock_agentpools.MockAgentPoolScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { + s.AgentPoolSpec().Return(nil) }, }, { - name: "update needed on nodepool labels change", - agentPoolsSpec: azure.AgentPoolSpec{ - Name: "my-agent-pool", - ResourceGroup: "my-rg", - Cluster: "my-cluster", - SKU: "Standard_D2s_v3", - Version: to.StringPtr("9.99.9999"), - EnableAutoScaling: to.BoolPtr(true), - MinCount: to.Int32Ptr(1), - MaxCount: to.Int32Ptr(3), - OSDiskSizeGB: 100, - MaxPods: to.Int32Ptr(12), - OsDiskType: to.StringPtr(string(containerservice.OSDiskTypeEphemeral)), - NodeLabels: map[string]*string{"workload": to.StringPtr("stateless")}, - }, - expectedError: "", - expect: func(m *mock_agentpools.MockClientMockRecorder) { - m.Get(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool").Return(containerservice.AgentPool{ - ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ - EnableAutoScaling: to.BoolPtr(true), - MinCount: to.Int32Ptr(1), - MaxCount: to.Int32Ptr(3), - OsDiskSizeGB: to.Int32Ptr(100), - VMSize: to.StringPtr(string(containerservice.VMSizeTypesStandardD2sV3)), - OsType: containerservice.OSTypeLinux, - OrchestratorVersion: to.StringPtr("9.99.9999"), - ProvisioningState: to.StringPtr("Succeeded"), - VnetSubnetID: to.StringPtr(""), - MaxPods: to.Int32Ptr(12), - OsDiskType: containerservice.OSDiskTypeEphemeral, - NodeLabels: map[string]*string{"workload": to.StringPtr("all")}, - }, - }, nil) - m.CreateOrUpdate(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool", gomock.AssignableToTypeOf(containerservice.AgentPool{}), gomock.Any()).Return(nil) + name: "fail to create a agent pool", + expectedError: internalError.Error(), + expect: func(s *mock_agentpools.MockAgentPoolScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { + s.AgentPoolSpec().Return(&fakeAgentPoolSpec) + r.CreateResource(gomockinternal.AContext(), &fakeAgentPoolSpec, serviceName).Return(nil, internalError) + s.UpdatePutStatus(infrav1.AgentPoolsReadyCondition, serviceName, internalError) }, }, } for _, tc := range testcases { - t.Logf("Testing " + tc.name) tc := tc 
t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) t.Parallel() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() + scopeMock := mock_agentpools.NewMockAgentPoolScope(mockCtrl) + asyncMock := mock_async.NewMockReconciler(mockCtrl) - replicas := tc.agentPoolsSpec.Replicas - osDiskSizeGB := tc.agentPoolsSpec.OSDiskSizeGB - - agentpoolsMock := mock_agentpools.NewMockClient(mockCtrl) - machinePoolScope := &scope.ManagedMachinePoolScope{ - ControlPlane: &infrav1exp.AzureManagedControlPlane{ - ObjectMeta: metav1.ObjectMeta{ - Name: tc.agentPoolsSpec.Cluster, - }, - Spec: infrav1exp.AzureManagedControlPlaneSpec{ - ResourceGroupName: tc.agentPoolsSpec.ResourceGroup, - }, - }, - MachinePool: &expv1.MachinePool{ - Spec: expv1.MachinePoolSpec{ - Replicas: &replicas, - Template: clusterv1.MachineTemplateSpec{ - Spec: clusterv1.MachineSpec{ - Version: tc.agentPoolsSpec.Version, - }, - }, - }, - }, - InfraMachinePool: &infrav1exp.AzureManagedMachinePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: tc.agentPoolsSpec.Name, - }, - Spec: infrav1exp.AzureManagedMachinePoolSpec{ - Name: &tc.agentPoolsSpec.Name, - SKU: tc.agentPoolsSpec.SKU, - OSDiskSizeGB: &osDiskSizeGB, - MaxPods: to.Int32Ptr(12), - OsDiskType: to.StringPtr(string(containerservice.OSDiskTypeManaged)), - }, - }, - } - - tc.expect(agentpoolsMock.EXPECT()) + tc.expect(scopeMock.EXPECT(), asyncMock.EXPECT()) s := &Service{ - Client: agentpoolsMock, - scope: machinePoolScope, + scope: scopeMock, + Reconciler: asyncMock, } err := s.Reconcile(context.TODO()) @@ -474,47 +121,33 @@ func TestNormalizedDiff(t *testing.T) { func TestDeleteAgentPools(t *testing.T) { testcases := []struct { - name string - agentPoolsSpec azure.AgentPoolSpec - expectedError string - expect func(m *mock_agentpools.MockClientMockRecorder) + name string + expectedError string + expect func(s *mock_agentpools.MockAgentPoolScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) }{ { - name: "successfully delete an existing agent pool", - agentPoolsSpec: azure.AgentPoolSpec{ - Name: "my-agent-pool", - ResourceGroup: "my-rg", - Cluster: "my-cluster", - }, + name: "existing agent pool successfully deleted", expectedError: "", - expect: func(m *mock_agentpools.MockClientMockRecorder) { - m.Delete(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool") + expect: func(s *mock_agentpools.MockAgentPoolScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { + s.AgentPoolSpec().Return(&fakeAgentPoolSpec) + r.DeleteResource(gomockinternal.AContext(), &fakeAgentPoolSpec, serviceName).Return(nil) + s.UpdateDeleteStatus(infrav1.AgentPoolsReadyCondition, serviceName, nil) }, }, { - name: "agent pool already deleted", - agentPoolsSpec: azure.AgentPoolSpec{ - Name: "my-agent-pool", - ResourceGroup: "my-rg", - Cluster: "my-cluster", - }, + name: "no agent pool spec found", expectedError: "", - expect: func(m *mock_agentpools.MockClientMockRecorder) { - m.Delete(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool"). 
-					Return(autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 404}, "Not found"))
+			expect: func(s *mock_agentpools.MockAgentPoolScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) {
+				s.AgentPoolSpec().Return(nil)
 			},
 		},
 		{
-			name: "agent pool deletion fails",
-			agentPoolsSpec: azure.AgentPoolSpec{
-				Name:          "my-agent-pool",
-				ResourceGroup: "my-rg",
-				Cluster:       "my-cluster",
-			},
-			expectedError: "failed to delete agent pool my-agent-pool in resource group my-rg: #: Internal Server Error: StatusCode=500",
-			expect: func(m *mock_agentpools.MockClientMockRecorder) {
-				m.Delete(gomockinternal.AContext(), "my-rg", "my-cluster", "my-agent-pool").
-					Return(autorest.NewErrorWithResponse("", "", &http.Response{StatusCode: 500}, "Internal Server Error"))
+			name:          "fail to delete an agent pool",
+			expectedError: internalError.Error(),
+			expect: func(s *mock_agentpools.MockAgentPoolScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) {
+				s.AgentPoolSpec().Return(&fakeAgentPoolSpec)
+				r.DeleteResource(gomockinternal.AContext(), &fakeAgentPoolSpec, serviceName).Return(internalError)
+				s.UpdateDeleteStatus(infrav1.AgentPoolsReadyCondition, serviceName, internalError)
 			},
 		},
 	}
@@ -526,33 +159,14 @@
 			t.Parallel()
 			mockCtrl := gomock.NewController(t)
 			defer mockCtrl.Finish()
+			scopeMock := mock_agentpools.NewMockAgentPoolScope(mockCtrl)
+			asyncMock := mock_async.NewMockReconciler(mockCtrl)
 
-			agentPoolsMock := mock_agentpools.NewMockClient(mockCtrl)
-			machinePoolScope := &scope.ManagedMachinePoolScope{
-				ControlPlane: &infrav1exp.AzureManagedControlPlane{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: tc.agentPoolsSpec.Cluster,
-					},
-					Spec: infrav1exp.AzureManagedControlPlaneSpec{
-						ResourceGroupName: tc.agentPoolsSpec.ResourceGroup,
-					},
-				},
-				MachinePool: &expv1.MachinePool{},
-				InfraMachinePool: &infrav1exp.AzureManagedMachinePool{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: tc.agentPoolsSpec.Name,
-					},
-					Spec: infrav1exp.AzureManagedMachinePoolSpec{
-						Name: &tc.agentPoolsSpec.Name,
-					},
-				},
-			}
-
-			tc.expect(agentPoolsMock.EXPECT())
+			tc.expect(scopeMock.EXPECT(), asyncMock.EXPECT())
 
 			s := &Service{
-				Client: agentPoolsMock,
-				scope:  machinePoolScope,
+				scope:      scopeMock,
+				Reconciler: asyncMock,
 			}
 
 			err := s.Delete(context.TODO())
diff --git a/azure/services/agentpools/client.go b/azure/services/agentpools/client.go
index 3418f4b245d..22d15b77a57 100644
--- a/azure/services/agentpools/client.go
+++ b/azure/services/agentpools/client.go
@@ -18,32 +18,27 @@ package agentpools
 
 import (
 	"context"
+	"encoding/json"
 
 	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice"
 	"github.com/Azure/go-autorest/autorest"
+	azureautorest "github.com/Azure/go-autorest/autorest/azure"
 	"github.com/pkg/errors"
+	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-azure/azure"
+	"sigs.k8s.io/cluster-api-provider-azure/util/reconciler"
 	"sigs.k8s.io/cluster-api-provider-azure/util/tele"
 )
 
-// Client wraps go-sdk.
-type Client interface {
-	Get(context.Context, string, string, string) (containerservice.AgentPool, error)
-	CreateOrUpdate(context.Context, string, string, string, containerservice.AgentPool, map[string]string) error
-	Delete(context.Context, string, string, string) error
-}
-
-// AzureClient contains the Azure go-sdk Client.
-type AzureClient struct {
+// azureClient contains the Azure go-sdk Client.
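+// The same instance is passed to async.New twice in agentpools.New, covering both the
+// create/update and delete sides of the async contract for agent pools.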
+type azureClient struct { agentpools containerservice.AgentPoolsClient } -var _ Client = &AzureClient{} - -// NewClient creates a new agent pools client from subscription ID. -func NewClient(auth azure.Authorizer) *AzureClient { +// newClient creates a new agent pools client from subscription ID. +func newClient(auth azure.Authorizer) *azureClient { c := newAgentPoolsClient(auth.SubscriptionID(), auth.BaseURI(), auth.Authorizer()) - return &AzureClient{c} + return &azureClient{c} } // newAgentPoolsClient creates a new agent pool client from subscription ID. @@ -54,50 +49,125 @@ func newAgentPoolsClient(subscriptionID string, baseURI string, authorizer autor } // Get gets an agent pool. -func (ac *AzureClient) Get(ctx context.Context, resourceGroupName, cluster, name string) (containerservice.AgentPool, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "agentpools.AzureClient.Get") +func (ac *azureClient) Get(ctx context.Context, spec azure.ResourceSpecGetter) (result interface{}, err error) { + ctx, _, done := tele.StartSpanWithLogger(ctx, "agentpools.azureClient.Get") defer done() - return ac.agentpools.Get(ctx, resourceGroupName, cluster, name) + return ac.agentpools.Get(ctx, spec.ResourceGroupName(), spec.OwnerResourceName(), spec.ResourceName()) } -// CreateOrUpdate creates or updates an agent pool. -func (ac *AzureClient) CreateOrUpdate(ctx context.Context, resourceGroupName, cluster, name string, - properties containerservice.AgentPool, customHeaders map[string]string) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "agentpools.AzureClient.CreateOrUpdate") +// CreateOrUpdateAsync creates or updates an agent pool asynchronously. +// It sends a PUT request to Azure and if accepted without error, the func will return a Future which can be used to track the ongoing +// progress of the operation. 
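+// If the operation does not complete within reconciler.DefaultAzureCallTimeout, the
+// future is returned so the caller can persist it (the scope embeds
+// azure.AsyncStatusUpdater for this) and resume polling on a later reconcile.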
+func (ac *azureClient) CreateOrUpdateAsync(ctx context.Context, spec azure.ResourceSpecGetter, parameters interface{}) (result interface{}, future azureautorest.FutureAPI, err error) { + ctx, _, done := tele.StartSpanWithLogger(ctx, "agentpools.azureClient.CreateOrUpdate") defer done() - preparer, err := ac.agentpools.CreateOrUpdatePreparer(ctx, resourceGroupName, cluster, name, properties) + agentPool, ok := parameters.(containerservice.AgentPool) + if !ok { + return nil, nil, errors.Errorf("%T is not a containerservice.AgentPool", parameters) + } + + preparer, err := ac.agentpools.CreateOrUpdatePreparer(ctx, spec.ResourceGroupName(), spec.OwnerResourceName(), spec.ResourceName(), agentPool) if err != nil { - return errors.Wrap(err, "failed to prepare operation") + return nil, nil, errors.Wrap(err, "failed to prepare operation") + } + + headerSpec, ok := spec.(azure.ResourceSpecGetterWithHeaders) + if !ok { + return nil, nil, errors.Errorf("%T is not a azure.ResourceSpecGetterWithHeaders", spec) } - for key, element := range customHeaders { + for key, element := range headerSpec.CustomHeaders() { preparer.Header.Add(key, element) } - future, err := ac.agentpools.CreateOrUpdateSender(preparer) + createFuture, err := ac.agentpools.CreateOrUpdateSender(preparer) if err != nil { - return errors.Wrap(err, "failed to begin operation") + return nil, nil, errors.Wrap(err, "failed to begin operation") } - if err := future.WaitForCompletionRef(ctx, ac.agentpools.Client); err != nil { - return errors.Wrap(err, "failed to end operation") + + ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultAzureCallTimeout) + defer cancel() + + err = createFuture.WaitForCompletionRef(ctx, ac.agentpools.Client) + if err != nil { + // if an error occurs, return the future. + // this means the long-running operation didn't finish in the specified timeout. + return nil, &createFuture, err } - _, err = future.Result(ac.agentpools) - return err + result, err = createFuture.Result(ac.agentpools) + // if the operation completed, return a nil future + return result, nil, err } -// Delete deletes an agent pool. -func (ac *AzureClient) Delete(ctx context.Context, resourceGroupName, cluster, name string) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "agentpools.AzureClient.Delete") +// DeleteAsync deletes an agent pool asynchronously. DeleteAsync sends a DELETE +// request to Azure and if accepted without error, the func will return a Future which can be used to track the ongoing +// progress of the operation. +func (ac *azureClient) DeleteAsync(ctx context.Context, spec azure.ResourceSpecGetter) (future azureautorest.FutureAPI, err error) { + ctx, _, done := tele.StartSpanWithLogger(ctx, "agentpools.azureClient.Delete") defer done() - future, err := ac.agentpools.Delete(ctx, resourceGroupName, cluster, name) + deleteFuture, err := ac.agentpools.Delete(ctx, spec.ResourceGroupName(), spec.OwnerResourceName(), spec.ResourceName()) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultAzureCallTimeout) + defer cancel() + + err = deleteFuture.WaitForCompletionRef(ctx, ac.agentpools.Client) if err != nil { - return errors.Wrap(err, "failed to begin operation") + // if an error occurs, return the future. + // this means the long-running operation didn't finish in the specified timeout. 
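+ // The generic async service persists the returned future on the scope via
+ // SetLongRunningOperationState and polls it with IsDone on later reconciles,
+ // so the controller is not blocked while AKS finishes the delete.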
+ return &deleteFuture, err } - if err := future.WaitForCompletionRef(ctx, ac.agentpools.Client); err != nil { - return errors.Wrap(err, "failed to end operation") + _, err = deleteFuture.Result(ac.agentpools) + // if the operation completed, return a nil future. + return nil, err +} + +// IsDone returns true if the long-running operation has completed. +func (ac *azureClient) IsDone(ctx context.Context, future azureautorest.FutureAPI) (isDone bool, err error) { + ctx, _, done := tele.StartSpanWithLogger(ctx, "agentpools.azureClient.IsDone") + defer done() + + isDone, err = future.DoneWithContext(ctx, ac.agentpools) + if err != nil { + return false, errors.Wrap(err, "failed checking if the operation was complete") + } + + return isDone, nil +} + +// Result fetches the result of a long-running operation future. +func (ac *azureClient) Result(ctx context.Context, future azureautorest.FutureAPI, futureType string) (result interface{}, err error) { + _, _, done := tele.StartSpanWithLogger(ctx, "agentpools.azureClient.Result") + defer done() + + if future == nil { + return nil, errors.Errorf("cannot get result from nil future") + } + + switch futureType { + case infrav1.PutFuture: + // Marshal and Unmarshal the future to put it into the correct future type so we can access the Result function. + // Unfortunately the FutureAPI can't be casted directly to AgentPoolsCreateOrUpdateFuture because it is a azureautorest.Future, which doesn't implement the Result function. See PR #1686 for discussion on alternatives. + // It was converted back to a generic azureautorest.Future from the CAPZ infrav1.Future type stored in Status: https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/main/azure/converters/futures.go#L49. + var createFuture *containerservice.AgentPoolsCreateOrUpdateFuture + jsonData, err := future.MarshalJSON() + if err != nil { + return nil, errors.Wrap(err, "failed to marshal future") + } + if err := json.Unmarshal(jsonData, &createFuture); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal future data") + } + return createFuture.Result(ac.agentpools) + + case infrav1.DeleteFuture: + // Delete does not return a result agentPool. + return nil, nil + + default: + return nil, errors.Errorf("unknown future type %q", futureType) } - _, err = future.Result(ac.agentpools) - return err } diff --git a/azure/services/agentpools/mock_agentpools/agentpools_mock.go b/azure/services/agentpools/mock_agentpools/agentpools_mock.go index 8ad572ee46a..0366e5391ca 100644 --- a/azure/services/agentpools/mock_agentpools/agentpools_mock.go +++ b/azure/services/agentpools/mock_agentpools/agentpools_mock.go @@ -15,81 +15,416 @@ limitations under the License. */ // Code generated by MockGen. DO NOT EDIT. -// Source: ../client.go +// Source: ../agentpools.go // Package mock_agentpools is a generated GoMock package. package mock_agentpools import ( - context "context" reflect "reflect" - containerservice "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice" + autorest "github.com/Azure/go-autorest/autorest" gomock "github.com/golang/mock/gomock" + v1beta1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + azure "sigs.k8s.io/cluster-api-provider-azure/azure" + v1beta10 "sigs.k8s.io/cluster-api/api/v1beta1" ) -// MockClient is a mock of Client interface. -type MockClient struct { +// MockAgentPoolScope is a mock of AgentPoolScope interface. 
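+// MockAgentPoolScope is generated by mockgen from ../agentpools.go (see the
+// go:generate directive in doc.go below); regenerate it rather than editing by hand.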
+type MockAgentPoolScope struct { ctrl *gomock.Controller - recorder *MockClientMockRecorder + recorder *MockAgentPoolScopeMockRecorder } -// MockClientMockRecorder is the mock recorder for MockClient. -type MockClientMockRecorder struct { - mock *MockClient +// MockAgentPoolScopeMockRecorder is the mock recorder for MockAgentPoolScope. +type MockAgentPoolScopeMockRecorder struct { + mock *MockAgentPoolScope } -// NewMockClient creates a new mock instance. -func NewMockClient(ctrl *gomock.Controller) *MockClient { - mock := &MockClient{ctrl: ctrl} - mock.recorder = &MockClientMockRecorder{mock} +// NewMockAgentPoolScope creates a new mock instance. +func NewMockAgentPoolScope(ctrl *gomock.Controller) *MockAgentPoolScope { + mock := &MockAgentPoolScope{ctrl: ctrl} + mock.recorder = &MockAgentPoolScopeMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockClient) EXPECT() *MockClientMockRecorder { +func (m *MockAgentPoolScope) EXPECT() *MockAgentPoolScopeMockRecorder { return m.recorder } -// CreateOrUpdate mocks base method. -func (m *MockClient) CreateOrUpdate(arg0 context.Context, arg1, arg2, arg3 string, arg4 containerservice.AgentPool, arg5 map[string]string) error { +// AdditionalTags mocks base method. +func (m *MockAgentPoolScope) AdditionalTags() v1beta1.Tags { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateOrUpdate", arg0, arg1, arg2, arg3, arg4, arg5) - ret0, _ := ret[0].(error) + ret := m.ctrl.Call(m, "AdditionalTags") + ret0, _ := ret[0].(v1beta1.Tags) return ret0 } -// CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockClientMockRecorder) CreateOrUpdate(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { +// AdditionalTags indicates an expected call of AdditionalTags. +func (mr *MockAgentPoolScopeMockRecorder) AdditionalTags() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockClient)(nil).CreateOrUpdate), arg0, arg1, arg2, arg3, arg4, arg5) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AdditionalTags", reflect.TypeOf((*MockAgentPoolScope)(nil).AdditionalTags)) } -// Delete mocks base method. -func (m *MockClient) Delete(arg0 context.Context, arg1, arg2, arg3 string) error { +// AgentPoolAnnotations mocks base method. +func (m *MockAgentPoolScope) AgentPoolAnnotations() map[string]string { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(error) + ret := m.ctrl.Call(m, "AgentPoolAnnotations") + ret0, _ := ret[0].(map[string]string) return ret0 } -// Delete indicates an expected call of Delete. -func (mr *MockClientMockRecorder) Delete(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +// AgentPoolAnnotations indicates an expected call of AgentPoolAnnotations. +func (mr *MockAgentPoolScopeMockRecorder) AgentPoolAnnotations() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockClient)(nil).Delete), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AgentPoolAnnotations", reflect.TypeOf((*MockAgentPoolScope)(nil).AgentPoolAnnotations)) } -// Get mocks base method. -func (m *MockClient) Get(arg0 context.Context, arg1, arg2, arg3 string) (containerservice.AgentPool, error) { +// AgentPoolSpec mocks base method. 
+func (m *MockAgentPoolScope) AgentPoolSpec() azure.ResourceSpecGetter { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(containerservice.AgentPool) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "AgentPoolSpec") + ret0, _ := ret[0].(azure.ResourceSpecGetter) + return ret0 +} + +// AgentPoolSpec indicates an expected call of AgentPoolSpec. +func (mr *MockAgentPoolScopeMockRecorder) AgentPoolSpec() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AgentPoolSpec", reflect.TypeOf((*MockAgentPoolScope)(nil).AgentPoolSpec)) +} + +// Authorizer mocks base method. +func (m *MockAgentPoolScope) Authorizer() autorest.Authorizer { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Authorizer") + ret0, _ := ret[0].(autorest.Authorizer) + return ret0 +} + +// Authorizer indicates an expected call of Authorizer. +func (mr *MockAgentPoolScopeMockRecorder) Authorizer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Authorizer", reflect.TypeOf((*MockAgentPoolScope)(nil).Authorizer)) +} + +// AvailabilitySetEnabled mocks base method. +func (m *MockAgentPoolScope) AvailabilitySetEnabled() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AvailabilitySetEnabled") + ret0, _ := ret[0].(bool) + return ret0 +} + +// AvailabilitySetEnabled indicates an expected call of AvailabilitySetEnabled. +func (mr *MockAgentPoolScopeMockRecorder) AvailabilitySetEnabled() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AvailabilitySetEnabled", reflect.TypeOf((*MockAgentPoolScope)(nil).AvailabilitySetEnabled)) +} + +// BaseURI mocks base method. +func (m *MockAgentPoolScope) BaseURI() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BaseURI") + ret0, _ := ret[0].(string) + return ret0 +} + +// BaseURI indicates an expected call of BaseURI. +func (mr *MockAgentPoolScopeMockRecorder) BaseURI() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BaseURI", reflect.TypeOf((*MockAgentPoolScope)(nil).BaseURI)) +} + +// ClientID mocks base method. +func (m *MockAgentPoolScope) ClientID() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientID") + ret0, _ := ret[0].(string) + return ret0 +} + +// ClientID indicates an expected call of ClientID. +func (mr *MockAgentPoolScopeMockRecorder) ClientID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientID", reflect.TypeOf((*MockAgentPoolScope)(nil).ClientID)) +} + +// ClientSecret mocks base method. +func (m *MockAgentPoolScope) ClientSecret() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientSecret") + ret0, _ := ret[0].(string) + return ret0 +} + +// ClientSecret indicates an expected call of ClientSecret. +func (mr *MockAgentPoolScopeMockRecorder) ClientSecret() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientSecret", reflect.TypeOf((*MockAgentPoolScope)(nil).ClientSecret)) +} + +// CloudEnvironment mocks base method. +func (m *MockAgentPoolScope) CloudEnvironment() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloudEnvironment") + ret0, _ := ret[0].(string) + return ret0 +} + +// CloudEnvironment indicates an expected call of CloudEnvironment. 
+func (mr *MockAgentPoolScopeMockRecorder) CloudEnvironment() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloudEnvironment", reflect.TypeOf((*MockAgentPoolScope)(nil).CloudEnvironment)) +} + +// CloudProviderConfigOverrides mocks base method. +func (m *MockAgentPoolScope) CloudProviderConfigOverrides() *v1beta1.CloudProviderConfigOverrides { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloudProviderConfigOverrides") + ret0, _ := ret[0].(*v1beta1.CloudProviderConfigOverrides) + return ret0 +} + +// CloudProviderConfigOverrides indicates an expected call of CloudProviderConfigOverrides. +func (mr *MockAgentPoolScopeMockRecorder) CloudProviderConfigOverrides() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloudProviderConfigOverrides", reflect.TypeOf((*MockAgentPoolScope)(nil).CloudProviderConfigOverrides)) +} + +// ClusterName mocks base method. +func (m *MockAgentPoolScope) ClusterName() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClusterName") + ret0, _ := ret[0].(string) + return ret0 +} + +// ClusterName indicates an expected call of ClusterName. +func (mr *MockAgentPoolScopeMockRecorder) ClusterName() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterName", reflect.TypeOf((*MockAgentPoolScope)(nil).ClusterName)) +} + +// DeleteLongRunningOperationState mocks base method. +func (m *MockAgentPoolScope) DeleteLongRunningOperationState(arg0, arg1 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteLongRunningOperationState", arg0, arg1) +} + +// DeleteLongRunningOperationState indicates an expected call of DeleteLongRunningOperationState. +func (mr *MockAgentPoolScopeMockRecorder) DeleteLongRunningOperationState(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLongRunningOperationState", reflect.TypeOf((*MockAgentPoolScope)(nil).DeleteLongRunningOperationState), arg0, arg1) +} + +// FailureDomains mocks base method. +func (m *MockAgentPoolScope) FailureDomains() []string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FailureDomains") + ret0, _ := ret[0].([]string) + return ret0 +} + +// FailureDomains indicates an expected call of FailureDomains. +func (mr *MockAgentPoolScopeMockRecorder) FailureDomains() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FailureDomains", reflect.TypeOf((*MockAgentPoolScope)(nil).FailureDomains)) +} + +// GetLongRunningOperationState mocks base method. +func (m *MockAgentPoolScope) GetLongRunningOperationState(arg0, arg1 string) *v1beta1.Future { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLongRunningOperationState", arg0, arg1) + ret0, _ := ret[0].(*v1beta1.Future) + return ret0 +} + +// GetLongRunningOperationState indicates an expected call of GetLongRunningOperationState. +func (mr *MockAgentPoolScopeMockRecorder) GetLongRunningOperationState(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLongRunningOperationState", reflect.TypeOf((*MockAgentPoolScope)(nil).GetLongRunningOperationState), arg0, arg1) +} + +// HashKey mocks base method. +func (m *MockAgentPoolScope) HashKey() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HashKey") + ret0, _ := ret[0].(string) + return ret0 +} + +// HashKey indicates an expected call of HashKey. 
+func (mr *MockAgentPoolScopeMockRecorder) HashKey() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashKey", reflect.TypeOf((*MockAgentPoolScope)(nil).HashKey)) +} + +// Location mocks base method. +func (m *MockAgentPoolScope) Location() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Location") + ret0, _ := ret[0].(string) + return ret0 +} + +// Location indicates an expected call of Location. +func (mr *MockAgentPoolScopeMockRecorder) Location() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Location", reflect.TypeOf((*MockAgentPoolScope)(nil).Location)) +} + +// Name mocks base method. +func (m *MockAgentPoolScope) Name() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Name") + ret0, _ := ret[0].(string) + return ret0 +} + +// Name indicates an expected call of Name. +func (mr *MockAgentPoolScopeMockRecorder) Name() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockAgentPoolScope)(nil).Name)) +} + +// NodeResourceGroup mocks base method. +func (m *MockAgentPoolScope) NodeResourceGroup() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NodeResourceGroup") + ret0, _ := ret[0].(string) + return ret0 +} + +// NodeResourceGroup indicates an expected call of NodeResourceGroup. +func (mr *MockAgentPoolScopeMockRecorder) NodeResourceGroup() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeResourceGroup", reflect.TypeOf((*MockAgentPoolScope)(nil).NodeResourceGroup)) +} + +// ResourceGroup mocks base method. +func (m *MockAgentPoolScope) ResourceGroup() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResourceGroup") + ret0, _ := ret[0].(string) + return ret0 +} + +// ResourceGroup indicates an expected call of ResourceGroup. +func (mr *MockAgentPoolScopeMockRecorder) ResourceGroup() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResourceGroup", reflect.TypeOf((*MockAgentPoolScope)(nil).ResourceGroup)) +} + +// SetAgentPoolProviderIDList mocks base method. +func (m *MockAgentPoolScope) SetAgentPoolProviderIDList(arg0 []string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetAgentPoolProviderIDList", arg0) +} + +// SetAgentPoolProviderIDList indicates an expected call of SetAgentPoolProviderIDList. +func (mr *MockAgentPoolScopeMockRecorder) SetAgentPoolProviderIDList(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAgentPoolProviderIDList", reflect.TypeOf((*MockAgentPoolScope)(nil).SetAgentPoolProviderIDList), arg0) +} + +// SetAgentPoolReady mocks base method. +func (m *MockAgentPoolScope) SetAgentPoolReady(arg0 bool) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetAgentPoolReady", arg0) +} + +// SetAgentPoolReady indicates an expected call of SetAgentPoolReady. +func (mr *MockAgentPoolScopeMockRecorder) SetAgentPoolReady(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAgentPoolReady", reflect.TypeOf((*MockAgentPoolScope)(nil).SetAgentPoolReady), arg0) +} + +// SetAgentPoolReplicas mocks base method. +func (m *MockAgentPoolScope) SetAgentPoolReplicas(arg0 int32) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetAgentPoolReplicas", arg0) +} + +// SetAgentPoolReplicas indicates an expected call of SetAgentPoolReplicas. 
+func (mr *MockAgentPoolScopeMockRecorder) SetAgentPoolReplicas(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAgentPoolReplicas", reflect.TypeOf((*MockAgentPoolScope)(nil).SetAgentPoolReplicas), arg0) +} + +// SetLongRunningOperationState mocks base method. +func (m *MockAgentPoolScope) SetLongRunningOperationState(arg0 *v1beta1.Future) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetLongRunningOperationState", arg0) +} + +// SetLongRunningOperationState indicates an expected call of SetLongRunningOperationState. +func (mr *MockAgentPoolScopeMockRecorder) SetLongRunningOperationState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLongRunningOperationState", reflect.TypeOf((*MockAgentPoolScope)(nil).SetLongRunningOperationState), arg0) +} + +// SubscriptionID mocks base method. +func (m *MockAgentPoolScope) SubscriptionID() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubscriptionID") + ret0, _ := ret[0].(string) + return ret0 +} + +// SubscriptionID indicates an expected call of SubscriptionID. +func (mr *MockAgentPoolScopeMockRecorder) SubscriptionID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscriptionID", reflect.TypeOf((*MockAgentPoolScope)(nil).SubscriptionID)) +} + +// TenantID mocks base method. +func (m *MockAgentPoolScope) TenantID() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TenantID") + ret0, _ := ret[0].(string) + return ret0 +} + +// TenantID indicates an expected call of TenantID. +func (mr *MockAgentPoolScopeMockRecorder) TenantID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantID", reflect.TypeOf((*MockAgentPoolScope)(nil).TenantID)) +} + +// UpdateDeleteStatus mocks base method. +func (m *MockAgentPoolScope) UpdateDeleteStatus(arg0 v1beta10.ConditionType, arg1 string, arg2 error) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UpdateDeleteStatus", arg0, arg1, arg2) +} + +// UpdateDeleteStatus indicates an expected call of UpdateDeleteStatus. +func (mr *MockAgentPoolScopeMockRecorder) UpdateDeleteStatus(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateDeleteStatus", reflect.TypeOf((*MockAgentPoolScope)(nil).UpdateDeleteStatus), arg0, arg1, arg2) +} + +// UpdatePatchStatus mocks base method. +func (m *MockAgentPoolScope) UpdatePatchStatus(arg0 v1beta10.ConditionType, arg1 string, arg2 error) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UpdatePatchStatus", arg0, arg1, arg2) +} + +// UpdatePatchStatus indicates an expected call of UpdatePatchStatus. +func (mr *MockAgentPoolScopeMockRecorder) UpdatePatchStatus(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePatchStatus", reflect.TypeOf((*MockAgentPoolScope)(nil).UpdatePatchStatus), arg0, arg1, arg2) +} + +// UpdatePutStatus mocks base method. +func (m *MockAgentPoolScope) UpdatePutStatus(arg0 v1beta10.ConditionType, arg1 string, arg2 error) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UpdatePutStatus", arg0, arg1, arg2) } -// Get indicates an expected call of Get. -func (mr *MockClientMockRecorder) Get(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +// UpdatePutStatus indicates an expected call of UpdatePutStatus. 
+func (mr *MockAgentPoolScopeMockRecorder) UpdatePutStatus(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockClient)(nil).Get), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePutStatus", reflect.TypeOf((*MockAgentPoolScope)(nil).UpdatePutStatus), arg0, arg1, arg2) } diff --git a/azure/services/agentpools/mock_agentpools/doc.go b/azure/services/agentpools/mock_agentpools/doc.go index fc6a1984a50..f0f04c0961f 100644 --- a/azure/services/agentpools/mock_agentpools/doc.go +++ b/azure/services/agentpools/mock_agentpools/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // Run go generate to regenerate this mock. -//go:generate ../../../../hack/tools/bin/mockgen -destination agentpools_mock.go -package mock_agentpools -source ../client.go Client +//go:generate ../../../../hack/tools/bin/mockgen -destination agentpools_mock.go -package mock_agentpools -source ../agentpools.go AgentPoolScope //go:generate /usr/bin/env bash -c "cat ../../../../hack/boilerplate/boilerplate.generatego.txt agentpools_mock.go > _agentpools_mock.go && mv _agentpools_mock.go agentpools_mock.go" package mock_agentpools diff --git a/azure/services/agentpools/spec.go b/azure/services/agentpools/spec.go new file mode 100644 index 00000000000..830e0acf81f --- /dev/null +++ b/azure/services/agentpools/spec.go @@ -0,0 +1,204 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package agentpools + +import ( + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice" + "github.com/Azure/go-autorest/autorest/to" + "github.com/google/go-cmp/cmp" + "github.com/pkg/errors" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" +) + +// AgentPoolSpec contains agent pool specification details. +type AgentPoolSpec struct { + // Name is the name of agent pool. + Name string + + // ResourceGroup is the name of the Azure resource group for the AKS Cluster. + ResourceGroup string + + // Cluster is the name of the AKS cluster. + Cluster string + + // Version defines the desired Kubernetes version. + Version *string + + // SKU defines the Azure VM size for the agent pool VMs. + SKU string + + // Replicas is the number of desired machines. + Replicas int32 + + // OSDiskSizeGB is the OS disk size in GB for every machine in this agent pool. + OSDiskSizeGB int32 + + // VnetSubnetID is the Azure Resource ID for the subnet which should contain nodes. + VnetSubnetID string + + // Mode represents mode of an agent pool. Possible values include: 'System', 'User'. 
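+ // The string value is converted to containerservice.AgentPoolMode when the
+ // spec is rendered by Parameters below.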
+ Mode string
+
+ // MaxCount is the maximum number of nodes for auto-scaling.
+ MaxCount *int32 `json:"maxCount,omitempty"`
+
+ // MinCount is the minimum number of nodes for auto-scaling.
+ MinCount *int32 `json:"minCount,omitempty"`
+
+ // NodeLabels specifies the labels for all of the nodes present in this node pool.
+ NodeLabels map[string]*string `json:"nodeLabels,omitempty"`
+
+ // NodeTaints specifies the taints for nodes present in this agent pool.
+ NodeTaints []string `json:"nodeTaints,omitempty"`
+
+ // EnableAutoScaling specifies whether to enable the auto-scaler.
+ EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"`
+
+ // AvailabilityZones represents the Availability zones for nodes in the AgentPool.
+ AvailabilityZones []string
+
+ // MaxPods specifies the kubelet --max-pods configuration for the agent pool.
+ MaxPods *int32 `json:"maxPods,omitempty"`
+
+ // OsDiskType specifies the OS disk type for each node in the pool. Allowed values are 'Ephemeral' and 'Managed'.
+ OsDiskType *string `json:"osDiskType,omitempty"`
+
+ // EnableUltraSSD enables the storage type UltraSSD_LRS for the agent pool.
+ EnableUltraSSD *bool `json:"enableUltraSSD,omitempty"`
+
+ // OSType specifies the operating system for the node pool. Allowed values are 'Linux' and 'Windows'
+ OSType *string `json:"osType,omitempty"`
+
+ // Headers is a map of additional headers to add to the HTTP requests to update this resource.
+ Headers map[string]string
+}
+
+// ResourceName returns the name of the agent pool.
+func (s *AgentPoolSpec) ResourceName() string {
+ return s.Name
+}
+
+// ResourceGroupName returns the name of the resource group.
+func (s *AgentPoolSpec) ResourceGroupName() string {
+ return s.ResourceGroup
+}
+
+// OwnerResourceName returns the name of the cluster that owns this agent pool.
+func (s *AgentPoolSpec) OwnerResourceName() string {
+ return s.Cluster
+}
+
+// CustomHeaders returns custom headers to be added to the Azure API calls.
+func (s *AgentPoolSpec) CustomHeaders() map[string]string {
+ return s.Headers
+}
+
+// Parameters returns the parameters for the agent pool.
+func (s *AgentPoolSpec) Parameters(existing interface{}) (params interface{}, err error) {
+ if existing != nil {
+ existingPool, ok := existing.(containerservice.AgentPool)
+ if !ok {
+ return nil, errors.Errorf("%T is not a containerservice.AgentPool", existing)
+ }
+
+ // agent pool already exists
+ ps := *existingPool.ManagedClusterAgentPoolProfileProperties.ProvisioningState
+ if ps != string(infrav1.Canceled) && ps != string(infrav1.Failed) && ps != string(infrav1.Succeeded) {
+ msg := fmt.Sprintf("Unable to update existing agent pool in non terminal state. Agent pool must be in one of the following provisioning states: Canceled, Failed, or Succeeded. 
Actual state: %s", ps) + return nil, azure.WithTransientError(errors.New(msg), 20*time.Second) + } + + // Normalize individual agent pools to diff in case we need to update + existingProfile := containerservice.AgentPool{ + ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ + Count: existingPool.Count, + OrchestratorVersion: existingPool.OrchestratorVersion, + Mode: existingPool.Mode, + EnableAutoScaling: existingPool.EnableAutoScaling, + MinCount: existingPool.MinCount, + MaxCount: existingPool.MaxCount, + NodeLabels: existingPool.NodeLabels, + }, + } + + normalizedProfile := containerservice.AgentPool{ + ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ + Count: &s.Replicas, + OrchestratorVersion: s.Version, + Mode: containerservice.AgentPoolMode(s.Mode), + EnableAutoScaling: s.EnableAutoScaling, + MinCount: s.MinCount, + MaxCount: s.MaxCount, + NodeLabels: s.NodeLabels, + }, + } + + // Compute a diff to check if we require an update + diff := cmp.Diff(normalizedProfile, existingProfile) + if diff == "" { + // agent pool is up to date, nothing to do + return nil, nil + } + } + + var availabilityZones *[]string + if len(s.AvailabilityZones) > 0 { + availabilityZones = &s.AvailabilityZones + } + var replicas *int32 + if s.Replicas > 0 { + replicas = &s.Replicas + } + var nodeTaints *[]string + if len(s.NodeTaints) > 0 { + nodeTaints = &s.NodeTaints + } + var sku *string + if s.SKU != "" { + sku = &s.SKU + } + var vnetSubnetID *string + if s.VnetSubnetID != "" { + vnetSubnetID = &s.VnetSubnetID + } + + return containerservice.AgentPool{ + ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ + AvailabilityZones: availabilityZones, + Count: replicas, + EnableAutoScaling: s.EnableAutoScaling, + EnableUltraSSD: s.EnableUltraSSD, + MaxCount: s.MaxCount, + MaxPods: s.MaxPods, + MinCount: s.MinCount, + Mode: containerservice.AgentPoolMode(s.Mode), + NodeLabels: s.NodeLabels, + NodeTaints: nodeTaints, + OrchestratorVersion: s.Version, + OsDiskSizeGB: &s.OSDiskSizeGB, + OsDiskType: containerservice.OSDiskType(to.String(s.OsDiskType)), + OsType: containerservice.OSType(to.String(s.OSType)), + Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, + VMSize: sku, + VnetSubnetID: vnetSubnetID, + }, + }, nil +} diff --git a/azure/services/agentpools/spec_test.go b/azure/services/agentpools/spec_test.go new file mode 100644 index 00000000000..c27d979a431 --- /dev/null +++ b/azure/services/agentpools/spec_test.go @@ -0,0 +1,344 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package agentpools + +import ( + "reflect" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-05-01/containerservice" + "github.com/Azure/go-autorest/autorest/to" + "github.com/google/go-cmp/cmp" + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + "sigs.k8s.io/cluster-api-provider-azure/azure" +) + +var ( + fakeAgentPoolSpec1 = AgentPoolSpec{ + Name: "fake-agent-pool-name", + ResourceGroup: "fake-rg", + Cluster: "fake-cluster", + AvailabilityZones: []string{"fake-zone"}, + EnableAutoScaling: to.BoolPtr(true), + EnableUltraSSD: to.BoolPtr(true), + MaxCount: to.Int32Ptr(5), + MaxPods: to.Int32Ptr(10), + MinCount: to.Int32Ptr(1), + Mode: "fake-mode", + NodeLabels: map[string]*string{"fake-label": to.StringPtr("fake-value")}, + NodeTaints: []string{"fake-taint"}, + OSDiskSizeGB: 2, + OsDiskType: to.StringPtr("fake-os-disk-type"), + OSType: to.StringPtr("fake-os-type"), + Replicas: 1, + SKU: "fake-sku", + Version: to.StringPtr("fake-version"), + VnetSubnetID: "fake-vnet-subnet-id", + Headers: map[string]string{"fake-header": "fake-value"}, + } + + fakeAgentPoolCountOutOfDate = containerservice.AgentPool{ + ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ + AvailabilityZones: &[]string{"fake-zone"}, + Count: to.Int32Ptr(2), // updates if changed + EnableAutoScaling: to.BoolPtr(true), // updates if changed + EnableUltraSSD: to.BoolPtr(true), + MaxCount: to.Int32Ptr(5), // updates if changed + MaxPods: to.Int32Ptr(10), + MinCount: to.Int32Ptr(1), // updates if changed + Mode: containerservice.AgentPoolMode("fake-mode"), // updates if changed + NodeLabels: map[string]*string{"fake-label": to.StringPtr("fake-value")}, // updates if changed + NodeTaints: &[]string{"fake-taint"}, + OrchestratorVersion: to.StringPtr("fake-version"), // updates if changed + OsDiskSizeGB: to.Int32Ptr(2), + OsDiskType: containerservice.OSDiskType("fake-os-disk-type"), + OsType: containerservice.OSType("fake-os-type"), + ProvisioningState: to.StringPtr("Succeeded"), + Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, + VMSize: to.StringPtr("fake-sku"), + VnetSubnetID: to.StringPtr("fake-vnet-subnet-id"), + }, + } + + fakeAgentPoolAutoScalingOutOfDate = containerservice.AgentPool{ + ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ + AvailabilityZones: &[]string{"fake-zone"}, + Count: to.Int32Ptr(1), // updates if changed + EnableAutoScaling: to.BoolPtr(false), // updates if changed + EnableUltraSSD: to.BoolPtr(true), + MaxCount: to.Int32Ptr(5), // updates if changed + MaxPods: to.Int32Ptr(10), + MinCount: to.Int32Ptr(1), // updates if changed + Mode: containerservice.AgentPoolMode("fake-mode"), // updates if changed + NodeLabels: map[string]*string{"fake-label": to.StringPtr("fake-value")}, // updates if changed + NodeTaints: &[]string{"fake-taint"}, + OrchestratorVersion: to.StringPtr("fake-version"), // updates if changed + OsDiskSizeGB: to.Int32Ptr(2), + OsDiskType: containerservice.OSDiskType("fake-os-disk-type"), + OsType: containerservice.OSType("fake-os-type"), + ProvisioningState: to.StringPtr("Succeeded"), + Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, + VMSize: to.StringPtr("fake-sku"), + VnetSubnetID: to.StringPtr("fake-vnet-subnet-id"), + }, + } + + fakeAgentPoolMaxCountOutOfDate = containerservice.AgentPool{ + ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ + AvailabilityZones: &[]string{"fake-zone"}, + Count: to.Int32Ptr(1), // updates if changed + EnableAutoScaling: to.BoolPtr(true), // updates if changed + EnableUltraSSD: to.BoolPtr(true), + MaxCount: to.Int32Ptr(3), // updates if changed + MaxPods: to.Int32Ptr(10), 
+ MinCount: to.Int32Ptr(1), // updates if changed + Mode: containerservice.AgentPoolMode("fake-mode"), // updates if changed + NodeLabels: map[string]*string{"fake-label": to.StringPtr("fake-value")}, // updates if changed + NodeTaints: &[]string{"fake-taint"}, + OrchestratorVersion: to.StringPtr("fake-version"), // updates if changed + OsDiskSizeGB: to.Int32Ptr(2), + OsDiskType: containerservice.OSDiskType("fake-os-disk-type"), + OsType: containerservice.OSType("fake-os-type"), + ProvisioningState: to.StringPtr("Succeeded"), + Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, + VMSize: to.StringPtr("fake-sku"), + VnetSubnetID: to.StringPtr("fake-vnet-subnet-id"), + }, + } + + fakeAgentPoolMinCountOutOfDate = containerservice.AgentPool{ + ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ + AvailabilityZones: &[]string{"fake-zone"}, + Count: to.Int32Ptr(1), // updates if changed + EnableAutoScaling: to.BoolPtr(true), // updates if changed + EnableUltraSSD: to.BoolPtr(true), + MaxCount: to.Int32Ptr(5), // updates if changed + MaxPods: to.Int32Ptr(10), + MinCount: to.Int32Ptr(3), // updates if changed + Mode: containerservice.AgentPoolMode("fake-mode"), // updates if changed + NodeLabels: map[string]*string{"fake-label": to.StringPtr("fake-value")}, // updates if changed + NodeTaints: &[]string{"fake-taint"}, + OrchestratorVersion: to.StringPtr("fake-version"), // updates if changed + OsDiskSizeGB: to.Int32Ptr(2), + OsDiskType: containerservice.OSDiskType("fake-os-disk-type"), + OsType: containerservice.OSType("fake-os-type"), + ProvisioningState: to.StringPtr("Succeeded"), + Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, + VMSize: to.StringPtr("fake-sku"), + VnetSubnetID: to.StringPtr("fake-vnet-subnet-id"), + }, + } + + fakeAgentPoolModeOutOfDate = containerservice.AgentPool{ + ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ + AvailabilityZones: &[]string{"fake-zone"}, + Count: to.Int32Ptr(1), // updates if changed + EnableAutoScaling: to.BoolPtr(true), // updates if changed + EnableUltraSSD: to.BoolPtr(true), + MaxCount: to.Int32Ptr(5), // updates if changed + MaxPods: to.Int32Ptr(10), + MinCount: to.Int32Ptr(3), // updates if changed + Mode: containerservice.AgentPoolMode("fake-old-mode"), // updates if changed + NodeLabels: map[string]*string{"fake-label": to.StringPtr("fake-value")}, // updates if changed + NodeTaints: &[]string{"fake-taint"}, + OrchestratorVersion: to.StringPtr("fake-version"), // updates if changed + OsDiskSizeGB: to.Int32Ptr(2), + OsDiskType: containerservice.OSDiskType("fake-os-disk-type"), + OsType: containerservice.OSType("fake-os-type"), + ProvisioningState: to.StringPtr("Succeeded"), + Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, + VMSize: to.StringPtr("fake-sku"), + VnetSubnetID: to.StringPtr("fake-vnet-subnet-id"), + }, + } + + fakeAgentPoolNodeLabelsOutOfDate = containerservice.AgentPool{ + ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ + AvailabilityZones: &[]string{"fake-zone"}, + Count: to.Int32Ptr(1), // updates if changed + EnableAutoScaling: to.BoolPtr(true), // updates if changed + EnableUltraSSD: to.BoolPtr(true), + MaxCount: to.Int32Ptr(5), // updates if changed + MaxPods: to.Int32Ptr(10), + MinCount: to.Int32Ptr(3), // updates if changed + Mode: containerservice.AgentPoolMode("fake-old-mode"), // updates if changed + NodeLabels: map[string]*string{ + 
"fake-label": to.StringPtr("fake-value"), + "fake-old-label": to.StringPtr("fake-old-value")}, // updates if changed + NodeTaints: &[]string{"fake-taint"}, + OrchestratorVersion: to.StringPtr("fake-version"), // updates if changed + OsDiskSizeGB: to.Int32Ptr(2), + OsDiskType: containerservice.OSDiskType("fake-os-disk-type"), + OsType: containerservice.OSType("fake-os-type"), + ProvisioningState: to.StringPtr("Succeeded"), + Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, + VMSize: to.StringPtr("fake-sku"), + VnetSubnetID: to.StringPtr("fake-vnet-subnet-id"), + }, + } +) + +func fakeAgentPoolWithProvisioningState(provisioningState string) containerservice.AgentPool { + var state *string + if provisioningState != "" { + state = to.StringPtr(provisioningState) + } + return containerservice.AgentPool{ + ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{ + AvailabilityZones: &[]string{"fake-zone"}, + Count: to.Int32Ptr(1), + EnableAutoScaling: to.BoolPtr(true), + EnableUltraSSD: to.BoolPtr(true), + MaxCount: to.Int32Ptr(5), + MaxPods: to.Int32Ptr(10), + MinCount: to.Int32Ptr(1), + Mode: containerservice.AgentPoolMode("fake-mode"), + NodeLabels: map[string]*string{"fake-label": to.StringPtr("fake-value")}, + NodeTaints: &[]string{"fake-taint"}, + OrchestratorVersion: to.StringPtr("fake-version"), + OsDiskSizeGB: to.Int32Ptr(2), + OsDiskType: containerservice.OSDiskType("fake-os-disk-type"), + OsType: containerservice.OSType("fake-os-type"), + ProvisioningState: state, + Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, + VMSize: to.StringPtr("fake-sku"), + VnetSubnetID: to.StringPtr("fake-vnet-subnet-id"), + }, + } +} + +func TestParameters(t *testing.T) { + testcases := []struct { + name string + spec AgentPoolSpec + existing interface{} + expected interface{} + expectedError error + }{ + { + name: "parameters without an existing agent pool", + spec: fakeAgentPoolSpec1, + existing: nil, + expected: fakeAgentPoolWithProvisioningState(""), + expectedError: nil, + }, + { + name: "existing agent pool up to date with provisioning state `Succeeded` without error", + spec: fakeAgentPoolSpec1, + existing: fakeAgentPoolWithProvisioningState("Succeeded"), + expected: nil, + expectedError: nil, + }, + { + name: "existing agent pool up to date with provisioning state `Canceled` without error", + spec: fakeAgentPoolSpec1, + existing: fakeAgentPoolWithProvisioningState("Canceled"), + expected: nil, + expectedError: nil, + }, + { + name: "existing agent pool up to date with provisioning state `Failed` without error", + spec: fakeAgentPoolSpec1, + existing: fakeAgentPoolWithProvisioningState("Failed"), + expected: nil, + expectedError: nil, + }, + { + name: "existing agent pool up to date with non-terminal provisioning state `Deleting`", + spec: fakeAgentPoolSpec1, + existing: fakeAgentPoolWithProvisioningState("Deleting"), + expected: nil, + expectedError: azure.WithTransientError(errors.New("Unable to update existing agent pool in non terminal state. Agent pool must be in one of the following provisioning states: Canceled, Failed, or Succeeded. Actual state: Deleting"), 20*time.Second), + }, + { + name: "existing agent pool up to date with non-terminal provisioning state `InProgress`", + spec: fakeAgentPoolSpec1, + existing: fakeAgentPoolWithProvisioningState("InProgress"), + expected: nil, + expectedError: azure.WithTransientError(errors.New("Unable to update existing agent pool in non terminal state. 
Agent pool must be in one of the following provisioning states: Canceled, Failed, or Succeeded. Actual state: InProgress"), 20*time.Second), + }, + { + name: "existing agent pool up to date with non-terminal provisioning state `randomString`", + spec: fakeAgentPoolSpec1, + existing: fakeAgentPoolWithProvisioningState("randomString"), + expected: nil, + expectedError: azure.WithTransientError(errors.New("Unable to update existing agent pool in non terminal state. Agent pool must be in one of the following provisioning states: Canceled, Failed, or Succeeded. Actual state: randomString"), 20*time.Second), + }, + { + name: "parameters with an existing agent pool and update needed on count", + spec: fakeAgentPoolSpec1, + existing: fakeAgentPoolCountOutOfDate, + expected: fakeAgentPoolWithProvisioningState(""), + expectedError: nil, + }, + { + name: "parameters with an existing agent pool and update needed on autoscaling", + spec: fakeAgentPoolSpec1, + existing: fakeAgentPoolAutoScalingOutOfDate, + expected: fakeAgentPoolWithProvisioningState(""), + expectedError: nil, + }, + { + name: "parameters with an existing agent pool and update needed on max count", + spec: fakeAgentPoolSpec1, + existing: fakeAgentPoolMaxCountOutOfDate, + expected: fakeAgentPoolWithProvisioningState(""), + expectedError: nil, + }, + { + name: "parameters with an existing agent pool and update needed on min count", + spec: fakeAgentPoolSpec1, + existing: fakeAgentPoolMinCountOutOfDate, + expected: fakeAgentPoolWithProvisioningState(""), + expectedError: nil, + }, + { + name: "parameters with an existing agent pool and update needed on mode", + spec: fakeAgentPoolSpec1, + existing: fakeAgentPoolModeOutOfDate, + expected: fakeAgentPoolWithProvisioningState(""), + expectedError: nil, + }, + { + name: "parameters with an existing agent pool and update needed on node labels", + spec: fakeAgentPoolSpec1, + existing: fakeAgentPoolNodeLabelsOutOfDate, + expected: fakeAgentPoolWithProvisioningState(""), + expectedError: nil, + }, + } + for _, tc := range testcases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + t.Parallel() + + result, err := tc.spec.Parameters(tc.existing) + if tc.expectedError != nil { + g.Expect(err).To(HaveOccurred()) + g.Expect(err).To(MatchError(tc.expectedError)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + if !reflect.DeepEqual(result, tc.expected) { + t.Errorf("Got difference between expected result and computed result:\n%s", cmp.Diff(tc.expected, result)) + } + }) + } +} diff --git a/azure/services/managedclusters/spec.go b/azure/services/managedclusters/spec.go index 6214c46387e..2295e3760a8 100644 --- a/azure/services/managedclusters/spec.go +++ b/azure/services/managedclusters/spec.go @@ -67,7 +67,7 @@ type ManagedClusterSpec struct { SSHPublicKey string // GetAllAgentPools is a function that returns the list of agent pool specifications in this cluster. 
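+ // Each spec implements azure.ResourceSpecGetter; calling Parameters(nil) on
+ // it (see below) renders the containerservice.AgentPool that the agentpools
+ // service would PUT, which is then converted to a ManagedClusterAgentPoolProfile.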
- GetAllAgentPools func() ([]azure.AgentPoolSpec, error)
+ GetAllAgentPools func() ([]azure.ResourceSpecGetter, error)
 // PodCIDR is the CIDR block for IP addresses distributed to pods
 PodCIDR string
@@ -338,8 +338,17 @@ func (s *ManagedClusterSpec) Parameters(existing interface{}) (params interface{
 return nil, errors.Wrapf(err, "failed to get agent pool specs for managed cluster %s", s.Name)
 }
- for i := range agentPoolSpecs {
- profile := converters.AgentPoolToManagedClusterAgentPoolProfile(agentPoolSpecs[i])
+ for _, spec := range agentPoolSpecs {
+ params, err := spec.Parameters(nil)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to get agent pool parameters for managed cluster %s", s.Name)
+ }
+ agentPool, ok := params.(containerservice.AgentPool)
+ if !ok {
+ return nil, fmt.Errorf("%T is not a containerservice.AgentPool", params)
+ }
+ agentPool.Name = to.StringPtr(spec.ResourceName())
+ profile := converters.AgentPoolToManagedClusterAgentPoolProfile(agentPool)
 *managedCluster.AgentPoolProfiles = append(*managedCluster.AgentPoolProfiles, profile)
 }
 }
diff --git a/azure/services/managedclusters/spec_test.go b/azure/services/managedclusters/spec_test.go
index e6256e900a0..784c1c79b38 100644
--- a/azure/services/managedclusters/spec_test.go
+++ b/azure/services/managedclusters/spec_test.go
@@ -24,7 +24,7 @@ import (
 "github.com/google/go-cmp/cmp"
 . "github.com/onsi/gomega"
 "sigs.k8s.io/cluster-api-provider-azure/azure"
- "sigs.k8s.io/cluster-api-provider-azure/azure/converters"
+ "sigs.k8s.io/cluster-api-provider-azure/azure/services/agentpools"
 infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
 gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock"
)
@@ -65,15 +65,15 @@ func TestParameters(t *testing.T) {
 },
 Version: "v1.22.0",
 LoadBalancerSKU: "Standard",
- GetAllAgentPools: func() ([]azure.AgentPoolSpec, error) {
- return []azure.AgentPoolSpec{
- {
+ GetAllAgentPools: func() ([]azure.ResourceSpecGetter, error) {
+ return []azure.ResourceSpecGetter{
+ &agentpools.AgentPoolSpec{
 Name: "test-agentpool-0",
 Mode: string(infrav1exp.NodePoolModeSystem),
 ResourceGroup: "test-rg",
 Replicas: int32(2),
 },
- {
+ &agentpools.AgentPoolSpec{
 Name: "test-agentpool-1",
 Mode: string(infrav1exp.NodePoolModeUser),
 ResourceGroup: "test-rg",
@@ -160,24 +160,25 @@ func getSampleManagedCluster() containerservice.ManagedCluster {
 KubernetesVersion: to.StringPtr("v1.22.0"),
 DNSPrefix: to.StringPtr("test-managedcluster"),
 AgentPoolProfiles: &[]containerservice.ManagedClusterAgentPoolProfile{
- converters.AgentPoolToManagedClusterAgentPoolProfile(azure.AgentPoolSpec{
- Name: "test-agentpool-0",
- Mode: string(infrav1exp.NodePoolModeSystem),
- ResourceGroup: "test-rg",
- Replicas: int32(2),
- }),
- converters.AgentPoolToManagedClusterAgentPoolProfile(azure.AgentPoolSpec{
- Name: "test-agentpool-1",
- Mode: string(infrav1exp.NodePoolModeUser),
- ResourceGroup: "test-rg",
- Replicas: int32(4),
- Cluster: "test-managedcluster",
- SKU: "test_SKU",
- Version: to.StringPtr("v1.22.0"),
- VnetSubnetID: "fake/subnet/id",
- MaxPods: to.Int32Ptr(int32(32)),
- AvailabilityZones: []string{"1", "2"},
- }),
+ {
+ Name: to.StringPtr("test-agentpool-0"),
+ Mode: containerservice.AgentPoolMode(infrav1exp.NodePoolModeSystem),
+ Count: to.Int32Ptr(2),
+ Type: containerservice.AgentPoolTypeVirtualMachineScaleSets,
+ OsDiskSizeGB: to.Int32Ptr(0),
+ },
+ {
+ Name: to.StringPtr("test-agentpool-1"),
+ Mode: containerservice.AgentPoolMode(infrav1exp.NodePoolModeUser),
+
Count: to.Int32Ptr(4), + Type: containerservice.AgentPoolTypeVirtualMachineScaleSets, + OsDiskSizeGB: to.Int32Ptr(0), + VMSize: to.StringPtr("test_SKU"), + OrchestratorVersion: to.StringPtr("v1.22.0"), + VnetSubnetID: to.StringPtr("fake/subnet/id"), + MaxPods: to.Int32Ptr(int32(32)), + AvailabilityZones: &[]string{"1", "2"}, + }, }, LinuxProfile: &containerservice.LinuxProfile{ AdminUsername: to.StringPtr(azure.DefaultAKSUserName), diff --git a/azure/types.go b/azure/types.go index 6bfb2655dad..df20d352657 100644 --- a/azure/types.go +++ b/azure/types.go @@ -43,66 +43,6 @@ const ( VirtualMachineScaleSet = "VirtualMachineScaleSet" ) -// AgentPoolSpec contains agent pool specification details. -type AgentPoolSpec struct { - // Name is the name of agent pool. - Name string - - // ResourceGroup is the name of the Azure resource group for the AKS Cluster. - ResourceGroup string - - // Cluster is the name of the AKS cluster. - Cluster string - - // Version defines the desired Kubernetes version. - Version *string - - // SKU defines the Azure VM size for the agent pool VMs. - SKU string - - // Replicas is the number of desired machines. - Replicas int32 - - // OSDiskSizeGB is the OS disk size in GB for every machine in this agent pool. - OSDiskSizeGB int32 - - // VnetSubnetID is the Azure Resource ID for the subnet which should contain nodes. - VnetSubnetID string - - // Mode represents mode of an agent pool. Possible values include: 'System', 'User'. - Mode string - - // Maximum number of nodes for auto-scaling - MaxCount *int32 `json:"maxCount,omitempty"` - - // Minimum number of nodes for auto-scaling - MinCount *int32 `json:"minCount,omitempty"` - - // Node labels - labels for all of the nodes present in node pool - NodeLabels map[string]*string `json:"nodeLabels,omitempty"` - - // NodeTaints specifies the taints for nodes present in this agent pool. - NodeTaints []string `json:"nodeTaints,omitempty"` - - // EnableAutoScaling - Whether to enable auto-scaler - EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"` - - // AvailabilityZones represents the Availability zones for nodes in the AgentPool. - AvailabilityZones []string - - // MaxPods specifies the kubelet --max-pods configuration for the agent pool. - MaxPods *int32 `json:"maxPods,omitempty"` - - // OsDiskType specifies the OS disk type for each node in the pool. Allowed values are 'Ephemeral' and 'Managed'. - OsDiskType *string `json:"osDiskType,omitempty"` - - // EnableUltraSSD enables the storage type UltraSSD_LRS for the agent pool. - EnableUltraSSD *bool `json:"enableUltraSSD,omitempty"` - - // OSType specifies the operating system for the node pool. Allowed values are 'Linux' and 'Windows' - OSType *string `json:"osType,omitempty"` -} - // ScaleSetSpec defines the specification for a Scale Set. type ScaleSetSpec struct { Name string diff --git a/exp/controllers/azuremanagedmachinepool_reconciler.go b/exp/controllers/azuremanagedmachinepool_reconciler.go index 67fd56bbc3e..f4ff58d7b21 100644 --- a/exp/controllers/azuremanagedmachinepool_reconciler.go +++ b/exp/controllers/azuremanagedmachinepool_reconciler.go @@ -34,7 +34,7 @@ import ( type ( // azureManagedMachinePoolService contains the services required by the cluster controller. 
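+ // With this change agentPoolsSvc is a plain azure.Reconciler backed by the
+ // async agentpools service, so Reconcile and Delete below drive the
+ // non-blocking create/delete flow introduced in this patch.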
azureManagedMachinePoolService struct { - scope agentpools.ManagedMachinePoolScope + scope agentpools.AgentPoolScope agentPoolsSvc azure.Reconciler scaleSetsSvc NodeLister } @@ -96,7 +96,7 @@ func (s *azureManagedMachinePoolService) Reconcile(ctx context.Context) error { defer done() log.Info("reconciling managed machine pool") - agentPoolName := s.scope.AgentPoolSpec().Name + agentPoolName := s.scope.Name() if err := s.agentPoolsSvc.Reconcile(ctx); err != nil { return errors.Wrapf(err, "failed to reconcile machine pool %s", agentPoolName) @@ -155,7 +155,7 @@ func (s *azureManagedMachinePoolService) Delete(ctx context.Context) error { defer done() if err := s.agentPoolsSvc.Delete(ctx); err != nil { - return errors.Wrapf(err, "failed to delete machine pool %s", s.scope.AgentPoolSpec().Name) + return errors.Wrapf(err, "failed to delete machine pool %s", s.scope.Name()) } return nil
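
Note (illustrative, not part of this diff): with the async client and spec above, the agentpools Service is expected to be composed from the scope plus the shared async reconciler. A minimal sketch of the constructor, assuming the async.New(scope, createClient, getClient) wiring used by other CAPZ services:

func New(scope AgentPoolScope) *Service {
	client := newClient(scope)
	return &Service{
		scope: scope,
		// the same client serves as both the creator/deleter and the getter
		Reconciler: async.New(scope, client, client),
	}
}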