From 951e35fce97be01f4f10b068059afa89aff56f32 Mon Sep 17 00:00:00 2001 From: Cecile Robert-Michon Date: Wed, 2 Mar 2022 16:05:12 -0800 Subject: [PATCH 1/2] Use a list for Azure services in AzureCluster and AzureMachine reconcilers --- azure/interfaces.go | 10 +- azure/mock_azure/azure_mock.go | 51 +++-- azure/services/agentpools/agentpools.go | 7 + .../availabilitysets/availabilitysets.go | 5 + azure/services/bastionhosts/bastionhosts.go | 5 + azure/services/disks/disks.go | 5 + azure/services/groups/groups.go | 19 +- azure/services/groups/groups_test.go | 20 +- .../inboundnatrules/inboundnatrules.go | 5 + azure/services/loadbalancers/loadbalancers.go | 5 + .../managedclusters/managedclusters.go | 7 + azure/services/natgateways/natgateways.go | 5 + .../networkinterfaces/networkinterfaces.go | 5 + azure/services/privatedns/privatedns.go | 7 + azure/services/publicips/publicips.go | 7 + .../roleassignments/roleassignments.go | 10 +- azure/services/routetables/routetables.go | 5 + azure/services/scalesets/scalesets.go | 11 +- azure/services/scalesets/scalesets_test.go | 2 +- azure/services/scalesetvms/scalesetvms.go | 5 + .../services/securitygroups/securitygroups.go | 5 + azure/services/subnets/subnets.go | 5 + azure/services/tags/tags.go | 7 + .../virtualmachines/virtualmachines.go | 5 + .../virtualnetworks/virtualnetworks.go | 5 + azure/services/vmextensions/vmextensions.go | 7 + .../services/vmssextensions/vmssextensions.go | 7 + azure/services/vnetpeerings/vnetpeerings.go | 5 + controllers/azurecluster_reconciler.go | 161 +++++----------- controllers/azurecluster_reconciler_test.go | 160 +++++++++------- controllers/azuremachine_reconciler.go | 103 +++------- controllers/azuremachine_reconciler_test.go | 172 +++++++++++++++++ .../azuremachinepool_controller_unit_test.go | 2 +- .../azuremachinepool_reconciler.go | 44 ++--- .../azuremachinepool_reconciler_test.go | 176 ++++++++++++++++++ .../azuremanagedcontrolplane_reconciler.go | 64 +++---- 36 files changed, 746 
insertions(+), 378 deletions(-) create mode 100644 controllers/azuremachine_reconciler_test.go create mode 100644 exp/controllers/azuremachinepool_reconciler_test.go diff --git a/azure/interfaces.go b/azure/interfaces.go index 345d7e15994..a10fe0ab6c5 100644 --- a/azure/interfaces.go +++ b/azure/interfaces.go @@ -24,18 +24,16 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) -// Reconciler is a generic interface used by components offering a type of service. -// Example: virtualnetworks service would offer Reconcile/Delete methods. +// Reconciler is a generic interface for a controller reconciler which has Reconcile and Delete methods. type Reconciler interface { Reconcile(ctx context.Context) error Delete(ctx context.Context) error } -// CredentialGetter is a Service which knows how to retrieve credentials for an Azure -// resource in a resource group. -type CredentialGetter interface { +// ServiceReconciler is an Azure service reconciler which can reconcile an Azure service. +type ServiceReconciler interface { + Name() string Reconciler - GetCredentials(ctx context.Context, group string, cluster string) ([]byte, error) } // Authorizer is an interface which can get the subscription ID, base URI, and authorizer for an Azure service. diff --git a/azure/mock_azure/azure_mock.go b/azure/mock_azure/azure_mock.go index ba7053081b1..8c853e2faa0 100644 --- a/azure/mock_azure/azure_mock.go +++ b/azure/mock_azure/azure_mock.go @@ -81,31 +81,31 @@ func (mr *MockReconcilerMockRecorder) Reconcile(ctx interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockReconciler)(nil).Reconcile), ctx) } -// MockCredentialGetter is a mock of CredentialGetter interface. -type MockCredentialGetter struct { +// MockServiceReconciler is a mock of ServiceReconciler interface. 
+type MockServiceReconciler struct { ctrl *gomock.Controller - recorder *MockCredentialGetterMockRecorder + recorder *MockServiceReconcilerMockRecorder } -// MockCredentialGetterMockRecorder is the mock recorder for MockCredentialGetter. -type MockCredentialGetterMockRecorder struct { - mock *MockCredentialGetter +// MockServiceReconcilerMockRecorder is the mock recorder for MockServiceReconciler. +type MockServiceReconcilerMockRecorder struct { + mock *MockServiceReconciler } -// NewMockCredentialGetter creates a new mock instance. -func NewMockCredentialGetter(ctrl *gomock.Controller) *MockCredentialGetter { - mock := &MockCredentialGetter{ctrl: ctrl} - mock.recorder = &MockCredentialGetterMockRecorder{mock} +// NewMockServiceReconciler creates a new mock instance. +func NewMockServiceReconciler(ctrl *gomock.Controller) *MockServiceReconciler { + mock := &MockServiceReconciler{ctrl: ctrl} + mock.recorder = &MockServiceReconcilerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCredentialGetter) EXPECT() *MockCredentialGetterMockRecorder { +func (m *MockServiceReconciler) EXPECT() *MockServiceReconcilerMockRecorder { return m.recorder } // Delete mocks base method. -func (m *MockCredentialGetter) Delete(ctx context.Context) error { +func (m *MockServiceReconciler) Delete(ctx context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Delete", ctx) ret0, _ := ret[0].(error) @@ -113,28 +113,27 @@ func (m *MockCredentialGetter) Delete(ctx context.Context) error { } // Delete indicates an expected call of Delete. 
-func (mr *MockCredentialGetterMockRecorder) Delete(ctx interface{}) *gomock.Call { +func (mr *MockServiceReconcilerMockRecorder) Delete(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockCredentialGetter)(nil).Delete), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockServiceReconciler)(nil).Delete), ctx) } -// GetCredentials mocks base method. -func (m *MockCredentialGetter) GetCredentials(ctx context.Context, group, cluster string) ([]byte, error) { +// Name mocks base method. +func (m *MockServiceReconciler) Name() string { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCredentials", ctx, group, cluster) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "Name") + ret0, _ := ret[0].(string) + return ret0 } -// GetCredentials indicates an expected call of GetCredentials. -func (mr *MockCredentialGetterMockRecorder) GetCredentials(ctx, group, cluster interface{}) *gomock.Call { +// Name indicates an expected call of Name. +func (mr *MockServiceReconcilerMockRecorder) Name() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCredentials", reflect.TypeOf((*MockCredentialGetter)(nil).GetCredentials), ctx, group, cluster) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockServiceReconciler)(nil).Name)) } // Reconcile mocks base method. -func (m *MockCredentialGetter) Reconcile(ctx context.Context) error { +func (m *MockServiceReconciler) Reconcile(ctx context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Reconcile", ctx) ret0, _ := ret[0].(error) @@ -142,9 +141,9 @@ func (m *MockCredentialGetter) Reconcile(ctx context.Context) error { } // Reconcile indicates an expected call of Reconcile. 
-func (mr *MockCredentialGetterMockRecorder) Reconcile(ctx interface{}) *gomock.Call { +func (mr *MockServiceReconcilerMockRecorder) Reconcile(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockCredentialGetter)(nil).Reconcile), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reconcile", reflect.TypeOf((*MockServiceReconciler)(nil).Reconcile), ctx) } // MockAuthorizer is a mock of Authorizer interface. diff --git a/azure/services/agentpools/agentpools.go b/azure/services/agentpools/agentpools.go index 3ebd2758ca2..4f5e8020d0c 100644 --- a/azure/services/agentpools/agentpools.go +++ b/azure/services/agentpools/agentpools.go @@ -31,6 +31,8 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) +const serviceName = "agentpools" + // ManagedMachinePoolScope defines the scope interface for a managed machine pool. type ManagedMachinePoolScope interface { azure.ClusterDescriber @@ -57,6 +59,11 @@ func New(scope ManagedMachinePoolScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile idempotently creates or updates a agent pool, if possible. func (s *Service) Reconcile(ctx context.Context) error { ctx, log, done := tele.StartSpanWithLogger( diff --git a/azure/services/availabilitysets/availabilitysets.go b/azure/services/availabilitysets/availabilitysets.go index 280e8958714..7e10e9f121a 100644 --- a/azure/services/availabilitysets/availabilitysets.go +++ b/azure/services/availabilitysets/availabilitysets.go @@ -57,6 +57,11 @@ func New(scope AvailabilitySetScope, skuCache *resourceskus.Cache) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile creates or updates availability sets. 
func (s *Service) Reconcile(ctx context.Context) error { ctx, log, done := tele.StartSpanWithLogger(ctx, "availabilitysets.Service.Reconcile") diff --git a/azure/services/bastionhosts/bastionhosts.go b/azure/services/bastionhosts/bastionhosts.go index 396a663f67e..6888959221c 100644 --- a/azure/services/bastionhosts/bastionhosts.go +++ b/azure/services/bastionhosts/bastionhosts.go @@ -50,6 +50,11 @@ func New(scope BastionScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile gets/creates/updates a bastion host. func (s *Service) Reconcile(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "bastionhosts.Service.Reconcile") diff --git a/azure/services/disks/disks.go b/azure/services/disks/disks.go index 00e092558b7..fa0a60ab7ff 100644 --- a/azure/services/disks/disks.go +++ b/azure/services/disks/disks.go @@ -50,6 +50,11 @@ func New(scope DiskScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile on disk is currently no-op. OS disks should only be deleted and will create with the VM automatically. func (s *Service) Reconcile(ctx context.Context) error { _, _, done := tele.StartSpanWithLogger(ctx, "disks.Service.Reconcile") diff --git a/azure/services/groups/groups.go b/azure/services/groups/groups.go index e301df5de5b..bdaf2d64d7d 100644 --- a/azure/services/groups/groups.go +++ b/azure/services/groups/groups.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) -const serviceName = "group" +const ServiceName = "group" // Service provides operations on Azure resources. type Service struct { @@ -56,6 +56,11 @@ func New(scope GroupScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return ServiceName +} + // Reconcile gets/creates/updates a resource group. 
func (s *Service) Reconcile(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "groups.Service.Reconcile") @@ -69,8 +74,8 @@ func (s *Service) Reconcile(ctx context.Context) error { return nil } - _, err := s.CreateResource(ctx, groupSpec, serviceName) - s.Scope.UpdatePutStatus(infrav1.ResourceGroupReadyCondition, serviceName, err) + _, err := s.CreateResource(ctx, groupSpec, ServiceName) + s.Scope.UpdatePutStatus(infrav1.ResourceGroupReadyCondition, ServiceName, err) return err } @@ -92,8 +97,8 @@ func (s *Service) Delete(ctx context.Context) error { if err != nil { if azure.ResourceNotFound(err) { // already deleted or doesn't exist, cleanup status and return. - s.Scope.DeleteLongRunningOperationState(groupSpec.ResourceName(), serviceName) - s.Scope.UpdateDeleteStatus(infrav1.ResourceGroupReadyCondition, serviceName, nil) + s.Scope.DeleteLongRunningOperationState(groupSpec.ResourceName(), ServiceName) + s.Scope.UpdateDeleteStatus(infrav1.ResourceGroupReadyCondition, ServiceName, nil) return nil } return errors.Wrap(err, "could not get resource group management state") @@ -103,8 +108,8 @@ func (s *Service) Delete(ctx context.Context) error { return azure.ErrNotOwned } - err = s.DeleteResource(ctx, groupSpec, serviceName) - s.Scope.UpdateDeleteStatus(infrav1.ResourceGroupReadyCondition, serviceName, err) + err = s.DeleteResource(ctx, groupSpec, ServiceName) + s.Scope.UpdateDeleteStatus(infrav1.ResourceGroupReadyCondition, ServiceName, err) return err } diff --git a/azure/services/groups/groups_test.go b/azure/services/groups/groups_test.go index c3500c3141d..cca1e643bd6 100644 --- a/azure/services/groups/groups_test.go +++ b/azure/services/groups/groups_test.go @@ -74,8 +74,8 @@ func TestReconcileGroups(t *testing.T) { expectedError: "", expect: func(s *mock_groups.MockGroupScopeMockRecorder, m *mock_groups.MockclientMockRecorder, r *mock_async.MockReconcilerMockRecorder) { s.GroupSpec().Return(&fakeGroupSpec) - 
r.CreateResource(gomockinternal.AContext(), &fakeGroupSpec, serviceName).Return(nil, nil) - s.UpdatePutStatus(infrav1.ResourceGroupReadyCondition, serviceName, nil) + r.CreateResource(gomockinternal.AContext(), &fakeGroupSpec, ServiceName).Return(nil, nil) + s.UpdatePutStatus(infrav1.ResourceGroupReadyCondition, ServiceName, nil) }, }, { @@ -83,8 +83,8 @@ func TestReconcileGroups(t *testing.T) { expectedError: "#: Internal Server Error: StatusCode=500", expect: func(s *mock_groups.MockGroupScopeMockRecorder, m *mock_groups.MockclientMockRecorder, r *mock_async.MockReconcilerMockRecorder) { s.GroupSpec().Return(&fakeGroupSpec) - r.CreateResource(gomockinternal.AContext(), &fakeGroupSpec, serviceName).Return(nil, internalError) - s.UpdatePutStatus(infrav1.ResourceGroupReadyCondition, serviceName, internalError) + r.CreateResource(gomockinternal.AContext(), &fakeGroupSpec, ServiceName).Return(nil, internalError) + s.UpdatePutStatus(infrav1.ResourceGroupReadyCondition, ServiceName, internalError) }, }, } @@ -140,8 +140,8 @@ func TestDeleteGroups(t *testing.T) { s.GroupSpec().AnyTimes().Return(&fakeGroupSpec) m.Get(gomockinternal.AContext(), &fakeGroupSpec).Return(sampleManagedGroup, nil) s.ClusterName().Return("test-cluster") - r.DeleteResource(gomockinternal.AContext(), &fakeGroupSpec, serviceName).Return(nil) - s.UpdateDeleteStatus(infrav1.ResourceGroupReadyCondition, serviceName, nil) + r.DeleteResource(gomockinternal.AContext(), &fakeGroupSpec, ServiceName).Return(nil) + s.UpdateDeleteStatus(infrav1.ResourceGroupReadyCondition, ServiceName, nil) }, }, { @@ -167,8 +167,8 @@ func TestDeleteGroups(t *testing.T) { expect: func(s *mock_groups.MockGroupScopeMockRecorder, m *mock_groups.MockclientMockRecorder, r *mock_async.MockReconcilerMockRecorder) { s.GroupSpec().AnyTimes().Return(&fakeGroupSpec) m.Get(gomockinternal.AContext(), &fakeGroupSpec).Return(resources.Group{}, notFoundError) - s.DeleteLongRunningOperationState("test-group", serviceName) - 
s.UpdateDeleteStatus(infrav1.ResourceGroupReadyCondition, serviceName, nil) + s.DeleteLongRunningOperationState("test-group", ServiceName) + s.UpdateDeleteStatus(infrav1.ResourceGroupReadyCondition, ServiceName, nil) }, }, { @@ -178,8 +178,8 @@ func TestDeleteGroups(t *testing.T) { s.GroupSpec().AnyTimes().Return(&fakeGroupSpec) m.Get(gomockinternal.AContext(), &fakeGroupSpec).Return(sampleManagedGroup, nil) s.ClusterName().Return("test-cluster") - r.DeleteResource(gomockinternal.AContext(), &fakeGroupSpec, serviceName).Return(internalError) - s.UpdateDeleteStatus(infrav1.ResourceGroupReadyCondition, serviceName, gomockinternal.ErrStrEq("#: Internal Server Error: StatusCode=500")) + r.DeleteResource(gomockinternal.AContext(), &fakeGroupSpec, ServiceName).Return(internalError) + s.UpdateDeleteStatus(infrav1.ResourceGroupReadyCondition, ServiceName, gomockinternal.ErrStrEq("#: Internal Server Error: StatusCode=500")) }, }, } diff --git a/azure/services/inboundnatrules/inboundnatrules.go b/azure/services/inboundnatrules/inboundnatrules.go index c95a680f2ee..212ceee647c 100644 --- a/azure/services/inboundnatrules/inboundnatrules.go +++ b/azure/services/inboundnatrules/inboundnatrules.go @@ -54,6 +54,11 @@ func New(scope InboundNatScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile gets/creates/updates an inbound NAT rule. func (s *Service) Reconcile(ctx context.Context) error { ctx, log, done := tele.StartSpanWithLogger(ctx, "inboundnatrules.Service.Reconcile") diff --git a/azure/services/loadbalancers/loadbalancers.go b/azure/services/loadbalancers/loadbalancers.go index 3e5d6165fa5..2f69c157407 100644 --- a/azure/services/loadbalancers/loadbalancers.go +++ b/azure/services/loadbalancers/loadbalancers.go @@ -55,6 +55,11 @@ func New(scope LBScope) *Service { } } +// Name returns the service name. 
+func (s *Service) Name() string { + return serviceName +} + // Reconcile gets/creates/updates a load balancer. func (s *Service) Reconcile(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "loadbalancers.Service.Reconcile") diff --git a/azure/services/managedclusters/managedclusters.go b/azure/services/managedclusters/managedclusters.go index 464b9cd9473..78362a7e0bf 100644 --- a/azure/services/managedclusters/managedclusters.go +++ b/azure/services/managedclusters/managedclusters.go @@ -35,6 +35,8 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) +const serviceName = "managedclusters" + var ( defaultUser = "azureuser" managedIdentity = "msi" @@ -145,6 +147,11 @@ func New(scope ManagedClusterScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile idempotently creates or updates a managed cluster, if possible. func (s *Service) Reconcile(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "managedclusters.Service.Reconcile") diff --git a/azure/services/natgateways/natgateways.go b/azure/services/natgateways/natgateways.go index 1b64d94875c..1fd9ae7ed3d 100644 --- a/azure/services/natgateways/natgateways.go +++ b/azure/services/natgateways/natgateways.go @@ -53,6 +53,11 @@ func New(scope NatGatewayScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile gets/creates/updates a NAT gateway. // Only when the NAT gateway 'Name' property is defined we create the NAT gateway: it's opt-in. 
func (s *Service) Reconcile(ctx context.Context) error { diff --git a/azure/services/networkinterfaces/networkinterfaces.go b/azure/services/networkinterfaces/networkinterfaces.go index bce8a6322f8..96d91f5dd87 100644 --- a/azure/services/networkinterfaces/networkinterfaces.go +++ b/azure/services/networkinterfaces/networkinterfaces.go @@ -53,6 +53,11 @@ func New(scope NICScope, skuCache *resourceskus.Cache) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile gets/creates/updates a network interface. func (s *Service) Reconcile(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "networkinterfaces.Service.Reconcile") diff --git a/azure/services/privatedns/privatedns.go b/azure/services/privatedns/privatedns.go index c87f9dba908..0ad47829328 100644 --- a/azure/services/privatedns/privatedns.go +++ b/azure/services/privatedns/privatedns.go @@ -28,6 +28,8 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) +const serviceName = "privatedns" + // Scope defines the scope interface for a private dns service. type Scope interface { azure.ClusterDescriber @@ -48,6 +50,11 @@ func New(scope Scope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile creates or updates the private zone, links it to the vnet, and creates DNS records. func (s *Service) Reconcile(ctx context.Context) error { ctx, log, done := tele.StartSpanWithLogger(ctx, "privatedns.Service.Reconcile") diff --git a/azure/services/publicips/publicips.go b/azure/services/publicips/publicips.go index a9be7749476..bd1a32dece6 100644 --- a/azure/services/publicips/publicips.go +++ b/azure/services/publicips/publicips.go @@ -29,6 +29,8 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) +const serviceName = "publicips" + // PublicIPScope defines the scope interface for a public IP service. 
type PublicIPScope interface { azure.ClusterDescriber @@ -49,6 +51,11 @@ func New(scope PublicIPScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile gets/creates/updates a public ip. func (s *Service) Reconcile(ctx context.Context) error { ctx, log, done := tele.StartSpanWithLogger(ctx, "publicips.Service.Reconcile") diff --git a/azure/services/roleassignments/roleassignments.go b/azure/services/roleassignments/roleassignments.go index 239f7e5402c..dd360a3c760 100644 --- a/azure/services/roleassignments/roleassignments.go +++ b/azure/services/roleassignments/roleassignments.go @@ -31,7 +31,10 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) -const azureBuiltInContributorID = "b24988ac-6180-42a0-ab88-20f7382dd24c" +const ( + azureBuiltInContributorID = "b24988ac-6180-42a0-ab88-20f7382dd24c" + serviceName = "roleassignments" +) // RoleAssignmentScope defines the scope interface for a role assignment service. type RoleAssignmentScope interface { @@ -57,6 +60,11 @@ func New(scope RoleAssignmentScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile creates a role assignment. func (s *Service) Reconcile(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "roleassignments.Service.Reconcile") diff --git a/azure/services/routetables/routetables.go b/azure/services/routetables/routetables.go index 456aa77c14d..02b763371fd 100644 --- a/azure/services/routetables/routetables.go +++ b/azure/services/routetables/routetables.go @@ -51,6 +51,11 @@ func New(scope RouteTableScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile gets/creates/updates route tables. 
func (s *Service) Reconcile(ctx context.Context) error { ctx, log, done := tele.StartSpanWithLogger(ctx, "routetables.Service.Reconcile") diff --git a/azure/services/scalesets/scalesets.go b/azure/services/scalesets/scalesets.go index 0f6dc793479..ea036295191 100644 --- a/azure/services/scalesets/scalesets.go +++ b/azure/services/scalesets/scalesets.go @@ -35,6 +35,8 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) +const serviceName = "scalesets" + type ( // ScaleSetScope defines the scope interface for a scale sets service. ScaleSetScope interface { @@ -59,8 +61,8 @@ type ( } ) -// NewService creates a new service. -func NewService(scope ScaleSetScope, skuCache *resourceskus.Cache) *Service { +// New creates a new service. +func New(scope ScaleSetScope, skuCache *resourceskus.Cache) *Service { return &Service{ Client: NewClient(scope), Scope: scope, @@ -68,6 +70,11 @@ func NewService(scope ScaleSetScope, skuCache *resourceskus.Cache) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile idempotently gets, creates, and updates a scale set. 
func (s *Service) Reconcile(ctx context.Context) (retErr error) { ctx, log, done := tele.StartSpanWithLogger(ctx, "scalesets.Service.Reconcile") diff --git a/azure/services/scalesets/scalesets_test.go b/azure/services/scalesets/scalesets_test.go index d49591f2ed5..2bb1e5de371 100644 --- a/azure/services/scalesets/scalesets_test.go +++ b/azure/services/scalesets/scalesets_test.go @@ -92,7 +92,7 @@ func TestNewService(t *testing.T) { ClusterScope: s, }) g.Expect(err).NotTo(HaveOccurred()) - actual := NewService(mps, resourceskus.NewStaticCache(nil, "")) + actual := New(mps, resourceskus.NewStaticCache(nil, "")) g.Expect(actual).ToNot(BeNil()) } diff --git a/azure/services/scalesetvms/scalesetvms.go b/azure/services/scalesetvms/scalesetvms.go index 85fc7e3a9ad..4d8b8852557 100644 --- a/azure/services/scalesetvms/scalesetvms.go +++ b/azure/services/scalesetvms/scalesetvms.go @@ -54,6 +54,11 @@ func NewService(scope ScaleSetVMScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile idempotently gets, creates, and updates a scale set. func (s *Service) Reconcile(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "scalesetvms.Service.Reconcile") diff --git a/azure/services/securitygroups/securitygroups.go b/azure/services/securitygroups/securitygroups.go index 5a53d476817..f797b01393a 100644 --- a/azure/services/securitygroups/securitygroups.go +++ b/azure/services/securitygroups/securitygroups.go @@ -51,6 +51,11 @@ func New(scope NSGScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile gets/creates/updates network security groups. 
func (s *Service) Reconcile(ctx context.Context) error { ctx, log, done := tele.StartSpanWithLogger(ctx, "securitygroups.Service.Reconcile") diff --git a/azure/services/subnets/subnets.go b/azure/services/subnets/subnets.go index aa3a713e840..9af9429debc 100644 --- a/azure/services/subnets/subnets.go +++ b/azure/services/subnets/subnets.go @@ -55,6 +55,11 @@ func New(scope SubnetScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile gets/creates/updates a subnet. func (s *Service) Reconcile(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "subnets.Service.Reconcile") diff --git a/azure/services/tags/tags.go b/azure/services/tags/tags.go index e10998d508d..6361d9c83f8 100644 --- a/azure/services/tags/tags.go +++ b/azure/services/tags/tags.go @@ -27,6 +27,8 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) +const serviceName = "tags" + // TagScope defines the scope interface for a tags service. type TagScope interface { azure.Authorizer @@ -50,6 +52,11 @@ func New(scope TagScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile ensures tags are correct. func (s *Service) Reconcile(ctx context.Context) error { ctx, log, done := tele.StartSpanWithLogger(ctx, "tags.Service.Reconcile") diff --git a/azure/services/virtualmachines/virtualmachines.go b/azure/services/virtualmachines/virtualmachines.go index 57daeaa0678..a2ffa5fe9c3 100644 --- a/azure/services/virtualmachines/virtualmachines.go +++ b/azure/services/virtualmachines/virtualmachines.go @@ -67,6 +67,11 @@ func New(scope VMScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile gets/creates/updates a virtual machine. 
func (s *Service) Reconcile(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "virtualmachines.Service.Reconcile") diff --git a/azure/services/virtualnetworks/virtualnetworks.go b/azure/services/virtualnetworks/virtualnetworks.go index 8206e52cfc0..4cc9bd89d6d 100644 --- a/azure/services/virtualnetworks/virtualnetworks.go +++ b/azure/services/virtualnetworks/virtualnetworks.go @@ -59,6 +59,11 @@ func New(scope VNetScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + func (s *Service) Reconcile(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "virtualnetworks.Service.Reconcile") defer done() diff --git a/azure/services/vmextensions/vmextensions.go b/azure/services/vmextensions/vmextensions.go index 63b15e01642..b77ade4ad4d 100644 --- a/azure/services/vmextensions/vmextensions.go +++ b/azure/services/vmextensions/vmextensions.go @@ -26,6 +26,8 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) +const serviceName = "vmextensions" + // VMExtensionScope defines the scope interface for a vm extension service. type VMExtensionScope interface { azure.ClusterDescriber @@ -47,6 +49,11 @@ func New(scope VMExtensionScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile creates or updates the VM extension. 
func (s *Service) Reconcile(ctx context.Context) error { ctx, log, done := tele.StartSpanWithLogger(ctx, "vmextensions.Service.Reconcile") diff --git a/azure/services/vmssextensions/vmssextensions.go b/azure/services/vmssextensions/vmssextensions.go index 0c0c87c0454..cc34dc240f1 100644 --- a/azure/services/vmssextensions/vmssextensions.go +++ b/azure/services/vmssextensions/vmssextensions.go @@ -25,6 +25,8 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/util/tele" ) +const serviceName = "vmssextensions" + // VMSSExtensionScope defines the scope interface for a vmss extension service. type VMSSExtensionScope interface { azure.ClusterDescriber @@ -46,6 +48,11 @@ func New(scope VMSSExtensionScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile creates or updates the VMSS extension. func (s *Service) Reconcile(ctx context.Context) error { _, _, done := tele.StartSpanWithLogger(ctx, "vmssextensions.Service.Reconcile") diff --git a/azure/services/vnetpeerings/vnetpeerings.go b/azure/services/vnetpeerings/vnetpeerings.go index 0aa995eda87..6321a203b9b 100644 --- a/azure/services/vnetpeerings/vnetpeerings.go +++ b/azure/services/vnetpeerings/vnetpeerings.go @@ -50,6 +50,11 @@ func New(scope VnetPeeringScope) *Service { } } +// Name returns the service name. +func (s *Service) Name() string { + return serviceName +} + // Reconcile gets/creates/updates a peering. func (s *Service) Reconcile(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "vnetpeerings.Service.Reconcile") diff --git a/controllers/azurecluster_reconciler.go b/controllers/azurecluster_reconciler.go index 26e6d5b5063..2f6c229ccd9 100644 --- a/controllers/azurecluster_reconciler.go +++ b/controllers/azurecluster_reconciler.go @@ -41,20 +41,11 @@ import ( // azureClusterService is the reconciler called by the AzureCluster controller. 
type azureClusterService struct { - scope *scope.ClusterScope - groupsSvc azure.Reconciler - vnetSvc azure.Reconciler - securityGroupSvc azure.Reconciler - routeTableSvc azure.Reconciler - subnetsSvc azure.Reconciler - publicIPSvc azure.Reconciler - loadBalancerSvc azure.Reconciler - privateDNSSvc azure.Reconciler - bastionSvc azure.Reconciler - skuCache *resourceskus.Cache - natGatewaySvc azure.Reconciler - peeringsSvc azure.Reconciler - tagsSvc azure.Reconciler + scope *scope.ClusterScope + // services is the list of services that are reconciled by this controller. + // The order of the services is important as it determines the order in which the services are reconciled. + services []azure.ServiceReconciler + skuCache *resourceskus.Cache } // newAzureClusterService populates all the services based on input scope. @@ -63,27 +54,26 @@ func newAzureClusterService(scope *scope.ClusterScope) (*azureClusterService, er if err != nil { return nil, errors.Wrap(err, "failed creating a NewCache") } - return &azureClusterService{ - scope: scope, - groupsSvc: groups.New(scope), - vnetSvc: virtualnetworks.New(scope), - securityGroupSvc: securitygroups.New(scope), - routeTableSvc: routetables.New(scope), - natGatewaySvc: natgateways.New(scope), - subnetsSvc: subnets.New(scope), - publicIPSvc: publicips.New(scope), - loadBalancerSvc: loadbalancers.New(scope), - privateDNSSvc: privatedns.New(scope), - bastionSvc: bastionhosts.New(scope), - skuCache: skuCache, - peeringsSvc: vnetpeerings.New(scope), - tagsSvc: tags.New(scope), + scope: scope, + services: []azure.ServiceReconciler{ + groups.New(scope), + virtualnetworks.New(scope), + securitygroups.New(scope), + routetables.New(scope), + publicips.New(scope), + natgateways.New(scope), + subnets.New(scope), + vnetpeerings.New(scope), + loadbalancers.New(scope), + privatedns.New(scope), + bastionhosts.New(scope), + tags.New(scope), + }, + skuCache: skuCache, }, nil } -var _ azure.Reconciler = (*azureClusterService)(nil) - // 
Reconcile reconciles all the services in a predetermined order. func (s *azureClusterService) Reconcile(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.azureClusterService.Reconcile") @@ -96,52 +86,10 @@ func (s *azureClusterService) Reconcile(ctx context.Context) error { s.scope.SetDNSName() s.scope.SetControlPlaneSecurityRules() - if err := s.groupsSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to reconcile resource group") - } - - if err := s.vnetSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to reconcile virtual network") - } - - if err := s.securityGroupSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to reconcile network security group") - } - - if err := s.routeTableSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to reconcile route table") - } - - if err := s.publicIPSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to reconcile public IP") - } - - if err := s.natGatewaySvc.Reconcile(ctx); err != nil { - return errors.Wrapf(err, "failed to reconcile NAT gateway") - } - - if err := s.subnetsSvc.Reconcile(ctx); err != nil { - return errors.Wrapf(err, "failed to reconcile subnet") - } - - if err := s.peeringsSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to reconcile peerings") - } - - if err := s.loadBalancerSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to reconcile load balancer") - } - - if err := s.privateDNSSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to reconcile private dns") - } - - if err := s.bastionSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to reconcile bastion") - } - - if err := s.tagsSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "unable to update tags") + for _, service := range s.services { + if err := service.Reconcile(ctx); err != nil { + return errors.Wrapf(err, "failed to reconcile AzureCluster service %s", 
service.Name()) + } } return nil @@ -152,46 +100,18 @@ func (s *azureClusterService) Delete(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.azureClusterService.Delete") defer done() - if err := s.groupsSvc.Delete(ctx); err != nil { + groupSvc, err := s.getService(groups.ServiceName) + if err != nil { + return err + } + if err := groupSvc.Delete(ctx); err != nil { if errors.Is(err, azure.ErrNotOwned) { - if err := s.bastionSvc.Delete(ctx); err != nil { - return errors.Wrap(err, "failed to delete bastion") - } - - if err := s.privateDNSSvc.Delete(ctx); err != nil { - return errors.Wrap(err, "failed to delete private dns") - } - - if err := s.loadBalancerSvc.Delete(ctx); err != nil { - return errors.Wrap(err, "failed to delete load balancer") - } - - if err := s.peeringsSvc.Delete(ctx); err != nil { - return errors.Wrap(err, "failed to delete peerings") - } - - if err := s.subnetsSvc.Delete(ctx); err != nil { - return errors.Wrap(err, "failed to delete subnet") - } - - if err := s.natGatewaySvc.Delete(ctx); err != nil { - return errors.Wrapf(err, "failed to delete NAT gateway") - } - - if err := s.publicIPSvc.Delete(ctx); err != nil { - return errors.Wrapf(err, "failed to delete public IP") - } - - if err := s.routeTableSvc.Delete(ctx); err != nil { - return errors.Wrap(err, "failed to delete route table") - } - - if err := s.securityGroupSvc.Delete(ctx); err != nil { - return errors.Wrap(err, "failed to delete network security group") - } - - if err := s.vnetSvc.Delete(ctx); err != nil { - return errors.Wrap(err, "failed to delete virtual network") + // If the resource group is not managed we need to delete resources inside the group one by one. + // services are deleted in reverse order from the order in which they are reconciled. 
+ for i := len(s.services) - 1; i >= 1; i-- { + if err := s.services[i].Delete(ctx); err != nil { + return errors.Wrapf(err, "failed to delete AzureCluster service %s", s.services[i].Name()) + } } } else { return errors.Wrap(err, "failed to delete resource group") @@ -201,6 +121,15 @@ func (s *azureClusterService) Delete(ctx context.Context) error { return nil } +func (s *azureClusterService) getService(name string) (azure.Reconciler, error) { + for _, service := range s.services { + if service.Name() == name { + return service, nil + } + } + return nil, errors.Errorf("service %s not found", name) +} + // setFailureDomainsForLocation sets the AzureCluster Status failure domains based on which Azure Availability Zones are available in the cluster location. // Note that this is not done in a webhook as it requires API calls to fetch the availability zones. func (s *azureClusterService) setFailureDomainsForLocation(ctx context.Context) error { diff --git a/controllers/azurecluster_reconciler_test.go b/controllers/azurecluster_reconciler_test.go index 58e6a42857a..1eb06c3b270 100644 --- a/controllers/azurecluster_reconciler_test.go +++ b/controllers/azurecluster_reconciler_test.go @@ -28,74 +28,116 @@ import ( "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/mock_azure" "sigs.k8s.io/cluster-api-provider-azure/azure/scope" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/groups" "sigs.k8s.io/cluster-api-provider-azure/azure/services/resourceskus" gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) -type expect func(grp *mock_azure.MockReconcilerMockRecorder, vnet *mock_azure.MockReconcilerMockRecorder, sg *mock_azure.MockReconcilerMockRecorder, rt *mock_azure.MockReconcilerMockRecorder, sn *mock_azure.MockReconcilerMockRecorder, natg *mock_azure.MockReconcilerMockRecorder, pip *mock_azure.MockReconcilerMockRecorder, lb 
*mock_azure.MockReconcilerMockRecorder, dns *mock_azure.MockReconcilerMockRecorder, bastion *mock_azure.MockReconcilerMockRecorder, peer *mock_azure.MockReconcilerMockRecorder) +func TestAzureClusterServiceReconcile(t *testing.T) { + cases := map[string]struct { + expectedError string + expect func(one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) + }{ + "all services are reconciled in order": { + expectedError: "", + expect: func(one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { + gomock.InOrder( + one.Reconcile(gomockinternal.AContext()).Return(nil), + two.Reconcile(gomockinternal.AContext()).Return(nil), + three.Reconcile(gomockinternal.AContext()).Return(nil)) + }, + }, + "service reconcile fails": { + expectedError: "failed to reconcile AzureCluster service two: some error happened", + expect: func(one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { + gomock.InOrder( + one.Reconcile(gomockinternal.AContext()).Return(nil), + two.Reconcile(gomockinternal.AContext()).Return(errors.New("some error happened")), + two.Name().Return("two")) + }, + }, + } + + for name, tc := range cases { + tc := tc + t.Run(name, func(t *testing.T) { + g := NewWithT(t) + + t.Parallel() + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + svcOneMock := mock_azure.NewMockServiceReconciler(mockCtrl) + svcTwoMock := mock_azure.NewMockServiceReconciler(mockCtrl) + svcThreeMock := mock_azure.NewMockServiceReconciler(mockCtrl) + + tc.expect(svcOneMock.EXPECT(), svcTwoMock.EXPECT(), svcThreeMock.EXPECT()) + + s := &azureClusterService{ + scope: &scope.ClusterScope{ + Cluster: &clusterv1.Cluster{}, + AzureCluster: &infrav1.AzureCluster{}, + }, + services: 
[]azure.ServiceReconciler{ + svcOneMock, + svcTwoMock, + svcThreeMock, + }, + skuCache: resourceskus.NewStaticCache([]compute.ResourceSku{}, ""), + } + + err := s.Reconcile(context.TODO()) + if tc.expectedError != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err).To(MatchError(tc.expectedError)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} -func TestAzureClusterReconcilerDelete(t *testing.T) { +func TestAzureClusterServiceDelete(t *testing.T) { cases := map[string]struct { expectedError string - expect expect + expect func(grp *mock_azure.MockServiceReconcilerMockRecorder, one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) }{ "Resource Group is deleted successfully": { expectedError: "", - expect: func(grp *mock_azure.MockReconcilerMockRecorder, vnet *mock_azure.MockReconcilerMockRecorder, sg *mock_azure.MockReconcilerMockRecorder, rt *mock_azure.MockReconcilerMockRecorder, sn *mock_azure.MockReconcilerMockRecorder, natg *mock_azure.MockReconcilerMockRecorder, pip *mock_azure.MockReconcilerMockRecorder, lb *mock_azure.MockReconcilerMockRecorder, dns *mock_azure.MockReconcilerMockRecorder, bastion *mock_azure.MockReconcilerMockRecorder, peer *mock_azure.MockReconcilerMockRecorder) { + expect: func(grp *mock_azure.MockServiceReconcilerMockRecorder, one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { gomock.InOrder( + grp.Name().Return(groups.ServiceName), grp.Delete(gomockinternal.AContext()).Return(nil)) }, }, "Resource Group delete fails": { expectedError: "failed to delete resource group: internal error", - expect: func(grp *mock_azure.MockReconcilerMockRecorder, vnet *mock_azure.MockReconcilerMockRecorder, sg *mock_azure.MockReconcilerMockRecorder, rt *mock_azure.MockReconcilerMockRecorder, sn *mock_azure.MockReconcilerMockRecorder, 
natg *mock_azure.MockReconcilerMockRecorder, pip *mock_azure.MockReconcilerMockRecorder, lb *mock_azure.MockReconcilerMockRecorder, dns *mock_azure.MockReconcilerMockRecorder, bastion *mock_azure.MockReconcilerMockRecorder, peer *mock_azure.MockReconcilerMockRecorder) { + expect: func(grp *mock_azure.MockServiceReconcilerMockRecorder, one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { gomock.InOrder( + grp.Name().Return(groups.ServiceName), grp.Delete(gomockinternal.AContext()).Return(errors.New("internal error"))) }, }, "Resource Group not owned by cluster": { expectedError: "", - expect: func(grp *mock_azure.MockReconcilerMockRecorder, vnet *mock_azure.MockReconcilerMockRecorder, sg *mock_azure.MockReconcilerMockRecorder, rt *mock_azure.MockReconcilerMockRecorder, sn *mock_azure.MockReconcilerMockRecorder, natg *mock_azure.MockReconcilerMockRecorder, pip *mock_azure.MockReconcilerMockRecorder, lb *mock_azure.MockReconcilerMockRecorder, dns *mock_azure.MockReconcilerMockRecorder, bastion *mock_azure.MockReconcilerMockRecorder, peer *mock_azure.MockReconcilerMockRecorder) { + expect: func(grp *mock_azure.MockServiceReconcilerMockRecorder, one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { gomock.InOrder( + grp.Name().Return(groups.ServiceName), grp.Delete(gomockinternal.AContext()).Return(azure.ErrNotOwned), - bastion.Delete(gomockinternal.AContext()), - dns.Delete(gomockinternal.AContext()), - lb.Delete(gomockinternal.AContext()), - peer.Delete(gomockinternal.AContext()), - sn.Delete(gomockinternal.AContext()), - natg.Delete(gomockinternal.AContext()), - pip.Delete(gomockinternal.AContext()), - rt.Delete(gomockinternal.AContext()), - sg.Delete(gomockinternal.AContext()), - vnet.Delete(gomockinternal.AContext()), - ) + 
three.Delete(gomockinternal.AContext()).Return(nil), + two.Delete(gomockinternal.AContext()).Return(nil), + one.Delete(gomockinternal.AContext()).Return(nil)) }, }, - "Load Balancer delete fails": { - expectedError: "failed to delete load balancer: some error happened", - expect: func(grp *mock_azure.MockReconcilerMockRecorder, vnet *mock_azure.MockReconcilerMockRecorder, sg *mock_azure.MockReconcilerMockRecorder, rt *mock_azure.MockReconcilerMockRecorder, sn *mock_azure.MockReconcilerMockRecorder, pip *mock_azure.MockReconcilerMockRecorder, natg *mock_azure.MockReconcilerMockRecorder, lb *mock_azure.MockReconcilerMockRecorder, dns *mock_azure.MockReconcilerMockRecorder, bastion *mock_azure.MockReconcilerMockRecorder, peer *mock_azure.MockReconcilerMockRecorder) { + "service delete fails": { + expectedError: "failed to delete AzureCluster service two: some error happened", + expect: func(grp *mock_azure.MockServiceReconcilerMockRecorder, one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { gomock.InOrder( + grp.Name().Return(groups.ServiceName), grp.Delete(gomockinternal.AContext()).Return(azure.ErrNotOwned), - bastion.Delete(gomockinternal.AContext()), - dns.Delete(gomockinternal.AContext()), - lb.Delete(gomockinternal.AContext()).Return(errors.New("some error happened")), - ) - }, - }, - "Route table delete fails": { - expectedError: "failed to delete route table: some error happened", - expect: func(grp *mock_azure.MockReconcilerMockRecorder, vnet *mock_azure.MockReconcilerMockRecorder, sg *mock_azure.MockReconcilerMockRecorder, rt *mock_azure.MockReconcilerMockRecorder, sn *mock_azure.MockReconcilerMockRecorder, pip *mock_azure.MockReconcilerMockRecorder, natg *mock_azure.MockReconcilerMockRecorder, lb *mock_azure.MockReconcilerMockRecorder, dns *mock_azure.MockReconcilerMockRecorder, bastion *mock_azure.MockReconcilerMockRecorder, peer 
*mock_azure.MockReconcilerMockRecorder) { - gomock.InOrder( - grp.Delete(gomockinternal.AContext()).Return(azure.ErrNotOwned), - bastion.Delete(gomockinternal.AContext()), - dns.Delete(gomockinternal.AContext()), - lb.Delete(gomockinternal.AContext()), - peer.Delete(gomockinternal.AContext()), - sn.Delete(gomockinternal.AContext()), - pip.Delete(gomockinternal.AContext()), - natg.Delete(gomockinternal.AContext()), - rt.Delete(gomockinternal.AContext()).Return(errors.New("some error happened")), - ) + three.Delete(gomockinternal.AContext()).Return(nil), + two.Delete(gomockinternal.AContext()).Return(errors.New("some error happened")), + two.Name().Return("two")) }, }, } @@ -108,36 +150,24 @@ func TestAzureClusterReconcilerDelete(t *testing.T) { t.Parallel() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() - groupsMock := mock_azure.NewMockReconciler(mockCtrl) - vnetMock := mock_azure.NewMockReconciler(mockCtrl) - sgMock := mock_azure.NewMockReconciler(mockCtrl) - rtMock := mock_azure.NewMockReconciler(mockCtrl) - subnetsMock := mock_azure.NewMockReconciler(mockCtrl) - natGatewaysMock := mock_azure.NewMockReconciler(mockCtrl) - publicIPMock := mock_azure.NewMockReconciler(mockCtrl) - lbMock := mock_azure.NewMockReconciler(mockCtrl) - dnsMock := mock_azure.NewMockReconciler(mockCtrl) - bastionMock := mock_azure.NewMockReconciler(mockCtrl) - peeringsMock := mock_azure.NewMockReconciler(mockCtrl) - - tc.expect(groupsMock.EXPECT(), vnetMock.EXPECT(), sgMock.EXPECT(), rtMock.EXPECT(), subnetsMock.EXPECT(), natGatewaysMock.EXPECT(), publicIPMock.EXPECT(), lbMock.EXPECT(), dnsMock.EXPECT(), bastionMock.EXPECT(), peeringsMock.EXPECT()) + groupsMock := mock_azure.NewMockServiceReconciler(mockCtrl) + svcOneMock := mock_azure.NewMockServiceReconciler(mockCtrl) + svcTwoMock := mock_azure.NewMockServiceReconciler(mockCtrl) + svcThreeMock := mock_azure.NewMockServiceReconciler(mockCtrl) + + tc.expect(groupsMock.EXPECT(), svcOneMock.EXPECT(), svcTwoMock.EXPECT(), 
svcThreeMock.EXPECT()) s := &azureClusterService{ scope: &scope.ClusterScope{ AzureCluster: &infrav1.AzureCluster{}, }, - groupsSvc: groupsMock, - vnetSvc: vnetMock, - securityGroupSvc: sgMock, - routeTableSvc: rtMock, - natGatewaySvc: natGatewaysMock, - subnetsSvc: subnetsMock, - publicIPSvc: publicIPMock, - loadBalancerSvc: lbMock, - privateDNSSvc: dnsMock, - bastionSvc: bastionMock, - peeringsSvc: peeringsMock, - skuCache: resourceskus.NewStaticCache([]compute.ResourceSku{}, ""), + services: []azure.ServiceReconciler{ + groupsMock, + svcOneMock, + svcTwoMock, + svcThreeMock, + }, + skuCache: resourceskus.NewStaticCache([]compute.ResourceSku{}, ""), } err := s.Delete(context.TODO()) diff --git a/controllers/azuremachine_reconciler.go b/controllers/azuremachine_reconciler.go index 456f818036a..e80accfad0d 100644 --- a/controllers/azuremachine_reconciler.go +++ b/controllers/azuremachine_reconciler.go @@ -37,21 +37,13 @@ import ( // azureMachineService is the group of services called by the AzureMachine controller. type azureMachineService struct { - scope *scope.MachineScope - networkInterfacesSvc azure.Reconciler - inboundNatRulesSvc azure.Reconciler - virtualMachinesSvc azure.Reconciler - roleAssignmentsSvc azure.Reconciler - disksSvc azure.Reconciler - publicIPsSvc azure.Reconciler - tagsSvc azure.Reconciler - vmExtensionsSvc azure.Reconciler - availabilitySetsSvc azure.Reconciler - skuCache *resourceskus.Cache + scope *scope.MachineScope + // services is the list of services to be reconciled. + // The order of the services is important as it determines the order in which the services are reconciled. + services []azure.ServiceReconciler + skuCache *resourceskus.Cache } -var _ azure.Reconciler = (*azureMachineService)(nil) - // newAzureMachineService populates all the services based on input scope. 
func newAzureMachineService(machineScope *scope.MachineScope) (*azureMachineService, error) { cache, err := resourceskus.GetCache(machineScope, machineScope.Location()) @@ -60,17 +52,19 @@ func newAzureMachineService(machineScope *scope.MachineScope) (*azureMachineServ } return &azureMachineService{ - scope: machineScope, - inboundNatRulesSvc: inboundnatrules.New(machineScope), - networkInterfacesSvc: networkinterfaces.New(machineScope, cache), - virtualMachinesSvc: virtualmachines.New(machineScope), - roleAssignmentsSvc: roleassignments.New(machineScope), - disksSvc: disks.New(machineScope), - publicIPsSvc: publicips.New(machineScope), - tagsSvc: tags.New(machineScope), - vmExtensionsSvc: vmextensions.New(machineScope), - availabilitySetsSvc: availabilitysets.New(machineScope, cache), - skuCache: cache, + scope: machineScope, + services: []azure.ServiceReconciler{ + publicips.New(machineScope), + inboundnatrules.New(machineScope), + networkinterfaces.New(machineScope, cache), + availabilitysets.New(machineScope, cache), + disks.New(machineScope), + virtualmachines.New(machineScope), + roleassignments.New(machineScope), + vmextensions.New(machineScope), + tags.New(machineScope), + }, + skuCache: cache, }, nil } @@ -83,36 +77,10 @@ func (s *azureMachineService) Reconcile(ctx context.Context) error { return errors.Wrap(err, "failed defaulting subnet name") } - if err := s.publicIPsSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to create public IP") - } - - if err := s.inboundNatRulesSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to create inbound NAT rule") - } - - if err := s.networkInterfacesSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to create network interface") - } - - if err := s.availabilitySetsSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to create availability set") - } - - if err := s.virtualMachinesSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to create 
virtual machine") - } - - if err := s.roleAssignmentsSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "unable to create role assignment") - } - - if err := s.vmExtensionsSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "unable to create vm extension") - } - - if err := s.tagsSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "unable to update tags") + for _, service := range s.services { + if err := service.Reconcile(ctx); err != nil { + return errors.Wrapf(err, "failed to reconcile AzureMachine service %s", service.Name()) + } } return nil @@ -123,28 +91,11 @@ func (s *azureMachineService) Delete(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.azureMachineService.Delete") defer done() - if err := s.virtualMachinesSvc.Delete(ctx); err != nil { - return errors.Wrap(err, "failed to delete machine") - } - - if err := s.networkInterfacesSvc.Delete(ctx); err != nil { - return errors.Wrap(err, "failed to delete network interface") - } - - if err := s.inboundNatRulesSvc.Delete(ctx); err != nil { - return errors.Wrap(err, "failed to delete inbound NAT rule") - } - - if err := s.publicIPsSvc.Delete(ctx); err != nil { - return errors.Wrap(err, "failed to delete public IPs") - } - - if err := s.disksSvc.Delete(ctx); err != nil { - return errors.Wrap(err, "failed to delete OS disk") - } - - if err := s.availabilitySetsSvc.Delete(ctx); err != nil { - return errors.Wrap(err, "failed to delete availability set") + // Delete services in reverse order of creation. 
+ for i := len(s.services) - 1; i >= 0; i-- { + if err := s.services[i].Delete(ctx); err != nil { + return errors.Wrapf(err, "failed to delete AzureMachine service %s", s.services[i].Name()) + } } return nil diff --git a/controllers/azuremachine_reconciler_test.go b/controllers/azuremachine_reconciler_test.go new file mode 100644 index 00000000000..54371bee889 --- /dev/null +++ b/controllers/azuremachine_reconciler_test.go @@ -0,0 +1,172 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-04-01/compute" + "github.com/golang/mock/gomock" + . 
"github.com/onsi/gomega" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/mock_azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/scope" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/resourceskus" + gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +func TestAzureMachineServiceReconcile(t *testing.T) { + cases := map[string]struct { + expectedError string + expect func(one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) + }{ + "all services are reconciled in order": { + expectedError: "", + expect: func(one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { + gomock.InOrder( + one.Reconcile(gomockinternal.AContext()).Return(nil), + two.Reconcile(gomockinternal.AContext()).Return(nil), + three.Reconcile(gomockinternal.AContext()).Return(nil)) + }, + }, + "service reconcile fails": { + expectedError: "failed to reconcile AzureMachine service foo: some error happened", + expect: func(one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { + gomock.InOrder( + one.Reconcile(gomockinternal.AContext()).Return(nil), + two.Reconcile(gomockinternal.AContext()).Return(errors.New("some error happened")), + two.Name().Return("foo")) + }, + }, + } + + for name, tc := range cases { + tc := tc + t.Run(name, func(t *testing.T) { + g := NewWithT(t) + + t.Parallel() + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + svcOneMock := mock_azure.NewMockServiceReconciler(mockCtrl) + svcTwoMock := mock_azure.NewMockServiceReconciler(mockCtrl) + svcThreeMock := 
mock_azure.NewMockServiceReconciler(mockCtrl) + + tc.expect(svcOneMock.EXPECT(), svcTwoMock.EXPECT(), svcThreeMock.EXPECT()) + + s := &azureMachineService{ + scope: &scope.MachineScope{ + ClusterScoper: &scope.ClusterScope{ + AzureCluster: &infrav1.AzureCluster{}, + Cluster: &clusterv1.Cluster{}, + }, + Machine: &clusterv1.Machine{}, + AzureMachine: &infrav1.AzureMachine{ + Spec: infrav1.AzureMachineSpec{ + SubnetName: "test-subnet", + }, + }, + }, + services: []azure.ServiceReconciler{ + svcOneMock, + svcTwoMock, + svcThreeMock, + }, + skuCache: resourceskus.NewStaticCache([]compute.ResourceSku{}, ""), + } + + err := s.Reconcile(context.TODO()) + if tc.expectedError != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err).To(MatchError(tc.expectedError)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} + +func TestAzureMachineServiceDelete(t *testing.T) { + cases := map[string]struct { + expectedError string + expect func(one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) + }{ + "all services deleted in order": { + expectedError: "", + expect: func(one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { + gomock.InOrder( + three.Delete(gomockinternal.AContext()).Return(nil), + two.Delete(gomockinternal.AContext()).Return(nil), + one.Delete(gomockinternal.AContext()).Return(nil)) + }, + }, + "service delete fails": { + expectedError: "failed to delete AzureMachine service test-service-two: some error happened", + expect: func(one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { + gomock.InOrder( + three.Delete(gomockinternal.AContext()).Return(nil), + two.Delete(gomockinternal.AContext()).Return(errors.New("some error happened")), + 
two.Name().Return("test-service-two")) + }, + }, + } + + for name, tc := range cases { + tc := tc + t.Run(name, func(t *testing.T) { + g := NewWithT(t) + + t.Parallel() + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + svcOneMock := mock_azure.NewMockServiceReconciler(mockCtrl) + svcTwoMock := mock_azure.NewMockServiceReconciler(mockCtrl) + svcThreeMock := mock_azure.NewMockServiceReconciler(mockCtrl) + + tc.expect(svcOneMock.EXPECT(), svcTwoMock.EXPECT(), svcThreeMock.EXPECT()) + + s := &azureMachineService{ + scope: &scope.MachineScope{ + ClusterScoper: &scope.ClusterScope{ + AzureCluster: &infrav1.AzureCluster{}, + Cluster: &clusterv1.Cluster{}, + }, + Machine: &clusterv1.Machine{}, + AzureMachine: &infrav1.AzureMachine{}, + }, + services: []azure.ServiceReconciler{ + svcOneMock, + svcTwoMock, + svcThreeMock, + }, + skuCache: resourceskus.NewStaticCache([]compute.ResourceSku{}, ""), + } + + err := s.Delete(context.TODO()) + if tc.expectedError != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err).To(MatchError(tc.expectedError)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} diff --git a/exp/controllers/azuremachinepool_controller_unit_test.go b/exp/controllers/azuremachinepool_controller_unit_test.go index 725e5f6cf55..0624a124a8f 100644 --- a/exp/controllers/azuremachinepool_controller_unit_test.go +++ b/exp/controllers/azuremachinepool_controller_unit_test.go @@ -67,7 +67,7 @@ func Test_newAzureMachinePoolService(t *testing.T) { g := NewWithT(t) g.Expect(err).NotTo(HaveOccurred()) g.Expect(subject).NotTo(BeNil()) - g.Expect(subject.virtualMachinesScaleSetSvc).NotTo(BeNil()) + g.Expect(subject.services).NotTo(HaveLen(0)) g.Expect(subject.skuCache).NotTo(BeNil()) } diff --git a/exp/controllers/azuremachinepool_reconciler.go b/exp/controllers/azuremachinepool_reconciler.go index 6793c45a394..5221da6bcbb 100644 --- a/exp/controllers/azuremachinepool_reconciler.go +++ b/exp/controllers/azuremachinepool_reconciler.go @@ -31,15 
+31,11 @@ import ( // azureMachinePoolService is the group of services called by the AzureMachinePool controller. type azureMachinePoolService struct { - scope *scope.MachinePoolScope - virtualMachinesScaleSetSvc azure.Reconciler - skuCache *resourceskus.Cache - roleAssignmentsSvc azure.Reconciler - vmssExtensionSvc azure.Reconciler + scope *scope.MachinePoolScope + skuCache *resourceskus.Cache + services []azure.ServiceReconciler } -var _ azure.Reconciler = (*azureMachinePoolService)(nil) - // newAzureMachinePoolService populates all the services based on input scope. func newAzureMachinePoolService(machinePoolScope *scope.MachinePoolScope) (*azureMachinePoolService, error) { cache, err := resourceskus.GetCache(machinePoolScope, machinePoolScope.Location()) @@ -48,11 +44,13 @@ func newAzureMachinePoolService(machinePoolScope *scope.MachinePoolScope) (*azur } return &azureMachinePoolService{ - scope: machinePoolScope, - virtualMachinesScaleSetSvc: scalesets.NewService(machinePoolScope, cache), - skuCache: cache, - roleAssignmentsSvc: roleassignments.New(machinePoolScope), - vmssExtensionSvc: vmssextensions.New(machinePoolScope), + scope: machinePoolScope, + services: []azure.ServiceReconciler{ + scalesets.New(machinePoolScope, cache), + roleassignments.New(machinePoolScope), + vmssextensions.New(machinePoolScope), + }, + skuCache: cache, }, nil } @@ -65,16 +63,10 @@ func (s *azureMachinePoolService) Reconcile(ctx context.Context) error { return errors.Wrap(err, "failed defaulting subnet name") } - if err := s.virtualMachinesScaleSetSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to create scale set") - } - - if err := s.roleAssignmentsSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "unable to create role assignment") - } - - if err := s.vmssExtensionSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "unable to create vmss extension") + for _, service := range s.services { + if err := service.Reconcile(ctx); err != nil { + 
return errors.Wrapf(err, "failed to reconcile AzureMachinePool service %s", service.Name()) + } } return nil @@ -85,8 +77,12 @@ func (s *azureMachinePoolService) Delete(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.azureMachinePoolService.Delete") defer done() - if err := s.virtualMachinesScaleSetSvc.Delete(ctx); err != nil { - return errors.Wrap(err, "failed to delete scale set") + // Delete services in reverse order of creation. + for i := len(s.services) - 1; i >= 0; i-- { + if err := s.services[i].Delete(ctx); err != nil { + return errors.Wrapf(err, "failed to delete AzureMachinePool service %s", s.services[i].Name()) + } } + return nil } diff --git a/exp/controllers/azuremachinepool_reconciler_test.go b/exp/controllers/azuremachinepool_reconciler_test.go new file mode 100644 index 00000000000..cba52e79fd2 --- /dev/null +++ b/exp/controllers/azuremachinepool_reconciler_test.go @@ -0,0 +1,176 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-04-01/compute" + "github.com/golang/mock/gomock" + . 
"github.com/onsi/gomega" + infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" + "sigs.k8s.io/cluster-api-provider-azure/azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/mock_azure" + "sigs.k8s.io/cluster-api-provider-azure/azure/scope" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/resourceskus" + infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" + gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + capiv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1" +) + +func TestAzureMachinePoolServiceReconcile(t *testing.T) { + cases := map[string]struct { + expectedError string + expect func(one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) + }{ + "all services are reconciled in order": { + expectedError: "", + expect: func(one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { + gomock.InOrder( + one.Reconcile(gomockinternal.AContext()).Return(nil), + two.Reconcile(gomockinternal.AContext()).Return(nil), + three.Reconcile(gomockinternal.AContext()).Return(nil)) + }, + }, + "service reconcile fails": { + expectedError: "failed to reconcile AzureMachinePool service foo: some error happened", + expect: func(one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { + gomock.InOrder( + one.Reconcile(gomockinternal.AContext()).Return(nil), + two.Reconcile(gomockinternal.AContext()).Return(errors.New("some error happened")), + two.Name().Return("foo")) + }, + }, + } + + for name, tc := range cases { + tc := tc + t.Run(name, func(t *testing.T) { + g := NewWithT(t) + + t.Parallel() + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + svcOneMock 
:= mock_azure.NewMockServiceReconciler(mockCtrl) + svcTwoMock := mock_azure.NewMockServiceReconciler(mockCtrl) + svcThreeMock := mock_azure.NewMockServiceReconciler(mockCtrl) + + tc.expect(svcOneMock.EXPECT(), svcTwoMock.EXPECT(), svcThreeMock.EXPECT()) + + s := &azureMachinePoolService{ + scope: &scope.MachinePoolScope{ + ClusterScoper: &scope.ClusterScope{ + AzureCluster: &infrav1.AzureCluster{}, + Cluster: &clusterv1.Cluster{}, + }, + MachinePool: &capiv1exp.MachinePool{}, + AzureMachinePool: &infrav1exp.AzureMachinePool{ + Spec: infrav1exp.AzureMachinePoolSpec{ + Template: infrav1exp.AzureMachinePoolMachineTemplate{ + SubnetName: "test-subnet", + }, + }, + }, + }, + services: []azure.ServiceReconciler{ + svcOneMock, + svcTwoMock, + svcThreeMock, + }, + skuCache: resourceskus.NewStaticCache([]compute.ResourceSku{}, ""), + } + + err := s.Reconcile(context.TODO()) + if tc.expectedError != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err).To(MatchError(tc.expectedError)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} + +func TestAzureMachinePoolServiceDelete(t *testing.T) { + cases := map[string]struct { + expectedError string + expect func(one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) + }{ + "all services deleted in order": { + expectedError: "", + expect: func(one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { + gomock.InOrder( + three.Delete(gomockinternal.AContext()).Return(nil), + two.Delete(gomockinternal.AContext()).Return(nil), + one.Delete(gomockinternal.AContext()).Return(nil)) + }, + }, + "service delete fails": { + expectedError: "failed to delete AzureMachinePool service test-service-two: some error happened", + expect: func(one *mock_azure.MockServiceReconcilerMockRecorder, two 
*mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { + gomock.InOrder( + three.Delete(gomockinternal.AContext()).Return(nil), + two.Delete(gomockinternal.AContext()).Return(errors.New("some error happened")), + two.Name().Return("test-service-two")) + }, + }, + } + + for name, tc := range cases { + tc := tc + t.Run(name, func(t *testing.T) { + g := NewWithT(t) + + t.Parallel() + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + svcOneMock := mock_azure.NewMockServiceReconciler(mockCtrl) + svcTwoMock := mock_azure.NewMockServiceReconciler(mockCtrl) + svcThreeMock := mock_azure.NewMockServiceReconciler(mockCtrl) + + tc.expect(svcOneMock.EXPECT(), svcTwoMock.EXPECT(), svcThreeMock.EXPECT()) + + s := &azureMachinePoolService{ + scope: &scope.MachinePoolScope{ + ClusterScoper: &scope.ClusterScope{ + AzureCluster: &infrav1.AzureCluster{}, + Cluster: &clusterv1.Cluster{}, + }, + MachinePool: &capiv1exp.MachinePool{}, + AzureMachinePool: &infrav1exp.AzureMachinePool{}, + }, + services: []azure.ServiceReconciler{ + svcOneMock, + svcTwoMock, + svcThreeMock, + }, + skuCache: resourceskus.NewStaticCache([]compute.ResourceSku{}, ""), + } + + err := s.Delete(context.TODO()) + if tc.expectedError != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err).To(MatchError(tc.expectedError)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} diff --git a/exp/controllers/azuremanagedcontrolplane_reconciler.go b/exp/controllers/azuremanagedcontrolplane_reconciler.go index cb8c4aafe62..afc0b301d38 100644 --- a/exp/controllers/azuremanagedcontrolplane_reconciler.go +++ b/exp/controllers/azuremanagedcontrolplane_reconciler.go @@ -35,25 +35,23 @@ import ( // azureManagedControlPlaneService contains the services required by the cluster controller. 
type azureManagedControlPlaneService struct { - kubeclient client.Client - scope managedclusters.ManagedClusterScope - managedClustersSvc azure.Reconciler - groupsSvc azure.Reconciler - vnetSvc azure.Reconciler - subnetsSvc azure.Reconciler - tagsSvc azure.Reconciler + kubeclient client.Client + scope managedclusters.ManagedClusterScope + services []azure.ServiceReconciler } // newAzureManagedControlPlaneReconciler populates all the services based on input scope. func newAzureManagedControlPlaneReconciler(scope *scope.ManagedControlPlaneScope) *azureManagedControlPlaneService { return &azureManagedControlPlaneService{ - kubeclient: scope.Client, - scope: scope, - managedClustersSvc: managedclusters.New(scope), - groupsSvc: groups.New(scope), - vnetSvc: virtualnetworks.New(scope), - subnetsSvc: subnets.New(scope), - tagsSvc: tags.New(scope), + kubeclient: scope.Client, + scope: scope, + services: []azure.ServiceReconciler{ + groups.New(scope), + virtualnetworks.New(scope), + subnets.New(scope), + managedclusters.New(scope), + tags.New(scope), + }, } } @@ -62,31 +60,16 @@ func (r *azureManagedControlPlaneService) Reconcile(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.azureManagedControlPlaneService.Reconcile") defer done() - if err := r.groupsSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to reconcile managed cluster resource group") - } - - if err := r.vnetSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to reconcile virtual network") - } - - if err := r.subnetsSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "failed to reconcile subnet") - } - - // Send to Azure for create/update. 
- if err := r.managedClustersSvc.Reconcile(ctx); err != nil { - return errors.Wrapf(err, "failed to reconcile managed cluster") + for _, service := range r.services { + if err := service.Reconcile(ctx); err != nil { + return errors.Wrapf(err, "failed to reconcile AzureManagedControlPlane service %s", service.Name()) + } } if err := r.reconcileKubeconfig(ctx); err != nil { return errors.Wrap(err, "failed to reconcile kubeconfig secret") } - if err := r.tagsSvc.Reconcile(ctx); err != nil { - return errors.Wrap(err, "unable to update tags") - } - return nil } @@ -95,16 +78,11 @@ func (r *azureManagedControlPlaneService) Delete(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.azureManagedControlPlaneService.Delete") defer done() - if err := r.managedClustersSvc.Delete(ctx); err != nil { - return errors.Wrapf(err, "failed to delete managed cluster") - } - - if err := r.vnetSvc.Delete(ctx); err != nil { - return errors.Wrap(err, "failed to delete virtual network") - } - - if err := r.groupsSvc.Delete(ctx); err != nil && !errors.Is(err, azure.ErrNotOwned) { - return errors.Wrap(err, "failed to delete managed cluster resource group") + // Delete services in reverse order of creation. 
+ for i := len(r.services) - 1; i >= 0; i-- { + if err := r.services[i].Delete(ctx); err != nil { + return errors.Wrapf(err, "failed to delete AzureManagedControlPlane service %s", r.services[i].Name()) + } } return nil From 2defff06b196242c5363f9b43c2e3f719f682b8a Mon Sep 17 00:00:00 2001 From: Cecile Robert-Michon Date: Tue, 8 Mar 2022 16:55:08 -0800 Subject: [PATCH 2/2] Add IsManaged to service reconciler interface --- azure/errors.go | 3 -- azure/interfaces.go | 1 + azure/mock_azure/azure_mock.go | 15 ++++++++ azure/scope/cluster.go | 6 ---- azure/scope/machine.go | 6 ---- azure/scope/machinepool.go | 6 ---- azure/scope/machinepoolmachine.go | 6 ---- azure/scope/managedcontrolplane.go | 6 ---- .../availabilitysets/availabilitysets.go | 5 +++ azure/services/bastionhosts/bastionhosts.go | 5 +++ azure/services/disks/disks.go | 5 +++ azure/services/groups/groups.go | 12 +++---- azure/services/groups/groups_test.go | 3 +- .../inboundnatrules/inboundnatrules.go | 5 +++ azure/services/loadbalancers/loadbalancers.go | 5 +++ .../managedclusters/managedclusters.go | 5 +++ azure/services/natgateways/natgateways.go | 20 +++++++---- .../services/natgateways/natgateways_test.go | 36 +++++-------------- .../networkinterfaces/networkinterfaces.go | 5 +++ azure/services/privatedns/privatedns.go | 6 ++++ azure/services/publicips/publicips.go | 5 +++ .../roleassignments/roleassignments.go | 5 +++ azure/services/routetables/routetables.go | 17 +++++++-- azure/services/scalesets/scalesets.go | 5 +++ .../services/securitygroups/securitygroups.go | 19 ++++++++-- azure/services/subnets/subnets.go | 13 +++++-- azure/services/tags/tags.go | 5 +++ .../virtualmachines/virtualmachines.go | 5 +++ .../virtualnetworks/virtualnetworks.go | 5 +-- .../virtualnetworks/virtualnetworks_test.go | 18 +++++----- azure/services/vmextensions/vmextensions.go | 5 +++ .../services/vmssextensions/vmssextensions.go | 5 +++ azure/services/vnetpeerings/vnetpeerings.go | 5 +++ 
controllers/azurecluster_reconciler.go | 34 +++++++++++------- controllers/azurecluster_reconciler_test.go | 17 +++++++-- controllers/helpers.go | 2 +- 36 files changed, 217 insertions(+), 109 deletions(-) diff --git a/azure/errors.go b/azure/errors.go index 23a84a9b505..3925390c48d 100644 --- a/azure/errors.go +++ b/azure/errors.go @@ -26,9 +26,6 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" ) -// ErrNotOwned is returned when a resource can't be deleted because it isn't owned. -var ErrNotOwned = errors.New("resource is not managed and cannot be deleted") - const codeResourceGroupNotFound = "ResourceGroupNotFound" // ResourceGroupNotFound parses the error to check if it's a resource group not found error. diff --git a/azure/interfaces.go b/azure/interfaces.go index a10fe0ab6c5..bcf75522e3b 100644 --- a/azure/interfaces.go +++ b/azure/interfaces.go @@ -33,6 +33,7 @@ type Reconciler interface { // ServiceReconciler is an Azure service reconciler which can reconcile an Azure service. type ServiceReconciler interface { Name() string + IsManaged(ctx context.Context) (bool, error) Reconciler } diff --git a/azure/mock_azure/azure_mock.go b/azure/mock_azure/azure_mock.go index 8c853e2faa0..0f1d092bf18 100644 --- a/azure/mock_azure/azure_mock.go +++ b/azure/mock_azure/azure_mock.go @@ -118,6 +118,21 @@ func (mr *MockServiceReconcilerMockRecorder) Delete(ctx interface{}) *gomock.Cal return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockServiceReconciler)(nil).Delete), ctx) } +// IsManaged mocks base method. +func (m *MockServiceReconciler) IsManaged(ctx context.Context) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsManaged", ctx) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsManaged indicates an expected call of IsManaged. 
+func (mr *MockServiceReconcilerMockRecorder) IsManaged(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsManaged", reflect.TypeOf((*MockServiceReconciler)(nil).IsManaged), ctx) +} + // Name mocks base method. func (m *MockServiceReconciler) Name() string { m.ctrl.T.Helper() diff --git a/azure/scope/cluster.go b/azure/scope/cluster.go index b323a343511..c596b75de45 100644 --- a/azure/scope/cluster.go +++ b/azure/scope/cluster.go @@ -860,8 +860,6 @@ func (s *ClusterScope) UpdateDeleteStatus(condition clusterv1.ConditionType, ser switch { case err == nil: conditions.MarkFalse(s.AzureCluster, condition, infrav1.DeletedReason, clusterv1.ConditionSeverityInfo, "%s successfully deleted", service) - case errors.Is(err, azure.ErrNotOwned): - // do nothing case azure.IsOperationNotDoneError(err): conditions.MarkFalse(s.AzureCluster, condition, infrav1.DeletingReason, clusterv1.ConditionSeverityInfo, "%s deleting", service) default: @@ -874,8 +872,6 @@ func (s *ClusterScope) UpdatePutStatus(condition clusterv1.ConditionType, servic switch { case err == nil: conditions.MarkTrue(s.AzureCluster, condition) - case errors.Is(err, azure.ErrNotOwned): - // do nothing case azure.IsOperationNotDoneError(err): conditions.MarkFalse(s.AzureCluster, condition, infrav1.CreatingReason, clusterv1.ConditionSeverityInfo, "%s creating or updating", service) default: @@ -888,8 +884,6 @@ func (s *ClusterScope) UpdatePatchStatus(condition clusterv1.ConditionType, serv switch { case err == nil: conditions.MarkTrue(s.AzureCluster, condition) - case errors.Is(err, azure.ErrNotOwned): - // do nothing case azure.IsOperationNotDoneError(err): conditions.MarkFalse(s.AzureCluster, condition, infrav1.UpdatingReason, clusterv1.ConditionSeverityInfo, "%s updating", service) default: diff --git a/azure/scope/machine.go b/azure/scope/machine.go index a21fad7a4b2..5dcb09a4b66 100644 --- a/azure/scope/machine.go +++ b/azure/scope/machine.go @@ 
-665,8 +665,6 @@ func (m *MachineScope) UpdateDeleteStatus(condition clusterv1.ConditionType, ser switch { case err == nil: conditions.MarkFalse(m.AzureMachine, condition, infrav1.DeletedReason, clusterv1.ConditionSeverityInfo, "%s successfully deleted", service) - case errors.Is(err, azure.ErrNotOwned): - // do nothing case azure.IsOperationNotDoneError(err): conditions.MarkFalse(m.AzureMachine, condition, infrav1.DeletingReason, clusterv1.ConditionSeverityInfo, "%s deleting", service) default: @@ -679,8 +677,6 @@ func (m *MachineScope) UpdatePutStatus(condition clusterv1.ConditionType, servic switch { case err == nil: conditions.MarkTrue(m.AzureMachine, condition) - case errors.Is(err, azure.ErrNotOwned): - // do nothing case azure.IsOperationNotDoneError(err): conditions.MarkFalse(m.AzureMachine, condition, infrav1.CreatingReason, clusterv1.ConditionSeverityInfo, "%s creating or updating", service) default: @@ -693,8 +689,6 @@ func (m *MachineScope) UpdatePatchStatus(condition clusterv1.ConditionType, serv switch { case err == nil: conditions.MarkTrue(m.AzureMachine, condition) - case errors.Is(err, azure.ErrNotOwned): - // do nothing case azure.IsOperationNotDoneError(err): conditions.MarkFalse(m.AzureMachine, condition, infrav1.UpdatingReason, clusterv1.ConditionSeverityInfo, "%s updating", service) default: diff --git a/azure/scope/machinepool.go b/azure/scope/machinepool.go index da3b8a74c4f..e302ed1892e 100644 --- a/azure/scope/machinepool.go +++ b/azure/scope/machinepool.go @@ -626,8 +626,6 @@ func (m *MachinePoolScope) UpdateDeleteStatus(condition clusterv1.ConditionType, switch { case err == nil: conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.DeletedReason, clusterv1.ConditionSeverityInfo, "%s successfully deleted", service) - case errors.Is(err, azure.ErrNotOwned): - // do nothing case azure.IsOperationNotDoneError(err): conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.DeletingReason, clusterv1.ConditionSeverityInfo, "%s 
deleting", service) default: @@ -640,8 +638,6 @@ func (m *MachinePoolScope) UpdatePutStatus(condition clusterv1.ConditionType, se switch { case err == nil: conditions.MarkTrue(m.AzureMachinePool, condition) - case errors.Is(err, azure.ErrNotOwned): - // do nothing case azure.IsOperationNotDoneError(err): conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.CreatingReason, clusterv1.ConditionSeverityInfo, "%s creating or updating", service) default: @@ -654,8 +650,6 @@ func (m *MachinePoolScope) UpdatePatchStatus(condition clusterv1.ConditionType, switch { case err == nil: conditions.MarkTrue(m.AzureMachinePool, condition) - case errors.Is(err, azure.ErrNotOwned): - // do nothing case azure.IsOperationNotDoneError(err): conditions.MarkFalse(m.AzureMachinePool, condition, infrav1.UpdatingReason, clusterv1.ConditionSeverityInfo, "%s updating", service) default: diff --git a/azure/scope/machinepoolmachine.go b/azure/scope/machinepoolmachine.go index f07fe21fe0d..cf2e1014944 100644 --- a/azure/scope/machinepoolmachine.go +++ b/azure/scope/machinepoolmachine.go @@ -184,8 +184,6 @@ func (s *MachinePoolMachineScope) UpdateDeleteStatus(condition clusterv1.Conditi switch { case err == nil: conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.DeletedReason, clusterv1.ConditionSeverityInfo, "%s successfully deleted", service) - case errors.Is(err, azure.ErrNotOwned): - // do nothing case azure.IsOperationNotDoneError(err): conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.DeletingReason, clusterv1.ConditionSeverityInfo, "%s deleting", service) default: @@ -198,8 +196,6 @@ func (s *MachinePoolMachineScope) UpdatePutStatus(condition clusterv1.ConditionT switch { case err == nil: conditions.MarkTrue(s.AzureMachinePoolMachine, condition) - case errors.Is(err, azure.ErrNotOwned): - // do nothing case azure.IsOperationNotDoneError(err): conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.CreatingReason, 
clusterv1.ConditionSeverityInfo, "%s creating or updating", service) default: @@ -212,8 +208,6 @@ func (s *MachinePoolMachineScope) UpdatePatchStatus(condition clusterv1.Conditio switch { case err == nil: conditions.MarkTrue(s.AzureMachinePoolMachine, condition) - case errors.Is(err, azure.ErrNotOwned): - // do nothing case azure.IsOperationNotDoneError(err): conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.UpdatingReason, clusterv1.ConditionSeverityInfo, "%s updating", service) default: diff --git a/azure/scope/managedcontrolplane.go b/azure/scope/managedcontrolplane.go index 036d6acc320..3d5e94d3e08 100644 --- a/azure/scope/managedcontrolplane.go +++ b/azure/scope/managedcontrolplane.go @@ -695,8 +695,6 @@ func (s *ManagedControlPlaneScope) UpdateDeleteStatus(condition clusterv1.Condit switch { case err == nil: conditions.MarkFalse(s.PatchTarget, condition, infrav1.DeletedReason, clusterv1.ConditionSeverityInfo, "%s successfully deleted", service) - case errors.Is(err, azure.ErrNotOwned): - // do nothing case azure.IsOperationNotDoneError(err): conditions.MarkFalse(s.PatchTarget, condition, infrav1.DeletingReason, clusterv1.ConditionSeverityInfo, "%s deleting", service) default: @@ -709,8 +707,6 @@ func (s *ManagedControlPlaneScope) UpdatePutStatus(condition clusterv1.Condition switch { case err == nil: conditions.MarkTrue(s.PatchTarget, condition) - case errors.Is(err, azure.ErrNotOwned): - // do nothing case azure.IsOperationNotDoneError(err): conditions.MarkFalse(s.PatchTarget, condition, infrav1.CreatingReason, clusterv1.ConditionSeverityInfo, "%s creating or updating", service) default: @@ -723,8 +719,6 @@ func (s *ManagedControlPlaneScope) UpdatePatchStatus(condition clusterv1.Conditi switch { case err == nil: conditions.MarkTrue(s.PatchTarget, condition) - case errors.Is(err, azure.ErrNotOwned): - // do nothing case azure.IsOperationNotDoneError(err): conditions.MarkFalse(s.PatchTarget, condition, infrav1.UpdatingReason, 
clusterv1.ConditionSeverityInfo, "%s updating", service) default: diff --git a/azure/services/availabilitysets/availabilitysets.go b/azure/services/availabilitysets/availabilitysets.go index 7e10e9f121a..eb42763e3d1 100644 --- a/azure/services/availabilitysets/availabilitysets.go +++ b/azure/services/availabilitysets/availabilitysets.go @@ -119,3 +119,8 @@ func (s *Service) Delete(ctx context.Context) error { s.Scope.UpdateDeleteStatus(infrav1.AvailabilitySetReadyCondition, serviceName, resultingErr) return resultingErr } + +// IsManaged always returns true as CAPZ does not support BYO availability set. +func (s *Service) IsManaged(ctx context.Context) (bool, error) { + return true, nil +} diff --git a/azure/services/bastionhosts/bastionhosts.go b/azure/services/bastionhosts/bastionhosts.go index 6888959221c..98b9ea55bf8 100644 --- a/azure/services/bastionhosts/bastionhosts.go +++ b/azure/services/bastionhosts/bastionhosts.go @@ -92,3 +92,8 @@ func (s *Service) Delete(ctx context.Context) error { s.Scope.UpdateDeleteStatus(infrav1.BastionHostReadyCondition, serviceName, resultingErr) return resultingErr } + +// IsManaged always returns true as CAPZ does not support BYO bastion. +func (s *Service) IsManaged(ctx context.Context) (bool, error) { + return true, nil +} diff --git a/azure/services/disks/disks.go b/azure/services/disks/disks.go index fa0a60ab7ff..4bf9327adef 100644 --- a/azure/services/disks/disks.go +++ b/azure/services/disks/disks.go @@ -91,3 +91,8 @@ func (s *Service) Delete(ctx context.Context) error { s.Scope.UpdateDeleteStatus(infrav1.DisksReadyCondition, serviceName, result) return result } + +// IsManaged always returns true as CAPZ does not support BYO disk. 
+func (s *Service) IsManaged(ctx context.Context) (bool, error) { + return true, nil +} diff --git a/azure/services/groups/groups.go b/azure/services/groups/groups.go index bdaf2d64d7d..2f0a570048e 100644 --- a/azure/services/groups/groups.go +++ b/azure/services/groups/groups.go @@ -93,7 +93,7 @@ func (s *Service) Delete(ctx context.Context) error { } // check that the resource group is not BYO. - managed, err := s.IsGroupManaged(ctx) + managed, err := s.IsManaged(ctx) if err != nil { if azure.ResourceNotFound(err) { // already deleted or doesn't exist, cleanup status and return. @@ -104,8 +104,8 @@ func (s *Service) Delete(ctx context.Context) error { return errors.Wrap(err, "could not get resource group management state") } if !managed { - log.V(2).Info("Should not delete resource group in unmanaged mode") - return azure.ErrNotOwned + log.V(2).Info("Skipping resource group deletion in unmanaged mode") + return nil } err = s.DeleteResource(ctx, groupSpec, ServiceName) @@ -113,10 +113,10 @@ func (s *Service) Delete(ctx context.Context) error { return err } -// IsGroupManaged returns true if the resource group has an owned tag with the cluster name as value, +// IsManaged returns true if the resource group has an owned tag with the cluster name as value, // meaning that the resource group's lifecycle is managed. -func (s *Service) IsGroupManaged(ctx context.Context) (bool, error) { - ctx, _, done := tele.StartSpanWithLogger(ctx, "groups.Service.IsGroupManaged") +func (s *Service) IsManaged(ctx context.Context) (bool, error) { + ctx, _, done := tele.StartSpanWithLogger(ctx, "groups.Service.IsManaged") defer done() groupSpec := s.Scope.GroupSpec() diff --git a/azure/services/groups/groups_test.go b/azure/services/groups/groups_test.go index cca1e643bd6..ea13b53cb33 100644 --- a/azure/services/groups/groups_test.go +++ b/azure/services/groups/groups_test.go @@ -27,7 +27,6 @@ import ( "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/async/mock_async" "sigs.k8s.io/cluster-api-provider-azure/azure/services/groups/mock_groups" gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" @@ -146,7 +145,7 @@ func TestDeleteGroups(t *testing.T) { }, { name: "resource group is not managed by capz", - expectedError: azure.ErrNotOwned.Error(), + expectedError: "", expect: func(s *mock_groups.MockGroupScopeMockRecorder, m *mock_groups.MockclientMockRecorder, r *mock_async.MockReconcilerMockRecorder) { s.GroupSpec().AnyTimes().Return(&fakeGroupSpec) m.Get(gomockinternal.AContext(), &fakeGroupSpec).Return(sampleBYOGroup, nil) diff --git a/azure/services/inboundnatrules/inboundnatrules.go b/azure/services/inboundnatrules/inboundnatrules.go index 212ceee647c..eb758b37182 100644 --- a/azure/services/inboundnatrules/inboundnatrules.go +++ b/azure/services/inboundnatrules/inboundnatrules.go @@ -138,3 +138,8 @@ func (s *Service) Delete(ctx context.Context) error { s.Scope.UpdateDeleteStatus(infrav1.InboundNATRulesReadyCondition, serviceName, result) return result } + +// IsManaged always returns true as CAPZ does not support BYO inbound NAT rules. +func (s *Service) IsManaged(ctx context.Context) (bool, error) { + return true, nil +} diff --git a/azure/services/loadbalancers/loadbalancers.go b/azure/services/loadbalancers/loadbalancers.go index 2f69c157407..49aa430ee78 100644 --- a/azure/services/loadbalancers/loadbalancers.go +++ b/azure/services/loadbalancers/loadbalancers.go @@ -117,3 +117,8 @@ func (s *Service) Delete(ctx context.Context) error { s.Scope.UpdateDeleteStatus(infrav1.LoadBalancersReadyCondition, serviceName, result) return result } + +// IsManaged always returns true as CAPZ does not support BYO load balancers. 
+func (s *Service) IsManaged(ctx context.Context) (bool, error) { + return true, nil +} diff --git a/azure/services/managedclusters/managedclusters.go b/azure/services/managedclusters/managedclusters.go index 78362a7e0bf..c9b927eadbc 100644 --- a/azure/services/managedclusters/managedclusters.go +++ b/azure/services/managedclusters/managedclusters.go @@ -377,3 +377,8 @@ func (s *Service) Delete(ctx context.Context) error { klog.V(2).Infof("successfully deleted managed cluster %s ", s.Scope.ClusterName()) return nil } + +// IsManaged always returns true as CAPZ does not support BYO managed cluster. +func (s *Service) IsManaged(ctx context.Context) (bool, error) { + return true, nil +} diff --git a/azure/services/natgateways/natgateways.go b/azure/services/natgateways/natgateways.go index 1fd9ae7ed3d..4a1b5ce8bba 100644 --- a/azure/services/natgateways/natgateways.go +++ b/azure/services/natgateways/natgateways.go @@ -67,11 +67,11 @@ func (s *Service) Reconcile(ctx context.Context) error { ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultAzureServiceReconcileTimeout) defer cancel() - if !s.Scope.Vnet().IsManaged(s.Scope.ClusterName()) { + if managed, err := s.IsManaged(ctx); err == nil && !managed { log.V(4).Info("Skipping nat gateways reconcile in custom vnet mode") - - s.Scope.UpdatePutStatus(infrav1.NATGatewaysReadyCondition, serviceName, nil) return nil + } else if err != nil { + return errors.Wrap(err, "failed to check if NAT gateways are managed") } // We go through the list of NatGatewaySpecs to reconcile each one, independently of the resultingErr of the previous one. 
@@ -115,11 +115,11 @@ func (s *Service) Delete(ctx context.Context) error { ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultAzureServiceReconcileTimeout) defer cancel() - if !s.Scope.Vnet().IsManaged(s.Scope.ClusterName()) { + if managed, err := s.IsManaged(ctx); err == nil && !managed { log.V(4).Info("Skipping nat gateway deletion in custom vnet mode") - - s.Scope.UpdateDeleteStatus(infrav1.NATGatewaysReadyCondition, serviceName, nil) return nil + } else if err != nil { + return errors.Wrap(err, "failed to check if NAT gateways are managed") } specs := s.Scope.NatGatewaySpecs() @@ -141,3 +141,11 @@ func (s *Service) Delete(ctx context.Context) error { s.Scope.UpdateDeleteStatus(infrav1.NATGatewaysReadyCondition, serviceName, resultingErr) return resultingErr } + +// IsManaged returns true if the NAT gateways' lifecycles are managed. +func (s *Service) IsManaged(ctx context.Context) (bool, error) { + _, _, done := tele.StartSpanWithLogger(ctx, "natgateways.Service.IsManaged") + defer done() + + return s.Scope.IsVnetManaged(), nil +} diff --git a/azure/services/natgateways/natgateways_test.go b/azure/services/natgateways/natgateways_test.go index 910774d63cd..17817f8ed66 100644 --- a/azure/services/natgateways/natgateways_test.go +++ b/azure/services/natgateways/natgateways_test.go @@ -40,13 +40,6 @@ func init() { } var ( - customVNetSpec = infrav1.VnetSpec{ - ID: "1234", - Name: "my-vnet", - } - ownedVNetSpec = infrav1.VnetSpec{ - Name: "my-vnet", - } natGatewaySpec1 = NatGatewaySpec{ Name: "my-node-natgateway-1", ResourceGroup: "my-rg", @@ -82,8 +75,7 @@ func TestReconcileNatGateways(t *testing.T) { tags: customVNetTags, expectedError: "", expect: func(s *mock_natgateways.MockNatGatewayScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { - s.Vnet().Return(&ownedVNetSpec) - s.ClusterName() + s.IsVnetManaged().Return(true) s.NatGatewaySpecs().Return([]azure.ResourceSpecGetter{}) }, }, @@ -92,9 +84,7 @@ func TestReconcileNatGateways(t *testing.T) 
{ tags: customVNetTags, expectedError: "", expect: func(s *mock_natgateways.MockNatGatewayScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { - s.Vnet().Return(&customVNetSpec) - s.ClusterName() - s.UpdatePutStatus(infrav1.NATGatewaysReadyCondition, serviceName, nil) + s.IsVnetManaged().Return(false) }, }, { @@ -102,8 +92,7 @@ func TestReconcileNatGateways(t *testing.T) { tags: ownedVNetTags, expectedError: "", expect: func(s *mock_natgateways.MockNatGatewayScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { - s.Vnet().Return(&ownedVNetSpec) - s.ClusterName() + s.IsVnetManaged().Return(true) s.NatGatewaySpecs().Return([]azure.ResourceSpecGetter{&natGatewaySpec1}) r.CreateResource(gomockinternal.AContext(), &natGatewaySpec1, serviceName).Return(natGateway1, nil) s.SetNatGatewayIDInSubnets(natGatewaySpec1.Name, *natGateway1.ID) @@ -115,8 +104,7 @@ func TestReconcileNatGateways(t *testing.T) { tags: ownedVNetTags, expectedError: "#: Internal Server Error: StatusCode=500", expect: func(s *mock_natgateways.MockNatGatewayScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { - s.Vnet().Return(&ownedVNetSpec) - s.ClusterName() + s.IsVnetManaged().Return(true) s.NatGatewaySpecs().Return([]azure.ResourceSpecGetter{&natGatewaySpec1}) r.CreateResource(gomockinternal.AContext(), &natGatewaySpec1, serviceName).Return(nil, internalError) s.UpdatePutStatus(infrav1.NATGatewaysReadyCondition, serviceName, internalError) @@ -127,8 +115,7 @@ func TestReconcileNatGateways(t *testing.T) { tags: ownedVNetTags, expectedError: "created resource string is not a network.NatGateway", expect: func(s *mock_natgateways.MockNatGatewayScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { - s.Vnet().Return(&ownedVNetSpec) - s.ClusterName() + s.IsVnetManaged().Return(true) s.NatGatewaySpecs().Return([]azure.ResourceSpecGetter{&natGatewaySpec1}) r.CreateResource(gomockinternal.AContext(), &natGatewaySpec1, serviceName).Return("not a nat gateway", nil) 
s.UpdatePutStatus(infrav1.NATGatewaysReadyCondition, serviceName, gomockinternal.ErrStrEq("created resource string is not a network.NatGateway")) @@ -176,8 +163,7 @@ func TestDeleteNatGateway(t *testing.T) { tags: ownedVNetTags, expectedError: "", expect: func(s *mock_natgateways.MockNatGatewayScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { - s.Vnet().Return(&ownedVNetSpec) - s.ClusterName() + s.IsVnetManaged().Return(true) s.NatGatewaySpecs().Return([]azure.ResourceSpecGetter{}) }, }, @@ -186,9 +172,7 @@ func TestDeleteNatGateway(t *testing.T) { tags: customVNetTags, expectedError: "", expect: func(s *mock_natgateways.MockNatGatewayScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { - s.Vnet().Return(&customVNetSpec) - s.ClusterName() - s.UpdateDeleteStatus(infrav1.NATGatewaysReadyCondition, serviceName, nil) + s.IsVnetManaged().Return(false) }, }, { @@ -196,8 +180,7 @@ func TestDeleteNatGateway(t *testing.T) { tags: ownedVNetTags, expectedError: "", expect: func(s *mock_natgateways.MockNatGatewayScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { - s.Vnet().Return(&ownedVNetSpec) - s.ClusterName() + s.IsVnetManaged().Return(true) s.NatGatewaySpecs().Return([]azure.ResourceSpecGetter{&natGatewaySpec1}) r.DeleteResource(gomockinternal.AContext(), &natGatewaySpec1, serviceName).Return(nil) s.UpdateDeleteStatus(infrav1.NATGatewaysReadyCondition, serviceName, nil) @@ -208,8 +191,7 @@ func TestDeleteNatGateway(t *testing.T) { tags: ownedVNetTags, expectedError: "#: Internal Server Error: StatusCode=500", expect: func(s *mock_natgateways.MockNatGatewayScopeMockRecorder, r *mock_async.MockReconcilerMockRecorder) { - s.Vnet().Return(&ownedVNetSpec) - s.ClusterName() + s.IsVnetManaged().Return(true) s.NatGatewaySpecs().Return([]azure.ResourceSpecGetter{&natGatewaySpec1}) r.DeleteResource(gomockinternal.AContext(), &natGatewaySpec1, serviceName).Return(internalError) s.UpdateDeleteStatus(infrav1.NATGatewaysReadyCondition, serviceName, 
internalError) diff --git a/azure/services/networkinterfaces/networkinterfaces.go b/azure/services/networkinterfaces/networkinterfaces.go index 96d91f5dd87..75178f063e9 100644 --- a/azure/services/networkinterfaces/networkinterfaces.go +++ b/azure/services/networkinterfaces/networkinterfaces.go @@ -115,3 +115,8 @@ func (s *Service) Delete(ctx context.Context) error { s.Scope.UpdateDeleteStatus(infrav1.NetworkInterfaceReadyCondition, serviceName, result) return result } + +// IsManaged always returns true as CAPZ does not support BYO network interfaces. +func (s *Service) IsManaged(ctx context.Context) (bool, error) { + return true, nil +} diff --git a/azure/services/privatedns/privatedns.go b/azure/services/privatedns/privatedns.go index 0ad47829328..b9fdf6066b2 100644 --- a/azure/services/privatedns/privatedns.go +++ b/azure/services/privatedns/privatedns.go @@ -226,3 +226,9 @@ func (s *Service) isVnetLinkManaged(ctx context.Context, resourceGroupName, zone tags := converters.MapToTags(zone.Tags) return tags.HasOwned(s.Scope.ClusterName()), nil } + +// IsManaged always returns true. +// TODO: separate private DNS and VNet links so we can implement the IsManaged method for each. +func (s *Service) IsManaged(ctx context.Context) (bool, error) { + return true, nil +} diff --git a/azure/services/publicips/publicips.go b/azure/services/publicips/publicips.go index bd1a32dece6..0b282cb956a 100644 --- a/azure/services/publicips/publicips.go +++ b/azure/services/publicips/publicips.go @@ -156,3 +156,8 @@ func (s *Service) isIPManaged(ctx context.Context, ipName string) (bool, error) tags := converters.MapToTags(ip.Tags) return tags.HasOwned(s.Scope.ClusterName()), nil } + +// IsManaged always returns true as public IPs are managed on a one-by-one basis. 
+func (s *Service) IsManaged(ctx context.Context) (bool, error) { + return true, nil +} diff --git a/azure/services/roleassignments/roleassignments.go b/azure/services/roleassignments/roleassignments.go index dd360a3c760..3e52934335e 100644 --- a/azure/services/roleassignments/roleassignments.go +++ b/azure/services/roleassignments/roleassignments.go @@ -155,3 +155,8 @@ func (s *Service) Delete(ctx context.Context) error { return nil } + +// IsManaged always returns true as CAPZ does not support BYO role assignments. +func (s *Service) IsManaged(ctx context.Context) (bool, error) { + return true, nil +} diff --git a/azure/services/routetables/routetables.go b/azure/services/routetables/routetables.go index 02b763371fd..dbcc25e46ab 100644 --- a/azure/services/routetables/routetables.go +++ b/azure/services/routetables/routetables.go @@ -19,6 +19,7 @@ package routetables import ( "context" + "github.com/pkg/errors" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/async" @@ -66,9 +67,11 @@ func (s *Service) Reconcile(ctx context.Context) error { var resErr error - if !s.Scope.IsVnetManaged() { + if managed, err := s.IsManaged(ctx); err == nil && !managed { log.V(4).Info("Skipping route tables reconcile in custom vnet mode") return nil + } else if err != nil { + return errors.Wrap(err, "failed to check if route tables are managed") } specs := s.Scope.RouteTableSpecs() @@ -101,9 +104,11 @@ func (s *Service) Delete(ctx context.Context) error { // Only delete the route tables if their lifecycle is managed by this controller. // route tables are managed if and only if the vnet is managed. 
- if !s.Scope.IsVnetManaged() { + if managed, err := s.IsManaged(ctx); err == nil && !managed { log.V(4).Info("Skipping route table deletion in custom vnet mode") return nil + } else if err != nil { + return errors.Wrap(err, "failed to check if route tables are managed") } specs := s.Scope.RouteTableSpecs() @@ -125,3 +130,11 @@ func (s *Service) Delete(ctx context.Context) error { s.Scope.UpdateDeleteStatus(infrav1.RouteTablesReadyCondition, serviceName, result) return result } + +// IsManaged returns true if the route tables' lifecycles are managed. +func (s *Service) IsManaged(ctx context.Context) (bool, error) { + _, _, done := tele.StartSpanWithLogger(ctx, "routetables.Service.IsManaged") + defer done() + + return s.Scope.IsVnetManaged(), nil +} diff --git a/azure/services/scalesets/scalesets.go b/azure/services/scalesets/scalesets.go index ea036295191..fc7ee6a2de0 100644 --- a/azure/services/scalesets/scalesets.go +++ b/azure/services/scalesets/scalesets.go @@ -742,3 +742,8 @@ func getSecurityProfile(vmssSpec azure.ScaleSetSpec, sku resourceskus.SKU) (*com EncryptionAtHost: to.BoolPtr(*vmssSpec.SecurityProfile.EncryptionAtHost), }, nil } + +// IsManaged always returns true as CAPZ does not support BYO scale set. 
+func (s *Service) IsManaged(ctx context.Context) (bool, error) { + return true, nil +} diff --git a/azure/services/securitygroups/securitygroups.go b/azure/services/securitygroups/securitygroups.go index f797b01393a..8e7a4d66a6c 100644 --- a/azure/services/securitygroups/securitygroups.go +++ b/azure/services/securitygroups/securitygroups.go @@ -19,6 +19,7 @@ package securitygroups import ( "context" + "github.com/pkg/errors" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/async" @@ -65,9 +66,11 @@ func (s *Service) Reconcile(ctx context.Context) error { defer cancel() // Only create the NSGs if their lifecycle is managed by this controller. - if !s.Scope.IsVnetManaged() { + if managed, err := s.IsManaged(ctx); err == nil && !managed { log.V(4).Info("Skipping network security groups reconcile in custom VNet mode") return nil + } else if err != nil { + return errors.Wrap(err, "failed to check if security groups are managed") } specs := s.Scope.NSGSpecs() @@ -100,10 +103,12 @@ func (s *Service) Delete(ctx context.Context) error { ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultAzureServiceReconcileTimeout) defer cancel() - // Only delete the NSG if its lifecycle is managed by this controller. - if !s.Scope.IsVnetManaged() { + // Only delete the security groups if their lifecycle is managed by this controller. + if managed, err := s.IsManaged(ctx); err == nil && !managed { log.V(4).Info("Skipping network security groups delete in custom VNet mode") return nil + } else if err != nil { + return errors.Wrap(err, "failed to check if security groups are managed") } specs := s.Scope.NSGSpecs() @@ -127,3 +132,11 @@ func (s *Service) Delete(ctx context.Context) error { s.Scope.UpdateDeleteStatus(infrav1.SecurityGroupsReadyCondition, serviceName, result) return result } + +// IsManaged returns true if the security groups' lifecycles are managed. 
+func (s *Service) IsManaged(ctx context.Context) (bool, error) { + _, _, done := tele.StartSpanWithLogger(ctx, "securitygroups.Service.IsManaged") + defer done() + + return s.Scope.IsVnetManaged(), nil +} diff --git a/azure/services/subnets/subnets.go b/azure/services/subnets/subnets.go index 9af9429debc..fdf3eadf3a6 100644 --- a/azure/services/subnets/subnets.go +++ b/azure/services/subnets/subnets.go @@ -115,9 +115,11 @@ func (s *Service) Delete(ctx context.Context) error { ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultAzureServiceReconcileTimeout) defer cancel() - if !s.Scope.IsVnetManaged() { + if managed, err := s.IsManaged(ctx); err == nil && !managed { log.V(4).Info("Skipping subnets deletion in custom vnet mode") return nil + } else if err != nil { + return errors.Wrap(err, "failed to check if subnets are managed") } specs := s.Scope.SubnetSpecs() @@ -138,6 +140,13 @@ func (s *Service) Delete(ctx context.Context) error { } s.Scope.UpdateDeleteStatus(infrav1.SubnetsReadyCondition, serviceName, result) - return result } + +// IsManaged returns true if the subnets' lifecycles are managed. +func (s *Service) IsManaged(ctx context.Context) (bool, error) { + _, _, done := tele.StartSpanWithLogger(ctx, "subnets.Service.IsManaged") + defer done() + + return s.Scope.IsVnetManaged(), nil +} diff --git a/azure/services/tags/tags.go b/azure/services/tags/tags.go index 6361d9c83f8..caf57b2a894 100644 --- a/azure/services/tags/tags.go +++ b/azure/services/tags/tags.go @@ -194,3 +194,8 @@ func tagsChanged(lastAppliedTags map[string]interface{}, desiredTags map[string] // in dst. Nothing changed. return changed, createdOrUpdated, deleted, newAnnotation } + +// IsManaged always returns true as CAPZ does not support BYO tags. 
+func (s *Service) IsManaged(ctx context.Context) (bool, error) { + return true, nil +} diff --git a/azure/services/virtualmachines/virtualmachines.go b/azure/services/virtualmachines/virtualmachines.go index a2ffa5fe9c3..ce05f3c0d90 100644 --- a/azure/services/virtualmachines/virtualmachines.go +++ b/azure/services/virtualmachines/virtualmachines.go @@ -225,3 +225,8 @@ func getResourceNameByID(resourceID string) string { resourceName := explodedResourceID[len(explodedResourceID)-1] return resourceName } + +// IsManaged always returns true as CAPZ does not support BYO VM. +func (s *Service) IsManaged(ctx context.Context) (bool, error) { + return true, nil +} diff --git a/azure/services/virtualnetworks/virtualnetworks.go b/azure/services/virtualnetworks/virtualnetworks.go index 4cc9bd89d6d..70963dd3d3c 100644 --- a/azure/services/virtualnetworks/virtualnetworks.go +++ b/azure/services/virtualnetworks/virtualnetworks.go @@ -114,7 +114,7 @@ func (s *Service) Delete(ctx context.Context) error { } // Check that the vnet is not BYO. - managed, err := s.IsManaged(ctx, vnetSpec) + managed, err := s.IsManaged(ctx) if err != nil { if azure.ResourceNotFound(err) { // already deleted or doesn't exist, cleanup status and return. @@ -136,10 +136,11 @@ func (s *Service) Delete(ctx context.Context) error { // IsManaged returns true if the virtual network has an owned tag with the cluster name as value, // meaning that the vnet's lifecycle is managed. 
-func (s *Service) IsManaged(ctx context.Context, spec azure.ResourceSpecGetter) (bool, error) { +func (s *Service) IsManaged(ctx context.Context) (bool, error) { ctx, _, done := tele.StartSpanWithLogger(ctx, "virtualnetworks.Service.IsManaged") defer done() + spec := s.Scope.VNetSpec() if spec == nil { return false, errors.New("cannot get vnet to check if it is managed: spec is nil") } diff --git a/azure/services/virtualnetworks/virtualnetworks_test.go b/azure/services/virtualnetworks/virtualnetworks_test.go index 6cbde7ba33a..8b8db077a52 100644 --- a/azure/services/virtualnetworks/virtualnetworks_test.go +++ b/azure/services/virtualnetworks/virtualnetworks_test.go @@ -27,7 +27,6 @@ import ( "github.com/golang/mock/gomock" . "github.com/onsi/gomega" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/services/async/mock_async" "sigs.k8s.io/cluster-api-provider-azure/azure/services/virtualnetworks/mock_virtualnetworks" gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" @@ -153,7 +152,7 @@ func TestDeleteVnet(t *testing.T) { name: "delete vnet succeeds, should not return an error", expectedError: "", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_async.MockGetterMockRecorder, r *mock_async.MockReconcilerMockRecorder) { - s.VNetSpec().Return(&fakeVNetSpec) + s.VNetSpec().Times(2).Return(&fakeVNetSpec) m.Get(gomockinternal.AContext(), &fakeVNetSpec).Return(managedVnet, nil) s.ClusterName().Return("test-cluster") r.DeleteResource(gomockinternal.AContext(), &fakeVNetSpec, serviceName).Return(nil) @@ -164,7 +163,7 @@ func TestDeleteVnet(t *testing.T) { name: "delete vnet fails, should return an error", expectedError: internalError.Error(), expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_async.MockGetterMockRecorder, r *mock_async.MockReconcilerMockRecorder) { - 
s.VNetSpec().Return(&fakeVNetSpec) + s.VNetSpec().Times(2).Return(&fakeVNetSpec) m.Get(gomockinternal.AContext(), &fakeVNetSpec).Return(managedVnet, nil) s.ClusterName().Return("test-cluster") r.DeleteResource(gomockinternal.AContext(), &fakeVNetSpec, serviceName).Return(internalError) @@ -175,7 +174,7 @@ func TestDeleteVnet(t *testing.T) { name: "vnet is not managed, do nothing", expectedError: "", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_async.MockGetterMockRecorder, r *mock_async.MockReconcilerMockRecorder) { - s.VNetSpec().Return(&fakeVNetSpec) + s.VNetSpec().Times(2).Return(&fakeVNetSpec) m.Get(gomockinternal.AContext(), &fakeVNetSpec).Return(customVnet, nil) s.ClusterName().Return("test-cluster") }, @@ -215,44 +214,43 @@ func TestDeleteVnet(t *testing.T) { func TestIsVnetManaged(t *testing.T) { testcases := []struct { name string - vnetSpec azure.ResourceSpecGetter expectedError string result bool expect func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_async.MockGetterMockRecorder) }{ { name: "spec is nil", - vnetSpec: nil, result: false, expectedError: "cannot get vnet to check if it is managed: spec is nil", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_async.MockGetterMockRecorder) { + s.VNetSpec().Return(nil) }, }, { name: "managed vnet returns true", - vnetSpec: &fakeVNetSpec, result: true, expectedError: "", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_async.MockGetterMockRecorder) { + s.VNetSpec().Return(&fakeVNetSpec) m.Get(gomockinternal.AContext(), &fakeVNetSpec).Return(managedVnet, nil) s.ClusterName().Return("test-cluster") }, }, { name: "custom vnet returns false", - vnetSpec: &fakeVNetSpec, result: false, expectedError: "", expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_async.MockGetterMockRecorder) { + s.VNetSpec().Return(&fakeVNetSpec) m.Get(gomockinternal.AContext(), &fakeVNetSpec).Return(customVnet, nil) 
s.ClusterName().Return("test-cluster") }, }, { name: "GET fails returns an error", - vnetSpec: &fakeVNetSpec, expectedError: internalError.Error(), expect: func(s *mock_virtualnetworks.MockVNetScopeMockRecorder, m *mock_async.MockGetterMockRecorder) { + s.VNetSpec().Return(&fakeVNetSpec) m.Get(gomockinternal.AContext(), &fakeVNetSpec).Return(network.VirtualNetwork{}, internalError) }, }, @@ -275,7 +273,7 @@ func TestIsVnetManaged(t *testing.T) { Getter: getterMock, } - result, err := s.IsManaged(context.TODO(), tc.vnetSpec) + result, err := s.IsManaged(context.TODO()) if tc.expectedError != "" { g.Expect(err).To(HaveOccurred()) g.Expect(err).To(MatchError(tc.expectedError)) diff --git a/azure/services/vmextensions/vmextensions.go b/azure/services/vmextensions/vmextensions.go index b77ade4ad4d..a06c148c2a8 100644 --- a/azure/services/vmextensions/vmextensions.go +++ b/azure/services/vmextensions/vmextensions.go @@ -100,3 +100,8 @@ func (s *Service) Reconcile(ctx context.Context) error { func (s *Service) Delete(_ context.Context) error { return nil } + +// IsManaged always returns true as CAPZ does not support BYO VM extension. +func (s *Service) IsManaged(ctx context.Context) (bool, error) { + return true, nil +} diff --git a/azure/services/vmssextensions/vmssextensions.go b/azure/services/vmssextensions/vmssextensions.go index cc34dc240f1..e3885600f85 100644 --- a/azure/services/vmssextensions/vmssextensions.go +++ b/azure/services/vmssextensions/vmssextensions.go @@ -77,3 +77,8 @@ func (s *Service) Reconcile(ctx context.Context) error { func (s *Service) Delete(_ context.Context) error { return nil } + +// IsManaged always returns true as CAPZ does not support BYO VMSS extension. 
+func (s *Service) IsManaged(ctx context.Context) (bool, error) { + return true, nil +} diff --git a/azure/services/vnetpeerings/vnetpeerings.go b/azure/services/vnetpeerings/vnetpeerings.go index 6321a203b9b..67818671940 100644 --- a/azure/services/vnetpeerings/vnetpeerings.go +++ b/azure/services/vnetpeerings/vnetpeerings.go @@ -111,3 +111,8 @@ func (s *Service) Delete(ctx context.Context) error { s.Scope.UpdateDeleteStatus(infrav1.VnetPeeringReadyCondition, serviceName, result) return result } + +// IsManaged always returns true as CAPZ does not support BYO VNet peering. +func (s *Service) IsManaged(ctx context.Context) (bool, error) { + return true, nil +} diff --git a/controllers/azurecluster_reconciler.go b/controllers/azurecluster_reconciler.go index 2f6c229ccd9..eb7be42cfaf 100644 --- a/controllers/azurecluster_reconciler.go +++ b/controllers/azurecluster_reconciler.go @@ -102,26 +102,36 @@ func (s *azureClusterService) Delete(ctx context.Context) error { groupSvc, err := s.getService(groups.ServiceName) if err != nil { - return err + return errors.Wrap(err, "failed to get group service") } - if err := groupSvc.Delete(ctx); err != nil { - if errors.Is(err, azure.ErrNotOwned) { - // If the resource group is not managed we need to delete resources inside the group one by one. - // services are deleted in reverse order from the order in which they are reconciled. - for i := len(s.services) - 1; i >= 1; i-- { - if err := s.services[i].Delete(ctx); err != nil { - return errors.Wrapf(err, "failed to delete AzureCluster service %s", s.services[i].Name()) - } - } - } else { + + managed, err := groupSvc.IsManaged(ctx) + if err != nil { + if azure.ResourceNotFound(err) { + // If the resource group is not found, there is nothing to delete, return early. 
+ return nil + } + return errors.Wrap(err, "failed to determine if the AzureCluster resource group is managed") + } + if managed { + // if the resource group is managed, we delete the entire resource group directly. + if err := groupSvc.Delete(ctx); err != nil { return errors.Wrap(err, "failed to delete resource group") } + } else { + // If the resource group is not managed we need to delete resources inside the group one by one. + // services are deleted in reverse order from the order in which they are reconciled. + for i := len(s.services) - 1; i >= 0; i-- { + if err := s.services[i].Delete(ctx); err != nil { + return errors.Wrapf(err, "failed to delete AzureCluster service %s", s.services[i].Name()) + } + } } return nil } -func (s *azureClusterService) getService(name string) (azure.Reconciler, error) { +func (s *azureClusterService) getService(name string) (azure.ServiceReconciler, error) { for _, service := range s.services { if service.Name() == name { return service, nil diff --git a/controllers/azurecluster_reconciler_test.go b/controllers/azurecluster_reconciler_test.go index 1eb06c3b270..cb3d8054ed7 100644 --- a/controllers/azurecluster_reconciler_test.go +++ b/controllers/azurecluster_reconciler_test.go @@ -107,14 +107,24 @@ func TestAzureClusterServiceDelete(t *testing.T) { expect: func(grp *mock_azure.MockServiceReconcilerMockRecorder, one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { gomock.InOrder( grp.Name().Return(groups.ServiceName), + grp.IsManaged(gomockinternal.AContext()).Return(true, nil), grp.Delete(gomockinternal.AContext()).Return(nil)) }, }, + "Error when checking if resource group is managed": { + expectedError: "failed to determine if the AzureCluster resource group is managed: an error happened", + expect: func(grp *mock_azure.MockServiceReconcilerMockRecorder, one *mock_azure.MockServiceReconcilerMockRecorder, two 
*mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { + gomock.InOrder( + grp.Name().Return(groups.ServiceName), + grp.IsManaged(gomockinternal.AContext()).Return(false, errors.New("an error happened"))) + }, + }, "Resource Group delete fails": { expectedError: "failed to delete resource group: internal error", expect: func(grp *mock_azure.MockServiceReconcilerMockRecorder, one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { gomock.InOrder( grp.Name().Return(groups.ServiceName), + grp.IsManaged(gomockinternal.AContext()).Return(true, nil), grp.Delete(gomockinternal.AContext()).Return(errors.New("internal error"))) }, }, @@ -123,10 +133,11 @@ func TestAzureClusterServiceDelete(t *testing.T) { expect: func(grp *mock_azure.MockServiceReconcilerMockRecorder, one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { gomock.InOrder( grp.Name().Return(groups.ServiceName), - grp.Delete(gomockinternal.AContext()).Return(azure.ErrNotOwned), + grp.IsManaged(gomockinternal.AContext()).Return(false, nil), three.Delete(gomockinternal.AContext()).Return(nil), two.Delete(gomockinternal.AContext()).Return(nil), - one.Delete(gomockinternal.AContext()).Return(nil)) + one.Delete(gomockinternal.AContext()).Return(nil), + grp.Delete(gomockinternal.AContext()).Return(nil)) }, }, "service delete fails": { @@ -134,7 +145,7 @@ func TestAzureClusterServiceDelete(t *testing.T) { expect: func(grp *mock_azure.MockServiceReconcilerMockRecorder, one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { gomock.InOrder( grp.Name().Return(groups.ServiceName), - grp.Delete(gomockinternal.AContext()).Return(azure.ErrNotOwned), + 
grp.IsManaged(gomockinternal.AContext()).Return(false, nil), three.Delete(gomockinternal.AContext()).Return(nil), two.Delete(gomockinternal.AContext()).Return(errors.New("some error happened")), two.Name().Return("two")) diff --git a/controllers/helpers.go b/controllers/helpers.go index 71afb335baf..1187df8b2c2 100644 --- a/controllers/helpers.go +++ b/controllers/helpers.go @@ -575,7 +575,7 @@ func ShouldDeleteIndividualResources(ctx context.Context, clusterScope *scope.Cl return true } grpSvc := groups.New(clusterScope) - managed, err := grpSvc.IsGroupManaged(ctx) + managed, err := grpSvc.IsManaged(ctx) // Since this is a best effort attempt to speed up delete, we don't fail the delete if we can't get the RG status. // Instead, take the long way and delete all resources one by one. return err != nil || !managed