From 268645a3bee5e88344c78c6965db8f7b099773b9 Mon Sep 17 00:00:00 2001
From: Matthew Booth
Date: Mon, 26 Feb 2024 14:05:17 +0000
Subject: [PATCH] Reduce cyclomatic complexity of reconcileNetworkComponents

This change refactors reconcileNetworkComponents into several smaller
logical functions which are easier to read and reason about. It also
keeps the gocyclo linter happy when changes are made to this code.

Co-Authored-By: Emilien Macchi
---
 controllers/openstackcluster_controller.go      | 299 ++++++++++--------
 .../openstackcluster_controller_test.go         |  48 ++-
 2 files changed, 205 insertions(+), 142 deletions(-)

diff --git a/controllers/openstackcluster_controller.go b/controllers/openstackcluster_controller.go
index 3abcad6cdc..6fe81d5220 100644
--- a/controllers/openstackcluster_controller.go
+++ b/controllers/openstackcluster_controller.go
@@ -602,81 +602,123 @@ func reconcileNetworkComponents(scope *scope.WithLogger, cluster *clusterv1.Clus
 	}
 
 	if len(openStackCluster.Spec.ManagedSubnets) == 0 {
-		scope.Logger().V(4).Info("No need to reconcile network, searching network and subnet instead")
-
-		if openStackCluster.Status.Network == nil {
-			openStackCluster.Status.Network = &infrav1.NetworkStatusWithSubnets{}
-		}
-
-		err := getCAPONetwork(openStackCluster, networkingService)
-		if err != nil {
+		if err := reconcilePreExistingNetworkComponents(scope, networkingService, openStackCluster); err != nil {
 			return err
 		}
-
-		filteredSubnets, err := filterSubnets(networkingService, openStackCluster)
-		if err != nil {
+	} else if len(openStackCluster.Spec.ManagedSubnets) == 1 {
+		if err := reconcileProvisionedNetworkComponents(networkingService, openStackCluster, clusterName); err != nil {
 			return err
 		}
+	} else {
+		return fmt.Errorf("failed to reconcile network: ManagedSubnets only supports one element, %d provided", len(openStackCluster.Spec.ManagedSubnets))
+	}
 
-		var subnets []infrav1.Subnet
-		for subnet := range filteredSubnets {
-			filterSubnet := &filteredSubnets[subnet]
-			subnets = append(subnets, infrav1.Subnet{
-				ID:   filterSubnet.ID,
-				Name: filterSubnet.Name,
-				CIDR: filterSubnet.CIDR,
-				Tags: filterSubnet.Tags,
-			})
-		}
+	err = networkingService.ReconcileSecurityGroups(openStackCluster, clusterName)
+	if err != nil {
+		handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to reconcile security groups: %w", err))
+		return fmt.Errorf("failed to reconcile security groups: %w", err)
+	}
 
-		if err := utils.ValidateSubnets(subnets); err != nil {
-			return err
-		}
-		openStackCluster.Status.Network.Subnets = subnets
+	return reconcileControlPlaneEndpoint(scope, networkingService, openStackCluster, clusterName)
+}
 
-		// If network is not yet populated on the Status, use networkID defined in the filtered subnets to get the Network.
-		err = populateCAPONetworkFromSubnet(networkingService, filteredSubnets, openStackCluster)
+// reconcilePreExistingNetworkComponents reconciles the cluster network status when the cluster is
+// using pre-existing networks and subnets which are not provisioned by the
+// cluster controller.
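+// It looks up the network and subnets matching the filters in the cluster
+// spec and records the result in the cluster network status.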
+func reconcilePreExistingNetworkComponents(scope *scope.WithLogger, networkingService *networking.Service, openStackCluster *infrav1.OpenStackCluster) error {
+	scope.Logger().V(4).Info("No need to reconcile network, searching network and subnet instead")
+
+	if openStackCluster.Status.Network == nil {
+		openStackCluster.Status.Network = &infrav1.NetworkStatusWithSubnets{}
+	}
+
+	emptyNetwork := infrav1.NetworkFilter{}
+	if openStackCluster.Spec.Network != emptyNetwork {
+		netOpts := openStackCluster.Spec.Network.ToListOpt()
+		networkList, err := networkingService.GetNetworksByFilter(&netOpts)
 		if err != nil {
-			return err
+			handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to find network: %w", err))
+			return fmt.Errorf("failed to find network: %w", err)
 		}
-	} else if len(openStackCluster.Spec.ManagedSubnets) == 1 {
-		err := networkingService.ReconcileNetwork(openStackCluster, clusterName)
-		if err != nil {
-			handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to reconcile network: %w", err))
-			return fmt.Errorf("failed to reconcile network: %w", err)
+		if len(networkList) == 0 {
+			handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to find any network"))
+			return fmt.Errorf("failed to find any network")
 		}
-		err = networkingService.ReconcileSubnet(openStackCluster, clusterName)
-		if err != nil {
-			handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to reconcile subnets: %w", err))
-			return fmt.Errorf("failed to reconcile subnets: %w", err)
+		if len(networkList) == 1 {
+			setClusterNetwork(openStackCluster, &networkList[0])
+		}
+	}
+
+	subnets, err := getClusterSubnets(networkingService, openStackCluster)
+	if err != nil {
+		return err
+	}
+
+	// Populate the cluster status with the cluster subnets
+	capoSubnets := make([]infrav1.Subnet, len(subnets))
+	for i := range subnets {
+		subnet := &subnets[i]
+		capoSubnets[i] = infrav1.Subnet{
+			ID:   subnet.ID,
+			Name: subnet.Name,
+			CIDR: subnet.CIDR,
+			Tags: subnet.Tags,
 		}
-		err = networkingService.ReconcileRouter(openStackCluster, clusterName)
+	}
+	if err := utils.ValidateSubnets(capoSubnets); err != nil {
+		return err
+	}
+	openStackCluster.Status.Network.Subnets = capoSubnets
+
+	// If network is not yet populated, use networkID defined on the first
+	// cluster subnet to get the Network. Cluster subnets are constrained to
+	// be in the same network.
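+	// getClusterSubnets enforces this by constraining the search for each
+	// subsequent subnet to the network of the first subnet found.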
+ if openStackCluster.Status.Network.ID == "" && len(subnets) > 0 { + network, err := networkingService.GetNetworkByID(subnets[0].NetworkID) if err != nil { - handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to reconcile router: %w", err)) - return fmt.Errorf("failed to reconcile router: %w", err) + return err } - } else { - return fmt.Errorf("failed to reconcile network: ManagedSubnets only supports one element, %d provided", len(openStackCluster.Spec.ManagedSubnets)) + setClusterNetwork(openStackCluster, network) } - err = networkingService.ReconcileSecurityGroups(openStackCluster, clusterName) + return nil +} + +func reconcileProvisionedNetworkComponents(networkingService *networking.Service, openStackCluster *infrav1.OpenStackCluster, clusterName string) error { + err := networkingService.ReconcileNetwork(openStackCluster, clusterName) if err != nil { - handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to reconcile security groups: %w", err)) - return fmt.Errorf("failed to reconcile security groups: %w", err) + handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to reconcile network: %w", err)) + return fmt.Errorf("failed to reconcile network: %w", err) + } + err = networkingService.ReconcileSubnet(openStackCluster, clusterName) + if err != nil { + handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to reconcile subnets: %w", err)) + return fmt.Errorf("failed to reconcile subnets: %w", err) + } + err = networkingService.ReconcileRouter(openStackCluster, clusterName) + if err != nil { + handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to reconcile router: %w", err)) + return fmt.Errorf("failed to reconcile router: %w", err) } + return nil +} + +// reconcileControlPlaneEndpoint configures the control plane endpoint for the +// cluster, creating it if necessary, and updates ControlPlaneEndpoint in the +// cluster spec. +func reconcileControlPlaneEndpoint(scope *scope.WithLogger, networkingService *networking.Service, openStackCluster *infrav1.OpenStackCluster, clusterName string) error { // Calculate the port that we will use for the API server - var apiServerPort int - switch { - case openStackCluster.Spec.ControlPlaneEndpoint.IsValid(): - apiServerPort = int(openStackCluster.Spec.ControlPlaneEndpoint.Port) - case openStackCluster.Spec.APIServerPort != 0: - apiServerPort = openStackCluster.Spec.APIServerPort - default: - apiServerPort = 6443 - } + apiServerPort := getAPIServerPort(openStackCluster) - if openStackCluster.Spec.APIServerLoadBalancer.Enabled { + // host must be set by a matching control plane endpoint provider below + var host string + + switch { + // API server load balancer is enabled. Create an Octavia load balancer. + // Note that we reconcile the load balancer even if the control plane + // endpoint is already set. 
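+	// Reconciling on every pass keeps the load balancer in sync with any
+	// later changes to the load balancer spec.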
case openStackCluster.Spec.APIServerLoadBalancer.Enabled:
 		loadBalancerService, err := loadbalancer.NewService(scope)
 		if err != nil {
 			return err
 		}
@@ -690,49 +732,63 @@ func reconcileNetworkComponents(scope *scope.WithLogger, cluster *clusterv1.Clus
 			}
 			return fmt.Errorf("failed to reconcile load balancer: %w", err)
 		}
-	}
 
-	if !openStackCluster.Spec.ControlPlaneEndpoint.IsValid() {
-		var host string
-		// If there is a load balancer use the floating IP for it if set, falling back to the internal IP
-		switch {
-		case openStackCluster.Spec.APIServerLoadBalancer.Enabled:
-			if openStackCluster.Status.APIServerLoadBalancer.IP != "" {
-				host = openStackCluster.Status.APIServerLoadBalancer.IP
-			} else {
-				host = openStackCluster.Status.APIServerLoadBalancer.InternalIP
-			}
-		case !openStackCluster.Spec.DisableAPIServerFloatingIP:
-			// If floating IPs are not disabled, get one to use as the VIP for the control plane
-			fp, err := networkingService.GetOrCreateFloatingIP(openStackCluster, openStackCluster, clusterName, openStackCluster.Spec.APIServerFloatingIP)
-			if err != nil {
-				handleUpdateOSCError(openStackCluster, fmt.Errorf("floating IP cannot be got or created: %w", err))
-				return fmt.Errorf("floating IP cannot be got or created: %w", err)
-			}
-			host = fp.FloatingIP
-		case openStackCluster.Spec.APIServerFixedIP != "":
-			// If a fixed IP was specified, assume that the user is providing the extra configuration
-			// to use that IP as the VIP for the API server, e.g. using keepalived or kube-vip
-			host = openStackCluster.Spec.APIServerFixedIP
-		default:
-			// For now, we do not provide a managed VIP without either a load balancer or a floating IP
-			// In the future, we could manage a VIP port on the cluster network and set allowedAddressPairs
-			// accordingly when creating control plane machines
-			// However this would require us to deploy software on the control plane hosts to manage the
-			// VIP (e.g. keepalived/kube-vip)
-			return fmt.Errorf("unable to determine VIP for API server")
+		// Control plane endpoint is the floating IP if one was defined, otherwise the VIP address
+		if openStackCluster.Status.APIServerLoadBalancer.IP != "" {
+			host = openStackCluster.Status.APIServerLoadBalancer.IP
+		} else {
+			host = openStackCluster.Status.APIServerLoadBalancer.InternalIP
 		}
 
-		// Set APIEndpoints so the Cluster API Cluster Controller can pull them
-		openStackCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
-			Host: host,
-			Port: int32(apiServerPort),
+	// Control plane endpoint is already set
+	// Note that checking this here means that we don't re-execute any of
+	// the branches below if the control plane endpoint is already set.
+	case openStackCluster.Spec.ControlPlaneEndpoint.IsValid():
+		host = openStackCluster.Spec.ControlPlaneEndpoint.Host
+
+	// API server load balancer is disabled, but floating IP is not. Create
+	// a floating IP to be attached directly to a control plane host.
+	case !openStackCluster.Spec.DisableAPIServerFloatingIP:
+		fp, err := networkingService.GetOrCreateFloatingIP(openStackCluster, openStackCluster, clusterName, openStackCluster.Spec.APIServerFloatingIP)
+		if err != nil {
+			handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to get or create floating IP: %w", err))
+			return fmt.Errorf("failed to get or create floating IP: %w", err)
 		}
+		host = fp.FloatingIP
+
+	// API server load balancer is disabled and we aren't using a control
+	// plane floating IP. In this case we configure APIServerFixedIP as the
+	// control plane endpoint and leave it to the user to configure load
+	// balancing.
+	case openStackCluster.Spec.APIServerFixedIP != "":
+		host = openStackCluster.Spec.APIServerFixedIP
+
+	// Control plane endpoint is not set, and none can be created
+	default:
+		err := fmt.Errorf("unable to determine control plane endpoint")
+		handleUpdateOSCError(openStackCluster, err)
+		return err
+	}
+
+	openStackCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
+		Host: host,
+		Port: int32(apiServerPort),
 	}
 
 	return nil
 }
 
+// getAPIServerPort returns the port to use for the API server based on the cluster spec.
+func getAPIServerPort(openStackCluster *infrav1.OpenStackCluster) int {
+	switch {
+	case openStackCluster.Spec.ControlPlaneEndpoint.IsValid():
+		return int(openStackCluster.Spec.ControlPlaneEndpoint.Port)
+	case openStackCluster.Spec.APIServerPort != 0:
+		return openStackCluster.Spec.APIServerPort
+	}
+	return 6443
+}
+
 func (r *OpenStackClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
 	clusterToInfraFn := util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("OpenStackCluster"), mgr.GetClient(), &infrav1.OpenStackCluster{})
 	log := ctrl.LoggerFrom(ctx)
@@ -788,9 +844,9 @@ func handleUpdateOSCError(openstackCluster *infrav1.OpenStackCluster, message er
 	openstackCluster.Status.FailureMessage = pointer.String(message.Error())
 }
 
-// filterSubnets retrieves the subnets based on the Subnet filters specified on OpenstackCluster.
-func filterSubnets(networkingService *networking.Service, openStackCluster *infrav1.OpenStackCluster) ([]subnets.Subnet, error) {
-	var filteredSubnets []subnets.Subnet
+// getClusterSubnets retrieves the subnets based on the Subnet filters specified on OpenStackCluster.
+func getClusterSubnets(networkingService *networking.Service, openStackCluster *infrav1.OpenStackCluster) ([]subnets.Subnet, error) {
+	var clusterSubnets []subnets.Subnet
 	var err error
 	openStackClusterSubnets := openStackCluster.Spec.Subnets
 	networkID := ""
@@ -800,12 +856,14 @@ func filterSubnets(networkingService *networking.Service, openStackCluster *infr
 
 	if len(openStackClusterSubnets) == 0 {
 		if networkID == "" {
-			return nil, nil
+			// This should be a validation error
+			return nil, fmt.Errorf("no network or subnets specified in OpenStackCluster spec")
 		}
+
 		empty := &infrav1.SubnetFilter{}
 		listOpt := empty.ToListOpt()
 		listOpt.NetworkID = networkID
-		filteredSubnets, err = networkingService.GetSubnetsByFilter(listOpt)
+		clusterSubnets, err = networkingService.GetSubnetsByFilter(listOpt)
 		if err != nil {
 			err = fmt.Errorf("failed to find subnets: %w", err)
 			if errors.Is(err, networking.ErrFilterMatch) {
@@ -813,7 +871,7 @@
 			}
 			return nil, err
 		}
-		if len(filteredSubnets) > 2 {
+		if len(clusterSubnets) > 2 {
 			return nil, fmt.Errorf("more than two subnets found in the Network. Specify the subnets in the OpenStackCluster.Spec instead")
 		}
 	} else {
@@ -826,55 +884,18 @@
 			}
 			return nil, err
 		}
-		filteredSubnets = append(filteredSubnets, *filteredSubnet)
+		clusterSubnets = append(clusterSubnets, *filteredSubnet)
+
+		// Constrain the next search to the network of the first subnet
+		networkID = filteredSubnet.NetworkID
 		}
 	}
-	return filteredSubnets, nil
+	return clusterSubnets, nil
 }
 
-// convertOpenStackNetworkToCAPONetwork converts an OpenStack network to a capo network.
-// It returns the converted subnet.
-func convertOpenStackNetworkToCAPONetwork(openStackCluster *infrav1.OpenStackCluster, network *networks.Network) {
+// setClusterNetwork sets network information in the cluster status from an OpenStack network.
+func setClusterNetwork(openStackCluster *infrav1.OpenStackCluster, network *networks.Network) {
 	openStackCluster.Status.Network.ID = network.ID
 	openStackCluster.Status.Network.Name = network.Name
 	openStackCluster.Status.Network.Tags = network.Tags
 }
-
-// populateCAPONetworkFromSubnet gets a network based on the networkID of the subnets and converts it to the CAPO format.
-// It returns an error in case it failed to retrieve the network.
-func populateCAPONetworkFromSubnet(networkingService *networking.Service, subnets []subnets.Subnet, openStackCluster *infrav1.OpenStackCluster) error {
-	if openStackCluster.Status.Network.ID == "" && len(subnets) > 0 {
-		if len(subnets) > 1 && subnets[0].NetworkID != subnets[1].NetworkID {
-			return fmt.Errorf("unable to identify the network to use. NetworkID %s from subnet %s does not match NetworkID %s from subnet %s", subnets[0].NetworkID, subnets[0].ID, subnets[1].NetworkID, subnets[1].ID)
-		}
-
-		network, err := networkingService.GetNetworkByID(subnets[0].NetworkID)
-		if err != nil {
-			return err
-		}
-		convertOpenStackNetworkToCAPONetwork(openStackCluster, network)
-	}
-	return nil
-}
-
-// getCAPONetwork gets a network based on a filter, if defined, and convert the network to the CAPO format.
-// It returns an error in case it failed to retrieve the network.
-func getCAPONetwork(openStackCluster *infrav1.OpenStackCluster, networkingService *networking.Service) error { - emptyNetwork := infrav1.NetworkFilter{} - if openStackCluster.Spec.Network != emptyNetwork { - netOpts := openStackCluster.Spec.Network.ToListOpt() - networkList, err := networkingService.GetNetworksByFilter(&netOpts) - if err != nil { - handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to find network: %w", err)) - return fmt.Errorf("failed to find network: %w", err) - } - if len(networkList) == 0 { - handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to find any network")) - return fmt.Errorf("failed to find any network") - } - if len(networkList) == 1 { - convertOpenStackNetworkToCAPONetwork(openStackCluster, &networkList[0]) - } - } - return nil -} diff --git a/controllers/openstackcluster_controller_test.go b/controllers/openstackcluster_controller_test.go index 4a19d65f7a..971d0308d2 100644 --- a/controllers/openstackcluster_controller_test.go +++ b/controllers/openstackcluster_controller_test.go @@ -725,7 +725,7 @@ func createRequestFromOSCluster(openStackCluster *infrav1.OpenStackCluster) reco } } -func Test_ConvertOpenStackNetworkToCAPONetwork(t *testing.T) { +func Test_setClusterNetwork(t *testing.T) { openStackCluster := &infrav1.OpenStackCluster{} openStackCluster.Status.Network = &infrav1.NetworkStatusWithSubnets{} @@ -735,7 +735,7 @@ func Test_ConvertOpenStackNetworkToCAPONetwork(t *testing.T) { Tags: []string{"tag1", "tag2"}, } - convertOpenStackNetworkToCAPONetwork(openStackCluster, filterednetwork) + setClusterNetwork(openStackCluster, filterednetwork) expected := infrav1.NetworkStatus{ ID: "network1", Name: "network1", @@ -743,7 +743,49 @@ func Test_ConvertOpenStackNetworkToCAPONetwork(t *testing.T) { } if !reflect.DeepEqual(openStackCluster.Status.Network.NetworkStatus, expected) { - t.Errorf("ConvertOpenStackNetworkToCAPONetwork() = %v, want %v", openStackCluster.Status.Network.NetworkStatus, expected) + t.Errorf("setClusterNetwork() = %v, want %v", openStackCluster.Status.Network.NetworkStatus, expected) + } +} + +func Test_getAPIServerPort(t *testing.T) { + tests := []struct { + name string + openStackCluster *infrav1.OpenStackCluster + want int + }{ + { + name: "default", + openStackCluster: &infrav1.OpenStackCluster{}, + want: 6443, + }, + { + name: "with a control plane endpoint", + openStackCluster: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + ControlPlaneEndpoint: clusterv1.APIEndpoint{ + Host: "192.168.0.1", + Port: 6444, + }, + }, + }, + want: 6444, + }, + { + name: "with API server port", + openStackCluster: &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + APIServerPort: 6445, + }, + }, + want: 6445, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getAPIServerPort(tt.openStackCluster); got != tt.want { + t.Errorf("getAPIServerPort() = %v, want %v", got, tt.want) + } + }) } }