diff --git a/api/v1alpha5/conversion.go b/api/v1alpha5/conversion.go index 3f41962b92..c27b08ae42 100644 --- a/api/v1alpha5/conversion.go +++ b/api/v1alpha5/conversion.go @@ -437,3 +437,14 @@ func Convert_v1alpha5_OpenStackClusterStatus_To_v1alpha7_OpenStackClusterStatus( func Convert_v1alpha7_OpenStackMachineSpec_To_v1alpha5_OpenStackMachineSpec(in *infrav1.OpenStackMachineSpec, out *OpenStackMachineSpec, s conversion.Scope) error { return autoConvert_v1alpha7_OpenStackMachineSpec_To_v1alpha5_OpenStackMachineSpec(in, out, s) } + +func Convert_v1alpha7_OpenStackMachineStatus_To_v1alpha5_OpenStackMachineStatus(in *infrav1.OpenStackMachineStatus, out *OpenStackMachineStatus, s conversion.Scope) error { + err := autoConvert_v1alpha7_OpenStackMachineStatus_To_v1alpha5_OpenStackMachineStatus(in, out, s) + if err != nil { + return err + } + + // in.PortsStatus has no v1alpha5 equivalent and is dropped; never mutate in. + + return nil +} diff --git a/api/v1alpha5/zz_generated.conversion.go b/api/v1alpha5/zz_generated.conversion.go index 52681a9c65..16c98fdfba 100644 --- a/api/v1alpha5/zz_generated.conversion.go +++ b/api/v1alpha5/zz_generated.conversion.go @@ -199,11 +199,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha7.OpenStackMachineStatus)(nil), (*OpenStackMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha7_OpenStackMachineStatus_To_v1alpha5_OpenStackMachineStatus(a.(*v1alpha7.OpenStackMachineStatus), b.(*OpenStackMachineStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*OpenStackMachineTemplate)(nil), (*v1alpha7.OpenStackMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha5_OpenStackMachineTemplate_To_v1alpha7_OpenStackMachineTemplate(a.(*OpenStackMachineTemplate), b.(*v1alpha7.OpenStackMachineTemplate), scope) }); err != nil { @@ -394,6 +389,11 @@ func RegisterConversions(s *runtime.Scheme) error
{ }); err != nil { return err } + if err := s.AddConversionFunc((*v1alpha7.OpenStackMachineStatus)(nil), (*OpenStackMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha7_OpenStackMachineStatus_To_v1alpha5_OpenStackMachineStatus(a.(*v1alpha7.OpenStackMachineStatus), b.(*OpenStackMachineStatus), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1alpha7.PortOpts)(nil), (*PortOpts)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha7_PortOpts_To_v1alpha5_PortOpts(a.(*v1alpha7.PortOpts), b.(*PortOpts), scope) }); err != nil { @@ -1161,16 +1161,12 @@ func autoConvert_v1alpha7_OpenStackMachineStatus_To_v1alpha5_OpenStackMachineSta out.Addresses = *(*[]v1.NodeAddress)(unsafe.Pointer(&in.Addresses)) out.InstanceState = (*InstanceState)(unsafe.Pointer(in.InstanceState)) out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason)) + // WARNING: in.PortsStatus requires manual conversion: does not exist in peer-type out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) out.Conditions = *(*v1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } -// Convert_v1alpha7_OpenStackMachineStatus_To_v1alpha5_OpenStackMachineStatus is an autogenerated conversion function. 
-func Convert_v1alpha7_OpenStackMachineStatus_To_v1alpha5_OpenStackMachineStatus(in *v1alpha7.OpenStackMachineStatus, out *OpenStackMachineStatus, s conversion.Scope) error { - return autoConvert_v1alpha7_OpenStackMachineStatus_To_v1alpha5_OpenStackMachineStatus(in, out, s) -} - func autoConvert_v1alpha5_OpenStackMachineTemplate_To_v1alpha7_OpenStackMachineTemplate(in *OpenStackMachineTemplate, out *v1alpha7.OpenStackMachineTemplate, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1alpha5_OpenStackMachineTemplateSpec_To_v1alpha7_OpenStackMachineTemplateSpec(&in.Spec, &out.Spec, s); err != nil { diff --git a/api/v1alpha6/conversion.go b/api/v1alpha6/conversion.go index ae8e343ea8..64a90d1a2c 100644 --- a/api/v1alpha6/conversion.go +++ b/api/v1alpha6/conversion.go @@ -659,3 +659,14 @@ func Convert_v1alpha6_OpenStackClusterStatus_To_v1alpha7_OpenStackClusterStatus( func Convert_v1alpha7_OpenStackMachineSpec_To_v1alpha6_OpenStackMachineSpec(in *infrav1.OpenStackMachineSpec, out *OpenStackMachineSpec, s apiconversion.Scope) error { return autoConvert_v1alpha7_OpenStackMachineSpec_To_v1alpha6_OpenStackMachineSpec(in, out, s) } + +func Convert_v1alpha7_OpenStackMachineStatus_To_v1alpha6_OpenStackMachineStatus(in *infrav1.OpenStackMachineStatus, out *OpenStackMachineStatus, s apiconversion.Scope) error { + err := autoConvert_v1alpha7_OpenStackMachineStatus_To_v1alpha6_OpenStackMachineStatus(in, out, s) + if err != nil { + return err + } + + // in.PortsStatus has no v1alpha6 equivalent and is dropped; never mutate in. + + return nil +} diff --git a/api/v1alpha6/zz_generated.conversion.go b/api/v1alpha6/zz_generated.conversion.go index 6bfa0129c3..658912eb3c 100644 --- a/api/v1alpha6/zz_generated.conversion.go +++ b/api/v1alpha6/zz_generated.conversion.go @@ -209,11 +209,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1alpha7.OpenStackMachineStatus)(nil), (*OpenStackMachineStatus)(nil), func(a, b interface{}, scope
conversion.Scope) error { - return Convert_v1alpha7_OpenStackMachineStatus_To_v1alpha6_OpenStackMachineStatus(a.(*v1alpha7.OpenStackMachineStatus), b.(*OpenStackMachineStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*OpenStackMachineTemplate)(nil), (*v1alpha7.OpenStackMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha6_OpenStackMachineTemplate_To_v1alpha7_OpenStackMachineTemplate(a.(*OpenStackMachineTemplate), b.(*v1alpha7.OpenStackMachineTemplate), scope) }); err != nil { @@ -404,6 +399,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1alpha7.OpenStackMachineStatus)(nil), (*OpenStackMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha7_OpenStackMachineStatus_To_v1alpha6_OpenStackMachineStatus(a.(*v1alpha7.OpenStackMachineStatus), b.(*OpenStackMachineStatus), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1alpha7.PortOpts)(nil), (*PortOpts)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha7_PortOpts_To_v1alpha6_PortOpts(a.(*v1alpha7.PortOpts), b.(*PortOpts), scope) }); err != nil { @@ -1184,16 +1184,12 @@ func autoConvert_v1alpha7_OpenStackMachineStatus_To_v1alpha6_OpenStackMachineSta out.Addresses = *(*[]v1.NodeAddress)(unsafe.Pointer(&in.Addresses)) out.InstanceState = (*InstanceState)(unsafe.Pointer(in.InstanceState)) out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason)) + // WARNING: in.PortsStatus requires manual conversion: does not exist in peer-type out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) out.Conditions = *(*v1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } -// Convert_v1alpha7_OpenStackMachineStatus_To_v1alpha6_OpenStackMachineStatus is an autogenerated conversion function. 
-func Convert_v1alpha7_OpenStackMachineStatus_To_v1alpha6_OpenStackMachineStatus(in *v1alpha7.OpenStackMachineStatus, out *OpenStackMachineStatus, s conversion.Scope) error { - return autoConvert_v1alpha7_OpenStackMachineStatus_To_v1alpha6_OpenStackMachineStatus(in, out, s) -} - func autoConvert_v1alpha6_OpenStackMachineTemplate_To_v1alpha7_OpenStackMachineTemplate(in *OpenStackMachineTemplate, out *v1alpha7.OpenStackMachineTemplate, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1alpha6_OpenStackMachineTemplateSpec_To_v1alpha7_OpenStackMachineTemplateSpec(&in.Spec, &out.Spec, s); err != nil { diff --git a/api/v1alpha7/conditions_consts.go b/api/v1alpha7/conditions_consts.go index b54eade0b0..922327ca19 100644 --- a/api/v1alpha7/conditions_consts.go +++ b/api/v1alpha7/conditions_consts.go @@ -22,6 +22,10 @@ const ( // InstanceReadyCondition reports on current status of the OpenStack instance. Ready indicates the instance is in a Running state. InstanceReadyCondition clusterv1.ConditionType = "InstanceReady" + // PortsReadyCondition reports on the current status of the ports. Ready indicates that all ports have been created + // successfully and ready to be attached to the instance. + PortsReadyCondition clusterv1.ConditionType = "PortsReady" + // WaitingForClusterInfrastructureReason used when machine is waiting for cluster infrastructure to be ready before proceeding. WaitingForClusterInfrastructureReason = "WaitingForClusterInfrastructure" // WaitingForBootstrapDataReason used when machine is waiting for bootstrap data to be ready before proceeding. @@ -42,6 +46,10 @@ const ( InstanceDeleteFailedReason = "InstanceDeleteFailed" // OpenstackErrorReason used when there is an error communicating with OpenStack. OpenStackErrorReason = "OpenStackError" + // PortsCreateFailedReason used when creating the ports failed. + PortsCreateFailedReason = "PortsCreateFailed" + // PortsDeleteFailedReason used when deleting the ports failed. 
+ PortsDeleteFailedReason = "PortsDeleteFailed" ) const ( diff --git a/api/v1alpha7/openstackmachine_types.go b/api/v1alpha7/openstackmachine_types.go index f5f4f698aa..c22ca58845 100644 --- a/api/v1alpha7/openstackmachine_types.go +++ b/api/v1alpha7/openstackmachine_types.go @@ -113,6 +113,10 @@ type OpenStackMachineStatus struct { FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"` + // PortsStatus is the status of the ports associated with the machine. + // +optional + PortsStatus []PortStatus `json:"portsStatus,omitempty"` + // FailureMessage will be set in the event that there is a terminal problem // reconciling the Machine and will contain a more verbose string suitable // for logging and human consumption. diff --git a/api/v1alpha7/types.go b/api/v1alpha7/types.go index 3ef5305ff2..b3138f8580 100644 --- a/api/v1alpha7/types.go +++ b/api/v1alpha7/types.go @@ -128,6 +128,12 @@ type PortOpts struct { ValueSpecs []ValueSpec `json:"valueSpecs,omitempty"` } +type PortStatus struct { + // ID is the unique identifier of the port. + // +optional + ID string `json:"id,omitempty"` +} + type BindingProfile struct { // OVSHWOffload enables or disables the OVS hardware offload feature. 
OVSHWOffload bool `json:"ovsHWOffload,omitempty"` @@ -149,12 +155,13 @@ type AddressPair struct { } type BastionStatus struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - SSHKeyName string `json:"sshKeyName,omitempty"` - State InstanceState `json:"state,omitempty"` - IP string `json:"ip,omitempty"` - FloatingIP string `json:"floatingIP,omitempty"` + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + SSHKeyName string `json:"sshKeyName,omitempty"` + State InstanceState `json:"state,omitempty"` + PortsStatus PortStatus `json:"portsStatus,omitempty"` + IP string `json:"ip,omitempty"` + FloatingIP string `json:"floatingIP,omitempty"` } type RootVolume struct { diff --git a/api/v1alpha7/zz_generated.deepcopy.go b/api/v1alpha7/zz_generated.deepcopy.go index 1d81a83410..f1b5ead9b3 100644 --- a/api/v1alpha7/zz_generated.deepcopy.go +++ b/api/v1alpha7/zz_generated.deepcopy.go @@ -102,6 +102,7 @@ func (in *Bastion) DeepCopy() *Bastion { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BastionStatus) DeepCopyInto(out *BastionStatus) { *out = *in + out.PortsStatus = in.PortsStatus } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BastionStatus. @@ -721,6 +722,11 @@ func (in *OpenStackMachineStatus) DeepCopyInto(out *OpenStackMachineStatus) { *out = new(errors.MachineStatusError) **out = **in } + if in.PortsStatus != nil { + in, out := &in.PortsStatus, &out.PortsStatus + *out = make([]PortStatus, len(*in)) + copy(*out, *in) + } if in.FailureMessage != nil { in, out := &in.FailureMessage, &out.FailureMessage *out = new(string) @@ -903,6 +909,21 @@ func (in *PortOpts) DeepCopy() *PortOpts { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PortStatus) DeepCopyInto(out *PortStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortStatus. +func (in *PortStatus) DeepCopy() *PortStatus { + if in == nil { + return nil + } + out := new(PortStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RootVolume) DeepCopyInto(out *RootVolume) { *out = *in diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml index a5114ac6a2..95635adaae 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml @@ -4404,6 +4404,12 @@ spec: type: string name: type: string + portsStatus: + properties: + id: + description: ID is the unique identifier of the port. + type: string + type: object sshKeyName: type: string state: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml index e5171cd868..63fd0ec50f 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml @@ -1603,6 +1603,16 @@ spec: description: InstanceState is the state of the OpenStack instance for this machine. type: string + portsStatus: + description: PortsStatus is the status of the ports associated with + the machine. + items: + properties: + id: + description: ID is the unique identifier of the port. + type: string + type: object + type: array ready: description: Ready is true when the provider resource is ready. 
type: boolean diff --git a/controllers/openstackcluster_controller.go b/controllers/openstackcluster_controller.go index 08177fc9ed..0ef45d57f1 100644 --- a/controllers/openstackcluster_controller.go +++ b/controllers/openstackcluster_controller.go @@ -179,7 +179,7 @@ func (r *OpenStackClusterReconciler) reconcileDelete(ctx context.Context, scope return ctrl.Result{}, fmt.Errorf("failed to delete router: %w", err) } - if err = networkingService.DeletePorts(openStackCluster); err != nil { + if err = networkingService.DeleteClusterPorts(openStackCluster); err != nil { handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete ports: %w", err)) return reconcile.Result{}, fmt.Errorf("failed to delete ports: %w", err) } @@ -239,7 +239,7 @@ func deleteBastion(scope scope.Scope, cluster *clusterv1.Cluster, openStackClust instanceSpec := bastionToInstanceSpec(openStackCluster, cluster.Name) - if err = computeService.DeleteInstance(openStackCluster, openStackCluster, instanceStatus, instanceSpec); err != nil { + if err = computeService.DeleteInstance(openStackCluster, instanceStatus, instanceSpec); err != nil { handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete bastion: %w", err)) return fmt.Errorf("failed to delete bastion: %w", err) } @@ -320,6 +320,8 @@ func reconcileBastion(scope scope.Scope, cluster *clusterv1.Cluster, openStackCl return err } + // TODO(emilien) Create ports for bastion host + instanceSpec := bastionToInstanceSpec(openStackCluster, cluster.Name) bastionHash, err := compute.HashInstanceSpec(instanceSpec) if err != nil { @@ -349,7 +351,7 @@ func reconcileBastion(scope scope.Scope, cluster *clusterv1.Cluster, openStackCl } } - instanceStatus, err = computeService.CreateInstance(openStackCluster, openStackCluster, instanceSpec, cluster.Name) + instanceStatus, err = computeService.CreateInstance(openStackCluster, openStackCluster, instanceSpec, cluster.Name, []string{}) if err != nil { return fmt.Errorf("failed to reconcile 
bastion: %w", err) } @@ -398,18 +400,30 @@ func bastionToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, clusterNa RootVolume: openStackCluster.Spec.Bastion.Instance.RootVolume, } - instanceSpec.SecurityGroups = openStackCluster.Spec.Bastion.Instance.SecurityGroups + instanceSpec.SecurityGroups = getBastionSecurityGroups(openStackCluster) + + instanceSpec.Ports = openStackCluster.Spec.Bastion.Instance.Ports + + return instanceSpec +} + +// getBastionSecurityGroups returns a combination of openStackCluster.Spec.Bastion.Instance.SecurityGroups +// and the security group managed by the OpenStackCluster. +func getBastionSecurityGroups(openStackCluster *infrav1.OpenStackCluster) []infrav1.SecurityGroupFilter { + instanceSpecSecurityGroups := openStackCluster.Spec.Bastion.Instance.SecurityGroups + var managedSecurityGroup string if openStackCluster.Spec.ManagedSecurityGroups { if openStackCluster.Status.BastionSecurityGroup != nil { - instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, infrav1.SecurityGroupFilter{ - ID: openStackCluster.Status.BastionSecurityGroup.ID, - }) + managedSecurityGroup = openStackCluster.Status.BastionSecurityGroup.ID } } - instanceSpec.Ports = openStackCluster.Spec.Bastion.Instance.Ports - - return instanceSpec + if managedSecurityGroup != "" { + instanceSpecSecurityGroups = append(instanceSpecSecurityGroups, infrav1.SecurityGroupFilter{ + ID: managedSecurityGroup, + }) + } + return instanceSpecSecurityGroups } // bastionHashHasChanged returns a boolean whether if the latest bastion hash, built from the instance spec, has changed or not. 
diff --git a/controllers/openstackmachine_controller.go b/controllers/openstackmachine_controller.go index b0345c078c..d89d4ce554 100644 --- a/controllers/openstackmachine_controller.go +++ b/controllers/openstackmachine_controller.go @@ -246,6 +246,14 @@ func (r *OpenStackMachineReconciler) reconcileDelete(scope scope.Scope, cluster } } + portsStatus := openStackMachine.Status.PortsStatus + for _, portStatus := range portsStatus { + if err := networkingService.DeleteMachinePort(openStackMachine, portStatus); err != nil { + conditions.MarkFalse(openStackMachine, infrav1.PortsReadyCondition, infrav1.PortsDeleteFailedReason, clusterv1.ConditionSeverityError, "Deleting port failed: %v", err) + return ctrl.Result{}, fmt.Errorf("delete port %q: %w", portStatus.ID, err) + } + } + instanceStatus, err := computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name) if err != nil { return ctrl.Result{}, err @@ -275,7 +283,7 @@ func (r *OpenStackMachineReconciler) reconcileDelete(scope scope.Scope, cluster instanceSpec := machineToInstanceSpec(openStackCluster, machine, openStackMachine, "") - if err := computeService.DeleteInstance(openStackCluster, openStackMachine, instanceStatus, instanceSpec); err != nil { + if err := computeService.DeleteInstance(openStackMachine, instanceStatus, instanceSpec); err != nil { conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceDeleteFailedReason, clusterv1.ConditionSeverityError, "Deleting instance failed: %v", err) return ctrl.Result{}, fmt.Errorf("delete instance: %w", err) } @@ -328,9 +336,24 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope return ctrl.Result{}, err } - instanceStatus, err := r.getOrCreate(scope.Logger(), cluster, openStackCluster, machine, openStackMachine, computeService, userData) + managedSecurityGroups := getManagedSecurityGroups(openStackCluster, machine, openStackMachine) + instanceTags := getInstanceTags(openStackMachine, 
openStackCluster) + portsStatus, err := r.getOrCreatePorts(scope.Logger(), clusterName, openStackCluster, openStackMachine.Spec.Ports, openStackMachine.Spec.Trunk, managedSecurityGroups, instanceTags, openStackMachine.Name, openStackMachine, networkingService) if err != nil { - // Conditions set in getOrCreate + // Conditions set in getOrCreatePorts + return ctrl.Result{}, err + } + // TODO(emilien) do we want to deal with Ports Status like we do for the nova instance? Might be expensive... + // In the meantime, we consider that ports are ready if they are created. + conditions.MarkTrue(openStackMachine, infrav1.PortsReadyCondition) + portIDs := make([]string, len(*portsStatus)) + for i, portStatus := range *portsStatus { + portIDs[i] = portStatus.ID + } + + instanceStatus, err := r.getOrCreateInstance(scope.Logger(), cluster, openStackCluster, machine, openStackMachine, computeService, userData, portIDs) + if err != nil { + // Conditions set in getOrCreateInstance return ctrl.Result{}, err } @@ -430,7 +453,32 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope return ctrl.Result{}, nil } -func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service, userData string) (*compute.InstanceStatus, error) { +func (r *OpenStackMachineReconciler) getOrCreatePorts(logger logr.Logger, clusterName string, openStackCluster *infrav1.OpenStackCluster, machinePorts []infrav1.PortOpts, trunkEnabled bool, securityGroups []infrav1.SecurityGroupFilter, instanceTags []string, instanceName string, openStackMachine *infrav1.OpenStackMachine, networkingService *networking.Service) (*[]infrav1.PortStatus, error) { + // TODO(emilien) implement portsStatus where we check if the machine has ports created and part of the OpenStackMachine.Status + // Check `GetInstanceStatusByName` 
as it's done for instances. For each port found in Status, we might want to get the port name and verify there is no duplicate. + // If Status is empty, we create the ports and add them to the Status. + // If Status is not empty, we try to adopt the existing ports by checking if the ports are still there and if not, we create them and add them to the Status. + + // For now, we create the ports. + var portsStatus *[]infrav1.PortStatus + var err error + + if openStackMachine.Status.PortsStatus == nil { + logger.Info("Creating ports for OpenStackMachine", "name", openStackMachine.Name) + portsStatus, err = networkingService.CreatePorts(openStackMachine, clusterName, openStackCluster, machinePorts, trunkEnabled, securityGroups, instanceTags, instanceName) + if err != nil { + conditions.MarkFalse(openStackMachine, infrav1.PortsReadyCondition, infrav1.PortsCreateFailedReason, clusterv1.ConditionSeverityError, err.Error()) + return nil, fmt.Errorf("create ports: %w", err) + } + openStackMachine.Status.PortsStatus = *portsStatus + } else { + portsStatus = &openStackMachine.Status.PortsStatus // ports already created on a previous reconcile; reuse recorded status instead of dereferencing nil + } + + return portsStatus, nil +} + +func (r *OpenStackMachineReconciler) getOrCreateInstance(logger logr.Logger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service, userData string, portIDs []string) (*compute.InstanceStatus, error) { instanceStatus, err := computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name) if err != nil { logger.Info("Unable to get OpenStack instance", "name", openStackMachine.Name) @@ -441,7 +489,7 @@ func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *cl if instanceStatus == nil { instanceSpec := machineToInstanceSpec(openStackCluster, machine, openStackMachine, userData) logger.Info("Machine does not exist, creating Machine", "name", openStackMachine.Name) - instanceStatus, err = computeService.CreateInstance(openStackMachine,
openStackCluster, instanceSpec, cluster.Name) + instanceStatus, err = computeService.CreateInstance(openStackMachine, openStackCluster, instanceSpec, cluster.Name, portIDs) if err != nil { conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceCreateFailedReason, clusterv1.ConditionSeverityError, err.Error()) return nil, fmt.Errorf("create OpenStack instance: %w", err) @@ -472,6 +520,19 @@ func machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine * instanceSpec.FailureDomain = *machine.Spec.FailureDomain } + instanceSpec.Tags = getInstanceTags(openStackMachine, openStackCluster) + + instanceSpec.SecurityGroups = getManagedSecurityGroups(openStackCluster, machine, openStackMachine) + + instanceSpec.Ports = openStackMachine.Spec.Ports + + return &instanceSpec +} + +// getInstanceTags returns the tags that should be applied to the instance. +// The tags are a combination of the tags specified on the OpenStackMachine and +// the ones specified on the OpenStackCluster. 
+func getInstanceTags(openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster) []string { machineTags := []string{} // Append machine specific tags @@ -494,31 +555,31 @@ func machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine * } machineTags = deduplicate(machineTags) - instanceSpec.Tags = machineTags + return machineTags +} - instanceSpec.SecurityGroups = openStackMachine.Spec.SecurityGroups - if openStackCluster.Spec.ManagedSecurityGroups { - var managedSecurityGroup string - if util.IsControlPlaneMachine(machine) { - if openStackCluster.Status.ControlPlaneSecurityGroup != nil { - managedSecurityGroup = openStackCluster.Status.ControlPlaneSecurityGroup.ID - } - } else { - if openStackCluster.Status.WorkerSecurityGroup != nil { - managedSecurityGroup = openStackCluster.Status.WorkerSecurityGroup.ID - } +// getManagedSecurityGroups returns a combination of OpenStackMachine.Spec.SecurityGroups +// and the security group managed by the OpenStackCluster whether it's a control plane or a worker machine. 
+func getManagedSecurityGroups(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) []infrav1.SecurityGroupFilter { + machineSpecSecurityGroups := openStackMachine.Spec.SecurityGroups + var managedSecurityGroup string + if util.IsControlPlaneMachine(machine) { + if openStackCluster.Status.ControlPlaneSecurityGroup != nil { + managedSecurityGroup = openStackCluster.Status.ControlPlaneSecurityGroup.ID } - - if managedSecurityGroup != "" { - instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, infrav1.SecurityGroupFilter{ - ID: managedSecurityGroup, - }) + } else { + if openStackCluster.Status.WorkerSecurityGroup != nil { + managedSecurityGroup = openStackCluster.Status.WorkerSecurityGroup.ID } } - instanceSpec.Ports = openStackMachine.Spec.Ports + if managedSecurityGroup != "" { + machineSpecSecurityGroups = append(machineSpecSecurityGroups, infrav1.SecurityGroupFilter{ + ID: managedSecurityGroup, + }) + } - return &instanceSpec + return machineSpecSecurityGroups } func (r *OpenStackMachineReconciler) reconcileLoadBalancerMember(scope scope.Scope, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, instanceNS *compute.InstanceNetworkStatus, clusterName string) error { diff --git a/controllers/openstackmachine_controller_test.go b/controllers/openstackmachine_controller_test.go index 0d9045c221..f9ae4991d2 100644 --- a/controllers/openstackmachine_controller_test.go +++ b/controllers/openstackmachine_controller_test.go @@ -17,6 +17,7 @@ limitations under the License. package controllers import ( + "reflect" "testing" . 
"github.com/onsi/gomega" @@ -254,3 +255,203 @@ func Test_machineToInstanceSpec(t *testing.T) { }) } } + +func Test_getInstanceTags(t *testing.T) { + tests := []struct { + name string + openStackMachine func() *infrav1.OpenStackMachine + openStackCluster func() *infrav1.OpenStackCluster + wantMachineTags []string + }{ + { + name: "No tags", + openStackMachine: func() *infrav1.OpenStackMachine { + return &infrav1.OpenStackMachine{ + Spec: infrav1.OpenStackMachineSpec{}, + } + }, + openStackCluster: func() *infrav1.OpenStackCluster { + return &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{}, + } + }, + wantMachineTags: []string{}, + }, + { + name: "Machine tags only", + openStackMachine: func() *infrav1.OpenStackMachine { + return &infrav1.OpenStackMachine{ + Spec: infrav1.OpenStackMachineSpec{ + Tags: []string{"machine-tag1", "machine-tag2"}, + }, + } + }, + openStackCluster: func() *infrav1.OpenStackCluster { + return &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{}, + } + }, + wantMachineTags: []string{"machine-tag1", "machine-tag2"}, + }, + { + name: "Cluster tags only", + openStackMachine: func() *infrav1.OpenStackMachine { + return &infrav1.OpenStackMachine{ + Spec: infrav1.OpenStackMachineSpec{}, + } + }, + openStackCluster: func() *infrav1.OpenStackCluster { + return &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + Tags: []string{"cluster-tag1", "cluster-tag2"}, + }, + } + }, + wantMachineTags: []string{"cluster-tag1", "cluster-tag2"}, + }, + { + name: "Machine and cluster tags", + openStackMachine: func() *infrav1.OpenStackMachine { + return &infrav1.OpenStackMachine{ + Spec: infrav1.OpenStackMachineSpec{ + Tags: []string{"machine-tag1", "machine-tag2"}, + }, + } + }, + openStackCluster: func() *infrav1.OpenStackCluster { + return &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + Tags: []string{"cluster-tag1", "cluster-tag2"}, + }, + } + }, + wantMachineTags: []string{"machine-tag1", 
"machine-tag2", "cluster-tag1", "cluster-tag2"}, + }, + { + name: "Duplicate tags", + openStackMachine: func() *infrav1.OpenStackMachine { + return &infrav1.OpenStackMachine{ + Spec: infrav1.OpenStackMachineSpec{ + Tags: []string{"tag1", "tag2", "tag1"}, + }, + } + }, + openStackCluster: func() *infrav1.OpenStackCluster { + return &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + Tags: []string{"tag2", "tag3", "tag3"}, + }, + } + }, + wantMachineTags: []string{"tag1", "tag2", "tag3"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotMachineTags := getInstanceTags(tt.openStackMachine(), tt.openStackCluster()) + if !reflect.DeepEqual(gotMachineTags, tt.wantMachineTags) { + t.Errorf("getInstanceTags() = %v, want %v", gotMachineTags, tt.wantMachineTags) + } + }) + } +} + +func Test_getManagedSecurityGroups(t *testing.T) { + tests := []struct { + name string + openStackCluster func() *infrav1.OpenStackCluster + machine func() *clusterv1.Machine + openStackMachine func() *infrav1.OpenStackMachine + wantSecurityGroups []infrav1.SecurityGroupFilter + }{ + { + name: "Control plane machine with control plane security group", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Status.ControlPlaneSecurityGroup = &infrav1.SecurityGroup{ID: controlPlaneSecurityGroupUUID} + return c + }, + machine: func() *clusterv1.Machine { + m := getDefaultMachine() + m.Labels = map[string]string{ + clusterv1.MachineControlPlaneLabel: "true", + } + return m + }, + openStackMachine: getDefaultOpenStackMachine, + wantSecurityGroups: []infrav1.SecurityGroupFilter{ + {ID: controlPlaneSecurityGroupUUID}, + }, + }, + { + name: "Worker machine with worker security group", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Status.WorkerSecurityGroup = &infrav1.SecurityGroup{ID: workerSecurityGroupUUID} + return c + }, + machine: getDefaultMachine, + 
openStackMachine: getDefaultOpenStackMachine, + wantSecurityGroups: []infrav1.SecurityGroupFilter{ + {ID: workerSecurityGroupUUID}, + }, + }, + { + name: "Control plane machine without control plane security group", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Status.ControlPlaneSecurityGroup = nil + return c + }, + machine: func() *clusterv1.Machine { + m := getDefaultMachine() + m.Labels = map[string]string{ + clusterv1.MachineControlPlaneLabel: "true", + } + return m + }, + openStackMachine: getDefaultOpenStackMachine, + wantSecurityGroups: []infrav1.SecurityGroupFilter{}, + }, + { + name: "Worker machine without worker security group", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Status.WorkerSecurityGroup = nil + return c + }, + machine: getDefaultMachine, + openStackMachine: getDefaultOpenStackMachine, + wantSecurityGroups: []infrav1.SecurityGroupFilter{}, + }, + { + name: "Machine with additional security groups", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Status.ControlPlaneSecurityGroup = &infrav1.SecurityGroup{ID: controlPlaneSecurityGroupUUID} + c.Status.WorkerSecurityGroup = &infrav1.SecurityGroup{ID: workerSecurityGroupUUID} + return c + }, + machine: getDefaultMachine, + openStackMachine: func() *infrav1.OpenStackMachine { + m := getDefaultOpenStackMachine() + m.Spec.SecurityGroups = []infrav1.SecurityGroupFilter{{ID: extraSecurityGroupUUID}} + return m + }, + wantSecurityGroups: []infrav1.SecurityGroupFilter{ + {ID: extraSecurityGroupUUID}, + {ID: workerSecurityGroupUUID}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotMachineSecurity := getManagedSecurityGroups(tt.openStackCluster(), tt.machine(), tt.openStackMachine()) + if !reflect.DeepEqual(gotMachineSecurity, tt.wantSecurityGroups) { + t.Errorf("getManagedSecurityGroups() = %v, want %v", 
gotMachineSecurity, tt.wantSecurityGroups) + } + }) + } +} diff --git a/controllers/suite_test.go b/controllers/suite_test.go index b0edc98d5f..1cb8a8ba32 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -151,7 +151,7 @@ var _ = Describe("When calling getOrCreate", func() { openStackMachine := &infrav1.OpenStackMachine{} mockScopeFactory.ComputeClient.EXPECT().ListServers(gomock.Any()).Return(nil, errors.New("Test error when listing servers")) - instanceStatus, err := reconsiler.getOrCreate(logger, cluster, openStackCluster, machine, openStackMachine, computeService, "") + instanceStatus, err := reconsiler.getOrCreateInstance(logger, cluster, openStackCluster, machine, openStackMachine, computeService, "", []string{}) Expect(err).To(HaveOccurred()) Expect(instanceStatus).To(BeNil()) conditions := openStackMachine.GetConditions() diff --git a/pkg/clients/networking.go b/pkg/clients/networking.go index 23ad3c8fdf..e5f609c74d 100644 --- a/pkg/clients/networking.go +++ b/pkg/clients/networking.go @@ -36,6 +36,11 @@ import ( "sigs.k8s.io/cluster-api-provider-openstack/pkg/metrics" ) +// PortExt is the base gophercloud Port with extensions used by PortStatus. 
+type PortExt struct { + ports.Port +} + type NetworkClient interface { ListFloatingIP(opts floatingips.ListOptsBuilder) ([]floatingips.FloatingIP, error) CreateFloatingIP(opts floatingips.CreateOptsBuilder) (*floatingips.FloatingIP, error) diff --git a/pkg/cloud/services/compute/instance.go b/pkg/cloud/services/compute/instance.go index c6b70af87e..d6b5793582 100644 --- a/pkg/cloud/services/compute/instance.go +++ b/pkg/cloud/services/compute/instance.go @@ -90,7 +90,6 @@ func (s *Service) normalizePortTarget(port *infrav1.PortOpts, openStackCluster * if fixedIP.Subnet == nil { continue } - subnet, err := networkingService.GetSubnetByFilter(fixedIP.Subnet) if err != nil { // Multiple matches might be ok later when we restrict matches to a single network @@ -221,8 +220,8 @@ func (s *Service) constructPorts(openStackCluster *infrav1.OpenStackCluster, ins return ports, nil } -func (s *Service) CreateInstance(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string) (*InstanceStatus, error) { - return s.createInstanceImpl(eventObject, openStackCluster, instanceSpec, clusterName, retryIntervalInstanceStatus) +func (s *Service) CreateInstance(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, portIDs []string) (*InstanceStatus, error) { + return s.createInstanceImpl(eventObject, openStackCluster, instanceSpec, clusterName, retryIntervalInstanceStatus, portIDs) } func (s *Service) getAndValidateFlavor(flavorName string) (*flavors.Flavor, error) { @@ -234,7 +233,7 @@ func (s *Service) getAndValidateFlavor(flavorName string) (*flavors.Flavor, erro return f, nil } -func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, retryInterval time.Duration) (*InstanceStatus, error) { +func (s *Service) createInstanceImpl(eventObject runtime.Object, 
openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, retryInterval time.Duration, portIDs []string) (*InstanceStatus, error) { var server *clients.ServerExt portList := []servers.Network{} @@ -248,47 +247,44 @@ func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluste return nil, err } - // Ensure we delete the ports we created if we haven't created the server. - defer func() { - if server != nil { - return + if len(portIDs) == 0 { + ports, err := s.constructPorts(openStackCluster, instanceSpec) + if err != nil { + return nil, err } - if err := s.deletePorts(eventObject, portList); err != nil { - s.scope.Logger().V(4).Error(err, "Failed to clean up ports after failure") + networkingService, err := s.getNetworkingService() + if err != nil { + return nil, err } - }() - ports, err := s.constructPorts(openStackCluster, instanceSpec) - if err != nil { - return nil, err - } - - networkingService, err := s.getNetworkingService() - if err != nil { - return nil, err - } + securityGroups, err := networkingService.GetSecurityGroups(instanceSpec.SecurityGroups) + if err != nil { + return nil, fmt.Errorf("error getting security groups: %v", err) + } - securityGroups, err := networkingService.GetSecurityGroups(instanceSpec.SecurityGroups) - if err != nil { - return nil, fmt.Errorf("error getting security groups: %v", err) - } + for i := range ports { + portOpts := &ports[i] + iTags := []string{} + if len(instanceSpec.Tags) > 0 { + iTags = instanceSpec.Tags + } + portName := networking.GetPortName(instanceSpec.Name, portOpts, i) + port, err := networkingService.GetOrCreatePort(eventObject, clusterName, portName, portOpts, securityGroups, iTags) + if err != nil { + return nil, err + } - for i := range ports { - portOpts := &ports[i] - iTags := []string{} - if len(instanceSpec.Tags) > 0 { - iTags = instanceSpec.Tags + portList = append(portList, servers.Network{ + Port: port.ID, + }) } - portName := 
networking.GetPortName(instanceSpec.Name, portOpts, i) - port, err := networkingService.GetOrCreatePort(eventObject, clusterName, portName, portOpts, securityGroups, iTags) - if err != nil { - return nil, err + } else { + for _, portID := range portIDs { + portList = append(portList, servers.Network{ + Port: portID, + }) } - - portList = append(portList, servers.Network{ - Port: port.ID, - }) } instanceCreateTimeout := getTimeout("CLUSTER_API_OPENSTACK_INSTANCE_CREATE_TIMEOUT", timeoutInstanceCreate) @@ -619,7 +615,7 @@ func (s *Service) GetManagementPort(openStackCluster *infrav1.OpenStackCluster, return &allPorts[0], nil } -func (s *Service) DeleteInstance(openStackCluster *infrav1.OpenStackCluster, eventObject runtime.Object, instanceStatus *InstanceStatus, instanceSpec *InstanceSpec) error { +func (s *Service) DeleteInstance(eventObject runtime.Object, instanceStatus *InstanceStatus, instanceSpec *InstanceSpec) error { if instanceStatus == nil { /* Attaching volumes to an instance is a two-step process: @@ -642,49 +638,6 @@ func (s *Service) DeleteInstance(openStackCluster *infrav1.OpenStackCluster, eve return s.deleteVolumes(instanceSpec) } - instanceInterfaces, err := s.getComputeClient().ListAttachedInterfaces(instanceStatus.ID()) - if err != nil { - return err - } - - trunkSupported, err := s.isTrunkExtSupported() - if err != nil { - return fmt.Errorf("obtaining network extensions: %v", err) - } - - networkingService, err := s.getNetworkingService() - if err != nil { - return err - } - - // get and delete trunks - for _, port := range instanceInterfaces { - if err = s.deleteAttachInterface(eventObject, instanceStatus.InstanceIdentifier(), port.PortID); err != nil { - return err - } - - if trunkSupported { - if err = networkingService.DeleteTrunk(eventObject, port.PortID); err != nil { - return err - } - } - if err = networkingService.DeletePort(eventObject, port.PortID); err != nil { - return err - } - } - - // delete port of error instance - if 
instanceStatus.State() == infrav1.InstanceStateError { - portOpts, err := s.constructPorts(openStackCluster, instanceSpec) - if err != nil { - return err - } - - if err := networkingService.GarbageCollectErrorInstancesPort(eventObject, instanceSpec.Name, portOpts); err != nil { - return err - } - } - return s.deleteInstance(eventObject, instanceStatus.InstanceIdentifier()) } @@ -716,50 +669,6 @@ func (s *Service) deleteVolume(instanceName string, nameSuffix string) error { return s.getVolumeClient().DeleteVolume(volume.ID, volumes.DeleteOpts{}) } -func (s *Service) deletePorts(eventObject runtime.Object, nets []servers.Network) error { - trunkSupported, err := s.isTrunkExtSupported() - if err != nil { - return err - } - - for _, n := range nets { - networkingService, err := s.getNetworkingService() - if err != nil { - return err - } - - if trunkSupported { - if err = networkingService.DeleteTrunk(eventObject, n.Port); err != nil { - return err - } - } - if err := networkingService.DeletePort(eventObject, n.Port); err != nil { - return err - } - } - return nil -} - -func (s *Service) deleteAttachInterface(eventObject runtime.Object, instance *InstanceIdentifier, portID string) error { - err := s.getComputeClient().DeleteAttachedInterface(instance.ID, portID) - if err != nil { - if capoerrors.IsNotFound(err) { - record.Eventf(eventObject, "SuccessfulDeleteAttachInterface", "Attach interface did not exist: instance %s, port %s", instance.ID, portID) - return nil - } - if capoerrors.IsConflict(err) { - // we don't want to block deletion because of Conflict - // due to instance must be paused/active/shutoff in order to detach interface - return nil - } - record.Warnf(eventObject, "FailedDeleteAttachInterface", "Failed to delete attach interface: instance %s, port %s: %v", instance.ID, portID, err) - return err - } - - record.Eventf(eventObject, "SuccessfulDeleteAttachInterface", "Deleted attach interface: instance %s, port %s", instance.ID, portID) - return nil -} - 
func (s *Service) deleteInstance(eventObject runtime.Object, instance *InstanceIdentifier) error { err := s.getComputeClient().DeleteServer(instance.ID) if err != nil { diff --git a/pkg/cloud/services/compute/instance_test.go b/pkg/cloud/services/compute/instance_test.go index cafc9e3edc..ee47f27294 100644 --- a/pkg/cloud/services/compute/instance_test.go +++ b/pkg/cloud/services/compute/instance_test.go @@ -27,97 +27,23 @@ import ( "github.com/google/go-cmp/cmp" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes" - common "github.com/gophercloud/gophercloud/openstack/common/extensions" - "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones" "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags" - "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks" "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gstruct" - gomegatypes "github.com/onsi/gomega/types" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/pointer" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7" "sigs.k8s.io/cluster-api-provider-openstack/pkg/clients" "sigs.k8s.io/cluster-api-provider-openstack/pkg/clients/mock" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking" "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" ) -type gomegaMockMatcher struct { - matcher gomegatypes.GomegaMatcher - description string -} - -func newGomegaMockMatcher(matcher gomegatypes.GomegaMatcher) *gomegaMockMatcher { - return &gomegaMockMatcher{ - matcher: matcher, - description: "", - } -} - -func (m *gomegaMockMatcher) String() string { - return m.description -} - -func (m *gomegaMockMatcher) Matches(x interface{}) bool { - success, err := m.matcher.Match(x) - Expect(err).NotTo(HaveOccurred()) - if !success { - m.description = m.matcher.FailureMessage(x) - } - return success -} - -func Test_getPortName(t *testing.T) { - type args struct { - instanceName string - opts *infrav1.PortOpts - netIndex int - } - tests := []struct { - name string - args args - want string - }{ - { - name: "with nil PortOpts", - args: args{"test-1-instance", nil, 2}, - want: "test-1-instance-2", - }, - { - name: "with PortOpts name suffix", - args: args{"test-1-instance", &infrav1.PortOpts{NameSuffix: "foo"}, 4}, - want: "test-1-instance-foo", - }, - { - name: "without PortOpts name suffix", - args: args{"test-1-instance", &infrav1.PortOpts{}, 4}, - want: "test-1-instance-4", - }, - { - name: "with PortOpts name suffix", - args: args{"test-1-instance", &infrav1.PortOpts{NameSuffix: "foo2", Network: &infrav1.NetworkFilter{ID: "bar"}, DisablePortSecurity: pointer.Bool(true)}, 4}, - want: "test-1-instance-foo2", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := networking.GetPortName(tt.args.instanceName, tt.args.opts, tt.args.netIndex); got != 
tt.want { - t.Errorf("getPortName() = %v, want %v", got, tt.want) - } - }) - } -} - func TestService_getImageID(t *testing.T) { const imageIDA = "ce96e584-7ebc-46d6-9e55-987d72e3806c" const imageIDB = "8f536889-5198-42d7-8314-cb78f4f4755c" @@ -341,40 +267,6 @@ func TestService_ReconcileInstance(t *testing.T) { }, nil) } - expectCreatePort := func(networkRecorder *mock.MockNetworkClientMockRecorder, name string, networkID string) { - networkRecorder.ListPort(ports.ListOpts{ - Name: name, - NetworkID: networkID, - }).Return([]ports.Port{}, nil) - - // gomock won't match a pointer to a nil slice in SecurityGroups, so we do this - networkRecorder.CreatePort(gomock.Any()).DoAndReturn(func(createOpts ports.CreateOptsBuilder) (*ports.Port, error) { - createOptsMap, err := createOpts.ToPortCreateMap() - Expect(err).NotTo(HaveOccurred()) - - // Match only the fields we're interested in - portOpts := createOptsMap["port"].(map[string]interface{}) - Expect(portOpts).To(MatchKeys(IgnoreExtras, Keys{ - "network_id": Equal(networkUUID), - "name": Equal(portName), - })) - - return &ports.Port{ - ID: portUUID, - NetworkID: networkUUID, - Name: portName, - Description: portOpts["description"].(string), - }, nil - }) - networkRecorder.ReplaceAllAttributesTags("ports", portUUID, attributestags.ReplaceAllOpts{Tags: []string{"test-tag"}}).Return(nil, nil) - } - - // Expected calls if we delete the network port - expectCleanupDefaultPort := func(networkRecorder *mock.MockNetworkClientMockRecorder) { - networkRecorder.ListExtensions() - networkRecorder.DeletePort(portUUID).Return(nil) - } - // Expected calls when using default image and flavor expectDefaultImageAndFlavor := func(computeRecorder *mock.MockComputeClientMockRecorder, imageRecorder *mock.MockImageClientMockRecorder) { imageRecorder.ListImages(images.ListOpts{Name: imageName}).Return([]images.Image{{ID: imageUUID}}, nil) @@ -464,45 +356,6 @@ func TestService_ReconcileInstance(t *testing.T) { }, wantErr: false, }, - { - name: 
"Delete ports on server create error", - getInstanceSpec: getDefaultInstanceSpec, - expect: func(r *recorders) { - expectUseExistingDefaultPort(r.network) - expectDefaultImageAndFlavor(r.compute, r.image) - - expectCreateServer(r.compute, getDefaultServerMap(), true) - - // Make sure we delete ports - expectCleanupDefaultPort(r.network) - }, - wantErr: true, - }, - { - name: "Delete previously created ports on port creation error", - getInstanceSpec: func() *InstanceSpec { - s := getDefaultInstanceSpec() - s.Ports = []infrav1.PortOpts{ - {Description: "Test port 0"}, - {Description: "Test port 1"}, - } - return s - }, - expect: func(r *recorders) { - expectDefaultImageAndFlavor(r.compute, r.image) - expectUseExistingDefaultPort(r.network) - - // Looking up the second port fails - r.network.ListPort(ports.ListOpts{ - Name: "test-openstack-machine-1", - NetworkID: networkUUID, - }).Return(nil, fmt.Errorf("test error")) - - // We should cleanup the first port - expectCleanupDefaultPort(r.network) - }, - wantErr: true, - }, { name: "Poll until server is created", getInstanceSpec: getDefaultInstanceSpec, @@ -524,8 +377,6 @@ func TestService_ReconcileInstance(t *testing.T) { expectCreateServer(r.compute, getDefaultServerMap(), false) expectServerPoll(r.compute, []string{"BUILDING", "ERROR"}) - - // Don't delete ports because the server is created: DeleteInstance will do it }, wantErr: true, }, @@ -644,8 +495,6 @@ func TestService_ReconcileInstance(t *testing.T) { Multiattach: false, }).Return(&volumes.Volume{ID: rootVolumeUUID}, nil) expectVolumePoll(r.volume, rootVolumeUUID, []string{"creating", "error"}) - - expectCleanupDefaultPort(r.network) }, wantErr: true, }, @@ -894,66 +743,6 @@ func TestService_ReconcileInstance(t *testing.T) { expect: func(r *recorders) { expectUseExistingDefaultPort(r.network) expectDefaultImageAndFlavor(r.compute, r.image) - - // Make sure we delete ports - expectCleanupDefaultPort(r.network) - }, - wantErr: true, - }, - { - name: "Delete 
trunks on port creation error", - getInstanceSpec: func() *InstanceSpec { - s := getDefaultInstanceSpec() - s.Ports = []infrav1.PortOpts{ - {Description: "Test port 0", Trunk: pointer.Bool(true)}, - {Description: "Test port 1"}, - } - return s - }, - expect: func(r *recorders) { - expectDefaultImageAndFlavor(r.compute, r.image) - extensions := []extensions.Extension{ - {Extension: common.Extension{Alias: "trunk"}}, - } - r.network.ListExtensions().Return(extensions, nil) - - expectCreatePort(r.network, portName, networkUUID) - - // Check for existing trunk - r.network.ListTrunk(newGomegaMockMatcher( - MatchFields(IgnoreExtras, Fields{ - "Name": Equal(portName), - "PortID": Equal(portUUID), - }), - )).Return([]trunks.Trunk{}, nil) - - // Create new trunk - r.network.CreateTrunk(newGomegaMockMatcher(MatchFields(IgnoreExtras, Fields{ - "Name": Equal(portName), - "PortID": Equal(portUUID), - }))).Return(&trunks.Trunk{ - PortID: portUUID, - ID: trunkUUID, - }, nil) - r.network.ReplaceAllAttributesTags("trunks", trunkUUID, attributestags.ReplaceAllOpts{Tags: []string{"test-tag"}}).Return(nil, nil) - - // Looking up the second port fails - r.network.ListPort(ports.ListOpts{ - Name: "test-openstack-machine-1", - NetworkID: networkUUID, - }).Return(nil, fmt.Errorf("test error")) - - r.network.ListExtensions().Return(extensions, nil) - - r.network.ListTrunk(newGomegaMockMatcher( - MatchFields(IgnoreExtras, Fields{ - "PortID": Equal(portUUID), - }), - )).Return([]trunks.Trunk{{ID: trunkUUID}}, nil) - - // We should cleanup the first port and its trunk - r.network.DeleteTrunk(trunkUUID).Return(nil) - r.network.DeletePort(portUUID).Return(nil) }, wantErr: true, }, @@ -976,7 +765,7 @@ func TestService_ReconcileInstance(t *testing.T) { } // Call CreateInstance with a reduced retry interval to speed up the test - _, err = s.createInstanceImpl(&infrav1.OpenStackMachine{}, getDefaultOpenStackCluster(), tt.getInstanceSpec(), "cluster-name", time.Nanosecond) + _, err = 
s.createInstanceImpl(&infrav1.OpenStackMachine{}, getDefaultOpenStackCluster(), tt.getInstanceSpec(), "cluster-name", time.Nanosecond, []string{}) if (err != nil) != tt.wantErr { t.Errorf("Service.CreateInstance() error = %v, wantErr %v", err, tt.wantErr) return @@ -1021,21 +810,6 @@ func TestService_DeleteInstance(t *testing.T) { eventObject: &infrav1.OpenStackMachine{}, instanceStatus: getDefaultInstanceStatus, expect: func(r *recorders) { - r.compute.ListAttachedInterfaces(instanceUUID).Return([]attachinterfaces.Interface{ - { - PortID: portUUID, - }, - }, nil) - r.network.ListExtensions().Return([]extensions.Extension{{ - Extension: common.Extension{ - Alias: "trunk", - }, - }}, nil) - r.compute.DeleteAttachedInterface(instanceUUID, portUUID).Return(nil) - // FIXME: Why we are looking for a trunk when we know the port is not trunked? - r.network.ListTrunk(trunks.ListOpts{PortID: portUUID}).Return([]trunks.Trunk{}, nil) - r.network.DeletePort(portUUID).Return(nil) - r.compute.DeleteServer(instanceUUID).Return(nil) r.compute.GetServer(instanceUUID).Return(nil, gophercloud.ErrDefault404{}) }, @@ -1087,7 +861,7 @@ func TestService_DeleteInstance(t *testing.T) { RootVolume: tt.rootVolume, } - if err := s.DeleteInstance(&infrav1.OpenStackCluster{}, tt.eventObject, tt.instanceStatus(), instanceSpec); (err != nil) != tt.wantErr { + if err := s.DeleteInstance(tt.eventObject, tt.instanceStatus(), instanceSpec); (err != nil) != tt.wantErr { t.Errorf("Service.DeleteInstance() error = %v, wantErr %v", err, tt.wantErr) } }) diff --git a/pkg/cloud/services/networking/port.go b/pkg/cloud/services/networking/port.go index e3e9b0d8dd..7d4659055d 100644 --- a/pkg/cloud/services/networking/port.go +++ b/pkg/cloud/services/networking/port.go @@ -18,6 +18,7 @@ package networking import ( "context" + "errors" "fmt" "strings" "time" @@ -166,6 +167,14 @@ func (s *Service) GetOrCreatePort(eventObject runtime.Object, clusterName string return nil, err } + defer func() { + if err != nil { 
+ if err := s.DeletePort(eventObject, port.ID); err != nil { + record.Warnf(eventObject, "FailedDeletePort", "Failed to delete port %s: %v", portName, err) + } + } + }() + var tags []string tags = append(tags, instanceTags...) tags = append(tags, portOpts.Tags...) @@ -265,7 +274,25 @@ func (s *Service) DeletePort(eventObject runtime.Object, portID string) error { return nil } -func (s *Service) DeletePorts(openStackCluster *infrav1.OpenStackCluster) error { +func (s *Service) DeleteMachinePort(openStackMachine *infrav1.OpenStackMachine, port infrav1.PortStatus) error { + trunkSupported, err := s.isTrunkExtSupported() + if err != nil { + return err + } + + if trunkSupported { + if err := s.DeleteTrunk(openStackMachine, port.ID); err != nil { + return fmt.Errorf("error deleting trunk of port %s: %v", port.ID, err) + } + } + if err := s.DeletePort(openStackMachine, port.ID); err != nil { + return fmt.Errorf("error deleting port %s: %v", port.ID, err) + } + + return nil +} + +func (s *Service) DeleteClusterPorts(openStackCluster *infrav1.OpenStackCluster) error { // If the network is not ready, do nothing if openStackCluster.Status.Network == nil || openStackCluster.Status.Network.ID == "" { return nil @@ -283,8 +310,18 @@ func (s *Service) DeletePorts(openStackCluster *infrav1.OpenStackCluster) error return fmt.Errorf("list ports of network %q: %v", networkID, err) } + trunkSupported, err := s.isTrunkExtSupported() + if err != nil { + return err + } + for _, port := range portList { if strings.HasPrefix(port.Name, openStackCluster.Name) { + if trunkSupported { + if err = s.DeleteTrunk(openStackCluster, port.ID); err != nil { + return fmt.Errorf("delete trunk of port %s failed : %v", port.ID, err) + } + } err := s.DeletePort(openStackCluster, port.ID) if err != nil { return fmt.Errorf("delete port %s of network %q failed : %v", port.ID, networkID, err) @@ -295,38 +332,217 @@ func (s *Service) DeletePorts(openStackCluster *infrav1.OpenStackCluster) error return nil } 
-func (s *Service) GarbageCollectErrorInstancesPort(eventObject runtime.Object, instanceName string, portOpts []infrav1.PortOpts) error { - for i := range portOpts { - portOpt := &portOpts[i] +// GetPortName appends a suffix to an instance name in order to try and get a unique name per port. +func GetPortName(instanceName string, opts *infrav1.PortOpts, netIndex int) string { + if opts != nil && opts.NameSuffix != "" { + return fmt.Sprintf("%s-%s", instanceName, opts.NameSuffix) + } + return fmt.Sprintf("%s-%d", instanceName, netIndex) +} + +func (s *Service) CreatePorts(eventObject runtime.Object, clusterName string, openStackCluster *infrav1.OpenStackCluster, ports []infrav1.PortOpts, trunkEnabled bool, securityGroups []infrav1.SecurityGroupFilter, instanceTags []string, instanceName string) (*[]infrav1.PortStatus, error) { + return s.createPortsImpl(eventObject, clusterName, openStackCluster, ports, trunkEnabled, securityGroups, instanceTags, instanceName) +} - portName := GetPortName(instanceName, portOpt, i) +func (s *Service) createPortsImpl(eventObject runtime.Object, clusterName string, openStackCluster *infrav1.OpenStackCluster, ports []infrav1.PortOpts, trunkEnabled bool, securityGroups []infrav1.SecurityGroupFilter, instanceTags []string, instanceName string) (*[]infrav1.PortStatus, error) { + instancePorts, err := s.constructPorts(openStackCluster, ports, trunkEnabled) + if err != nil { + return nil, err + } - // TODO: whould be nice if gophercloud could be persuaded to accept multiple - // names as is allowed by the API in order to reduce API traffic. 
- portList, err := s.client.ListPort(ports.ListOpts{Name: portName}) + instanceSecurityGroups, err := s.GetSecurityGroups(securityGroups) + if err != nil { + return nil, fmt.Errorf("error getting security groups: %v", err) + } + + portsStatus := make([]infrav1.PortStatus, 0, len(instancePorts)) + + for i := range instancePorts { + portOpts := &instancePorts[i] + iTags := []string{} + if len(instanceTags) > 0 { + iTags = instanceTags + } + portName := GetPortName(instanceName, portOpts, i) + port, err := s.GetOrCreatePort(eventObject, clusterName, portName, portOpts, instanceSecurityGroups, iTags) if err != nil { - return err + record.Warnf(eventObject, "FailedCreatePort", "Failed to create port; name=%s err=%v", portName, err) + return nil, err } + record.Eventf(eventObject, "SuccessfulCreatePort", "Created port; id=%s", port.ID) - // NOTE: https://github.com/kubernetes-sigs/cluster-api-provider-openstack/issues/1476 - // It is up to the end user to specify a UNIQUE cluster name when provisioning in the - // same project, otherwise things will alias and we could delete more than we should. 
- if len(portList) > 1 { - return fmt.Errorf("garbage collection of port %s failed, found %d ports with the same name", portName, len(portList)) + portsStatus = append(portsStatus, infrav1.PortStatus{ + ID: port.ID, + }) + } + + return &portsStatus, nil +} + +func (s *Service) constructPorts(openStackCluster *infrav1.OpenStackCluster, ports []infrav1.PortOpts, trunkEnabled bool) ([]infrav1.PortOpts, error) { + // Ensure user-specified ports have all required fields + ports, err := s.normalizePorts(ports, openStackCluster, trunkEnabled) + if err != nil { + return nil, err + } + + // no networks or ports found in the spec, so create a port on the cluster network + if len(ports) == 0 { + port := infrav1.PortOpts{ + Network: &infrav1.NetworkFilter{ + ID: openStackCluster.Status.Network.ID, + }, + Trunk: &trunkEnabled, + } + for _, subnet := range openStackCluster.Status.Network.Subnets { + port.FixedIPs = append(port.FixedIPs, infrav1.FixedIP{ + Subnet: &infrav1.SubnetFilter{ + ID: subnet.ID, + }, + }) } + ports = []infrav1.PortOpts{port} + } - if err := s.DeletePort(eventObject, portList[0].ID); err != nil { + // trunk support is required if any port has trunk enabled + portUsesTrunk := func() bool { + for _, port := range ports { + if port.Trunk != nil && *port.Trunk { + return true + } + } + return false + } + if portUsesTrunk() { + trunkSupported, err := s.isTrunkExtSupported() + if err != nil { + return nil, err + } + if !trunkSupported { + return nil, fmt.Errorf("there is no trunk support. 
please ensure that the trunk extension is enabled in your OpenStack deployment") + } + } + + return ports, nil +} + +func (s *Service) normalizePorts(ports []infrav1.PortOpts, openStackCluster *infrav1.OpenStackCluster, trunkEnabled bool) ([]infrav1.PortOpts, error) { + normalizedPorts := make([]infrav1.PortOpts, 0, len(ports)) + for i := range ports { + // Deep copy the port to avoid mutating the original + port := ports[i].DeepCopy() + + // No Trunk field specified for the port, inherit the machine default + if port.Trunk == nil { + port.Trunk = &trunkEnabled + } + + if err := s.normalizePortTarget(port, openStackCluster, i); err != nil { + return nil, err + } + + normalizedPorts = append(normalizedPorts, *port) + } + return normalizedPorts, nil +} + +func (s *Service) normalizePortTarget(port *infrav1.PortOpts, openStackCluster *infrav1.OpenStackCluster, portIdx int) error { + // Treat no Network and empty Network the same + noNetwork := port.Network == nil || (*port.Network == infrav1.NetworkFilter{}) + + // No network or subnets defined: use cluster defaults + if noNetwork && len(port.FixedIPs) == 0 { + port.Network = &infrav1.NetworkFilter{ + ID: openStackCluster.Status.Network.ID, + } + for _, subnet := range openStackCluster.Status.Network.Subnets { + port.FixedIPs = append(port.FixedIPs, infrav1.FixedIP{ + Subnet: &infrav1.SubnetFilter{ + ID: subnet.ID, + }, + }) + } + + return nil + } + + // No network, but fixed IPs are defined(we handled the no fixed + // IPs case above): try to infer network from a subnet + if noNetwork { + s.scope.Logger().V(4).Info("No network defined for port, attempting to infer from subnet", "port", portIdx) + + // Look for a unique subnet defined in FixedIPs. If we find one + // we can use it to infer the network ID. We don't need to worry + // here about the case where different FixedIPs have different + // networks because that will cause an error later when we try + // to create the port. 
+ networkID, err := func() (string, error) { + for i, fixedIP := range port.FixedIPs { + if fixedIP.Subnet == nil { + continue + } + + subnet, err := s.GetSubnetByFilter(fixedIP.Subnet) + if err != nil { + // Multiple matches might be ok later when we restrict matches to a single network + if errors.Is(err, ErrMultipleMatches) { + s.scope.Logger().V(4).Info("Couldn't infer network from subnet", "subnetIndex", i, "err", err) + continue + } + + return "", err + } + + // Cache the subnet ID in the FixedIP + fixedIP.Subnet.ID = subnet.ID + return subnet.NetworkID, nil + } + + // TODO: This is a spec error: it should set the machine to failed + return "", fmt.Errorf("port %d has no network and unable to infer from fixed IPs", portIdx) + }() + if err != nil { return err } + + port.Network = &infrav1.NetworkFilter{ + ID: networkID, + } + + return nil + } + + // Nothing to do if network ID is already set + if port.Network.ID != "" { + return nil } + // Network is defined by Filter + netIDs, err := s.GetNetworkIDsByFilter(port.Network.ToListOpt()) + if err != nil { + return err + } + + // TODO: These are spec errors: they should set the machine to failed + if len(netIDs) > 1 { + return fmt.Errorf("network filter for port %d returns more than one result", portIdx) + } else if len(netIDs) == 0 { + return fmt.Errorf("network filter for port %d returns no networks", portIdx) + } + + port.Network.ID = netIDs[0] + return nil } -// GetPortName appends a suffix to an instance name in order to try and get a unique name per port. -func GetPortName(instanceName string, opts *infrav1.PortOpts, netIndex int) string { - if opts != nil && opts.NameSuffix != "" { - return fmt.Sprintf("%s-%s", instanceName, opts.NameSuffix) +// isTrunkExtSupported verifies trunk setup on the OpenStack deployment. 
+func (s *Service) isTrunkExtSupported() (trunknSupported bool, err error) { + trunkSupport, err := s.GetTrunkSupport() + if err != nil { + return false, fmt.Errorf("there was an issue verifying whether trunk support is available, Please try again later: %v", err) } - return fmt.Sprintf("%s-%d", instanceName, netIndex) + if !trunkSupport { + return false, nil + } + return true, nil } diff --git a/pkg/cloud/services/networking/port_test.go b/pkg/cloud/services/networking/port_test.go index d0e2f9ec91..0e9412a2f3 100644 --- a/pkg/cloud/services/networking/port_test.go +++ b/pkg/cloud/services/networking/port_test.go @@ -19,17 +19,22 @@ package networking import ( "testing" + "github.com/go-logr/logr" "github.com/golang/mock/gomock" + "github.com/google/go-cmp/cmp" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" . 
"github.com/onsi/gomega" + "k8s.io/utils/pointer" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7" "sigs.k8s.io/cluster-api-provider-openstack/pkg/clients/mock" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" ) func Test_GetOrCreatePort(t *testing.T) { @@ -536,85 +541,406 @@ func Test_GetOrCreatePort(t *testing.T) { } } -func Test_GarbageCollectErrorInstancesPort(t *testing.T) { +func pointerTo(b bool) *bool { + return &b +} + +func TestService_normalizePorts(t *testing.T) { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() - instanceName := "foo" - portID1 := "dc6e0ae3-dad6-4240-a9cb-e541916f20d3" - portID2 := "a38ab1cb-c2cc-4c1b-9d1d-696ec73356d2" - portName1 := GetPortName(instanceName, nil, 0) - portName2 := GetPortName(instanceName, nil, 1) + const ( + defaultNetworkID = "3c66f3ca-2d26-4d9d-ae3b-568f54129773" + defaultSubnetID = "d8dbba89-8c39-4192-a571-e702fca35bac" + + networkID = "afa54944-1443-4132-9ef5-ce37eb4d6ab6" + subnetID = "d786e715-c299-4a97-911d-640c10fc0392" + ) + + openStackCluster := &infrav1.OpenStackCluster{ + Status: infrav1.OpenStackClusterStatus{ + Network: &infrav1.NetworkStatusWithSubnets{ + NetworkStatus: infrav1.NetworkStatus{ + ID: defaultNetworkID, + }, + Subnets: []infrav1.Subnet{ + {ID: defaultSubnetID}, + }, + }, + }, + } tests := []struct { - // man is the name of the test. - name string - // expect allows definition of any expected calls to the mock. - expect func(m *mock.MockNetworkClientMockRecorder) - // portOpts defines the instance ports as defined in the OSM spec. - portOpts []infrav1.PortOpts - // wantErr defines whether the test is supposed to fail. 
- wantErr bool + name string + ports []infrav1.PortOpts + instanceTrunk bool + expectNetwork func(m *mock.MockNetworkClientMockRecorder) + want []infrav1.PortOpts + wantErr bool }{ { - name: "garbage collects all ports for an instance", - expect: func(m *mock.MockNetworkClientMockRecorder) { - o1 := ports.ListOpts{ - Name: portName1, - } - p1 := []ports.Port{ - { - ID: portID1, - Name: portName1, + name: "No ports: no ports", + ports: []infrav1.PortOpts{}, + want: []infrav1.PortOpts{}, + }, + { + name: "Nil network, no fixed IPs: cluster defaults", + ports: []infrav1.PortOpts{ + { + Network: nil, + FixedIPs: nil, + }, + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: defaultNetworkID, }, - } - m.ListPort(o1).Return(p1, nil) - m.DeletePort(portID1) - o2 := ports.ListOpts{ - Name: portName2, - } - p2 := []ports.Port{ - { - ID: portID2, - Name: portName2, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: defaultSubnetID, + }, + }, }, - } - - m.ListPort(o2).Return(p2, nil) - m.DeletePort(portID2) + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "Empty network, no fixed IPs: cluster defaults", + ports: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{}, + FixedIPs: nil, + }, + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: defaultNetworkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: defaultSubnetID, + }, + }, + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "Port inherits trunk from instance", + ports: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{}, + FixedIPs: nil, + }, + }, + instanceTrunk: true, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: defaultNetworkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: defaultSubnetID, + }, + }, + }, + Trunk: pointer.Bool(true), + }, + }, + }, + { + name: "Port overrides trunk from instance", + ports: 
[]infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{}, + FixedIPs: nil, + Trunk: pointer.Bool(true), + }, + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: defaultNetworkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: defaultSubnetID, + }, + }, + }, + Trunk: pointer.Bool(true), + }, + }, + }, + { + name: "Network defined by ID: unchanged", + ports: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + }, + }, + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "Network defined by filter: add ID from network lookup", + ports: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + Name: "test-network", + }, + }, + }, + expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.ListNetwork(networks.ListOpts{Name: "test-network"}).Return([]networks.Network{ + {ID: networkID}, + }, nil) + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + Name: "test-network", + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "No network, fixed IP has subnet by ID: add ID from subnet", + ports: []infrav1.PortOpts{ + { + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: subnetID, + }, + }, + }, + }, + }, + expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.GetSubnet(subnetID).Return(&subnets.Subnet{ID: subnetID, NetworkID: networkID}, nil) + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: subnetID, + }, + }, + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "No network, fixed IP has subnet by filter: add ID from subnet", + ports: []infrav1.PortOpts{ + { + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet", + }, + }, + }, + }, + }, + 
expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.ListSubnet(subnets.ListOpts{Name: "test-subnet"}).Return([]subnets.Subnet{ + {ID: subnetID, NetworkID: networkID}, + }, nil) + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: subnetID, + Name: "test-subnet", + }, + }, + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "No network, fixed IP subnet returns no matches: error", + ports: []infrav1.PortOpts{ + { + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet", + }, + }, + }, + }, + }, + expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.ListSubnet(subnets.ListOpts{Name: "test-subnet"}).Return([]subnets.Subnet{}, nil) + }, + wantErr: true, + }, + { + name: "No network, only fixed IP subnet returns multiple matches: error", + ports: []infrav1.PortOpts{ + { + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet", + }, + }, + }, + }, + }, + expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.ListSubnet(subnets.ListOpts{Name: "test-subnet"}).Return([]subnets.Subnet{ + {ID: subnetID, NetworkID: networkID}, + {ID: "8008494c-301e-4e5c-951b-a8ab568447fd", NetworkID: "5d48bfda-db28-42ee-8374-50e13d1fe5ea"}, + }, nil) + }, + wantErr: true, + }, + { + name: "No network, first fixed IP subnet returns multiple matches: used ID from second fixed IP", + ports: []infrav1.PortOpts{ + { + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet1", + }, + }, + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet2", + }, + }, + }, + }, + }, + expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.ListSubnet(subnets.ListOpts{Name: "test-subnet1"}).Return([]subnets.Subnet{ + {ID: subnetID, NetworkID: networkID}, + {ID: "8008494c-301e-4e5c-951b-a8ab568447fd", NetworkID: "5d48bfda-db28-42ee-8374-50e13d1fe5ea"}, 
+ }, nil) + m.ListSubnet(subnets.ListOpts{Name: "test-subnet2"}).Return([]subnets.Subnet{ + {ID: subnetID, NetworkID: networkID}, + }, nil) }, - portOpts: []infrav1.PortOpts{ - {}, - {}, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet1", + }, + }, + { + Subnet: &infrav1.SubnetFilter{ + ID: subnetID, + Name: "test-subnet2", + }, + }, + }, + Trunk: pointer.Bool(false), + }, }, - wantErr: false, }, } - - eventObject := &infrav1.OpenStackMachine{} - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + mockClient := mock.NewMockNetworkClient(mockCtrl) - tt.expect(mockClient.EXPECT()) + if tt.expectNetwork != nil { + tt.expectNetwork(mockClient.EXPECT()) + } s := Service{ client: mockClient, + scope: scope.NewMockScopeFactory(mockCtrl, "", logr.Discard()), } - err := s.GarbageCollectErrorInstancesPort( - eventObject, - instanceName, - tt.portOpts, - ) + + got, err := s.normalizePorts(tt.ports, openStackCluster, tt.instanceTrunk) if tt.wantErr { g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).NotTo(HaveOccurred()) + return } + + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(got).To(Equal(tt.want), cmp.Diff(got, tt.want)) }) } } -func pointerTo(b bool) *bool { - return &b +func Test_getPortName(t *testing.T) { + type args struct { + instanceName string + opts *infrav1.PortOpts + netIndex int + } + tests := []struct { + name string + args args + want string + }{ + { + name: "with nil PortOpts", + args: args{"test-1-instance", nil, 2}, + want: "test-1-instance-2", + }, + { + name: "with PortOpts name suffix", + args: args{"test-1-instance", &infrav1.PortOpts{NameSuffix: "foo"}, 4}, + want: "test-1-instance-foo", + }, + { + name: "without PortOpts name suffix", + args: args{"test-1-instance", &infrav1.PortOpts{}, 4}, + want: "test-1-instance-4", + }, + { + name: "with PortOpts name suffix", + args: 
args{"test-1-instance", &infrav1.PortOpts{NameSuffix: "foo2", Network: &infrav1.NetworkFilter{ID: "bar"}, DisablePortSecurity: pointer.Bool(true)}, 4}, + want: "test-1-instance-foo2", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := GetPortName(tt.args.instanceName, tt.args.opts, tt.args.netIndex); got != tt.want { + t.Errorf("getPortName() = %v, want %v", got, tt.want) + } + }) + } }