diff --git a/api/v1alpha5/conversion.go b/api/v1alpha5/conversion.go index 3f41962b92..c27b08ae42 100644 --- a/api/v1alpha5/conversion.go +++ b/api/v1alpha5/conversion.go @@ -437,3 +437,14 @@ func Convert_v1alpha5_OpenStackClusterStatus_To_v1alpha7_OpenStackClusterStatus( func Convert_v1alpha7_OpenStackMachineSpec_To_v1alpha5_OpenStackMachineSpec(in *infrav1.OpenStackMachineSpec, out *OpenStackMachineSpec, s conversion.Scope) error { return autoConvert_v1alpha7_OpenStackMachineSpec_To_v1alpha5_OpenStackMachineSpec(in, out, s) } + +func Convert_v1alpha7_OpenStackMachineStatus_To_v1alpha5_OpenStackMachineStatus(in *infrav1.OpenStackMachineStatus, out *OpenStackMachineStatus, s conversion.Scope) error { + err := autoConvert_v1alpha7_OpenStackMachineStatus_To_v1alpha5_OpenStackMachineStatus(in, out, s) + if err != nil { + return err + } + + // in.PortsStatus has no equivalent in v1alpha5 and is intentionally dropped; the conversion must not mutate in. + + return nil +} diff --git a/api/v1alpha5/zz_generated.conversion.go b/api/v1alpha5/zz_generated.conversion.go index 52681a9c65..5edb6bb3d0 100644 --- a/api/v1alpha5/zz_generated.conversion.go +++ b/api/v1alpha5/zz_generated.conversion.go @@ -1161,16 +1161,12 @@ func autoConvert_v1alpha7_OpenStackMachineStatus_To_v1alpha5_OpenStackMachineSta out.Addresses = *(*[]v1.NodeAddress)(unsafe.Pointer(&in.Addresses)) out.InstanceState = (*InstanceState)(unsafe.Pointer(in.InstanceState)) out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason)) + // WARNING: in.PortsStatus requires manual conversion: does not exist in peer-type out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) out.Conditions = *(*v1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } -// Convert_v1alpha7_OpenStackMachineStatus_To_v1alpha5_OpenStackMachineStatus is an autogenerated conversion function. 
-func Convert_v1alpha7_OpenStackMachineStatus_To_v1alpha5_OpenStackMachineStatus(in *v1alpha7.OpenStackMachineStatus, out *OpenStackMachineStatus, s conversion.Scope) error { - return autoConvert_v1alpha7_OpenStackMachineStatus_To_v1alpha5_OpenStackMachineStatus(in, out, s) -} - func autoConvert_v1alpha5_OpenStackMachineTemplate_To_v1alpha7_OpenStackMachineTemplate(in *OpenStackMachineTemplate, out *v1alpha7.OpenStackMachineTemplate, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1alpha5_OpenStackMachineTemplateSpec_To_v1alpha7_OpenStackMachineTemplateSpec(&in.Spec, &out.Spec, s); err != nil { diff --git a/api/v1alpha6/conversion.go b/api/v1alpha6/conversion.go index ae8e343ea8..64a90d1a2c 100644 --- a/api/v1alpha6/conversion.go +++ b/api/v1alpha6/conversion.go @@ -659,3 +659,14 @@ func Convert_v1alpha6_OpenStackClusterStatus_To_v1alpha7_OpenStackClusterStatus( func Convert_v1alpha7_OpenStackMachineSpec_To_v1alpha6_OpenStackMachineSpec(in *infrav1.OpenStackMachineSpec, out *OpenStackMachineSpec, s apiconversion.Scope) error { return autoConvert_v1alpha7_OpenStackMachineSpec_To_v1alpha6_OpenStackMachineSpec(in, out, s) } + +func Convert_v1alpha7_OpenStackMachineStatus_To_v1alpha6_OpenStackMachineStatus(in *infrav1.OpenStackMachineStatus, out *OpenStackMachineStatus, s apiconversion.Scope) error { + err := autoConvert_v1alpha7_OpenStackMachineStatus_To_v1alpha6_OpenStackMachineStatus(in, out, s) + if err != nil { + return err + } + + // in.PortsStatus has no equivalent in v1alpha6 and is intentionally dropped; the conversion must not mutate in. + + return nil +} diff --git a/api/v1alpha6/zz_generated.conversion.go b/api/v1alpha6/zz_generated.conversion.go index 6bfa0129c3..94c79ffc4f 100644 --- a/api/v1alpha6/zz_generated.conversion.go +++ b/api/v1alpha6/zz_generated.conversion.go @@ -1184,16 +1184,12 @@ func autoConvert_v1alpha7_OpenStackMachineStatus_To_v1alpha6_OpenStackMachineSta out.Addresses = *(*[]v1.NodeAddress)(unsafe.Pointer(&in.Addresses)) out.InstanceState = (*InstanceState)(unsafe.Pointer(in.InstanceState)) 
out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason)) + // WARNING: in.PortsStatus requires manual conversion: does not exist in peer-type out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) out.Conditions = *(*v1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } -// Convert_v1alpha7_OpenStackMachineStatus_To_v1alpha6_OpenStackMachineStatus is an autogenerated conversion function. -func Convert_v1alpha7_OpenStackMachineStatus_To_v1alpha6_OpenStackMachineStatus(in *v1alpha7.OpenStackMachineStatus, out *OpenStackMachineStatus, s conversion.Scope) error { - return autoConvert_v1alpha7_OpenStackMachineStatus_To_v1alpha6_OpenStackMachineStatus(in, out, s) -} - func autoConvert_v1alpha6_OpenStackMachineTemplate_To_v1alpha7_OpenStackMachineTemplate(in *OpenStackMachineTemplate, out *v1alpha7.OpenStackMachineTemplate, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1alpha6_OpenStackMachineTemplateSpec_To_v1alpha7_OpenStackMachineTemplateSpec(&in.Spec, &out.Spec, s); err != nil { diff --git a/api/v1alpha7/conditions_consts.go b/api/v1alpha7/conditions_consts.go index b54eade0b0..49fa89da8d 100644 --- a/api/v1alpha7/conditions_consts.go +++ b/api/v1alpha7/conditions_consts.go @@ -22,6 +22,10 @@ const ( // InstanceReadyCondition reports on current status of the OpenStack instance. Ready indicates the instance is in a Running state. InstanceReadyCondition clusterv1.ConditionType = "InstanceReady" + // PortsReadyCondition reports on the current status of the ports. Ready indicates that all ports have been created + // successfully and ready to be attached to the instance. + PortsReadyCondition clusterv1.ConditionType = "PortsReady" + // WaitingForClusterInfrastructureReason used when machine is waiting for cluster infrastructure to be ready before proceeding. 
WaitingForClusterInfrastructureReason = "WaitingForClusterInfrastructure" // WaitingForBootstrapDataReason used when machine is waiting for bootstrap data to be ready before proceeding. @@ -42,6 +46,8 @@ const ( InstanceDeleteFailedReason = "InstanceDeleteFailed" // OpenstackErrorReason used when there is an error communicating with OpenStack. OpenStackErrorReason = "OpenStackError" + // PortsCreateFailedReason used when creating the ports failed. + PortsCreateFailedReason = "PortsCreateFailed" ) const ( diff --git a/api/v1alpha7/openstackmachine_types.go b/api/v1alpha7/openstackmachine_types.go index f5f4f698aa..c22ca58845 100644 --- a/api/v1alpha7/openstackmachine_types.go +++ b/api/v1alpha7/openstackmachine_types.go @@ -113,6 +113,10 @@ type OpenStackMachineStatus struct { FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"` + // PortsStatus is the status of the ports associated with the machine. + // +optional + PortsStatus []PortStatus `json:"portsStatus,omitempty"` + // FailureMessage will be set in the event that there is a terminal problem // reconciling the Machine and will contain a more verbose string suitable // for logging and human consumption. diff --git a/api/v1alpha7/types.go b/api/v1alpha7/types.go index 3ef5305ff2..b3138f8580 100644 --- a/api/v1alpha7/types.go +++ b/api/v1alpha7/types.go @@ -128,6 +128,12 @@ type PortOpts struct { ValueSpecs []ValueSpec `json:"valueSpecs,omitempty"` } +type PortStatus struct { + // ID is the unique identifier of the port. + // +optional + ID string `json:"id,omitempty"` +} + type BindingProfile struct { // OVSHWOffload enables or disables the OVS hardware offload feature. 
OVSHWOffload bool `json:"ovsHWOffload,omitempty"` @@ -149,12 +155,13 @@ type AddressPair struct { } type BastionStatus struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - SSHKeyName string `json:"sshKeyName,omitempty"` - State InstanceState `json:"state,omitempty"` - IP string `json:"ip,omitempty"` - FloatingIP string `json:"floatingIP,omitempty"` + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + SSHKeyName string `json:"sshKeyName,omitempty"` + State InstanceState `json:"state,omitempty"` + PortsStatus PortStatus `json:"portsStatus,omitempty"` + IP string `json:"ip,omitempty"` + FloatingIP string `json:"floatingIP,omitempty"` } type RootVolume struct { diff --git a/api/v1alpha7/zz_generated.deepcopy.go b/api/v1alpha7/zz_generated.deepcopy.go index 1d81a83410..d0de896acf 100644 --- a/api/v1alpha7/zz_generated.deepcopy.go +++ b/api/v1alpha7/zz_generated.deepcopy.go @@ -721,6 +721,11 @@ func (in *OpenStackMachineStatus) DeepCopyInto(out *OpenStackMachineStatus) { *out = new(errors.MachineStatusError) **out = **in } + if in.PortsStatus != nil { + in, out := &in.PortsStatus, &out.PortsStatus + *out = make([]PortStatus, len(*in)) + copy(*out, *in) + } if in.FailureMessage != nil { in, out := &in.FailureMessage, &out.FailureMessage *out = new(string) @@ -903,6 +908,21 @@ func (in *PortOpts) DeepCopy() *PortOpts { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortStatus) DeepCopyInto(out *PortStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortStatus. +func (in *PortStatus) DeepCopy() *PortStatus { + if in == nil { + return nil + } + out := new(PortStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RootVolume) DeepCopyInto(out *RootVolume) { *out = *in diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml index e5171cd868..63fd0ec50f 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml @@ -1603,6 +1603,16 @@ spec: description: InstanceState is the state of the OpenStack instance for this machine. type: string + portsStatus: + description: PortsStatus is the status of the ports associated with + the machine. + items: + properties: + id: + description: ID is the unique identifier of the port. + type: string + type: object + type: array ready: description: Ready is true when the provider resource is ready. type: boolean diff --git a/controllers/openstackcluster_controller.go b/controllers/openstackcluster_controller.go index 08177fc9ed..1ba79da5cf 100644 --- a/controllers/openstackcluster_controller.go +++ b/controllers/openstackcluster_controller.go @@ -320,6 +320,8 @@ func reconcileBastion(scope scope.Scope, cluster *clusterv1.Cluster, openStackCl return err } + // TODO(emilien) Create ports for bastion host + instanceSpec := bastionToInstanceSpec(openStackCluster, cluster.Name) bastionHash, err := compute.HashInstanceSpec(instanceSpec) if err != nil { @@ -349,7 +351,7 @@ func reconcileBastion(scope scope.Scope, cluster *clusterv1.Cluster, openStackCl } } - instanceStatus, err = computeService.CreateInstance(openStackCluster, openStackCluster, instanceSpec, cluster.Name) + instanceStatus, err = computeService.CreateInstance(openStackCluster, openStackCluster, instanceSpec, cluster.Name, []string{}) if err != nil { return fmt.Errorf("failed to reconcile bastion: %w", err) } diff --git a/controllers/openstackmachine_controller.go b/controllers/openstackmachine_controller.go index b0345c078c..962e8edcd5 100644 --- 
a/controllers/openstackmachine_controller.go +++ b/controllers/openstackmachine_controller.go @@ -328,9 +328,24 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope return ctrl.Result{}, err } - instanceStatus, err := r.getOrCreate(scope.Logger(), cluster, openStackCluster, machine, openStackMachine, computeService, userData) + managedSecurityGroups := getManagedSecurityGroups(openStackCluster, machine, openStackMachine) + instanceTags := getInstanceTags(openStackMachine, openStackCluster) + portsStatus, err := r.getOrCreatePorts(scope.Logger(), clusterName, openStackCluster, openStackMachine.Spec.Ports, openStackMachine.Spec.Trunk, managedSecurityGroups, instanceTags, openStackMachine.Name, openStackMachine, networkingService) if err != nil { - // Conditions set in getOrCreate + // Conditions set in getOrCreatePorts + return ctrl.Result{}, err + } + // TODO(emilien) do we want to deal with Ports Status like we do for the nova instance? Might be expensive... + // In the meantime, we consider that ports are ready if they are created. 
+ conditions.MarkTrue(openStackMachine, infrav1.PortsReadyCondition) + portIDs := make([]string, len(*portsStatus)) + for i, portStatus := range *portsStatus { + portIDs[i] = portStatus.ID + } + + instanceStatus, err := r.getOrCreateInstance(scope.Logger(), cluster, openStackCluster, machine, openStackMachine, computeService, userData, portIDs) + if err != nil { + // Conditions set in getOrCreateInstance return ctrl.Result{}, err } @@ -430,7 +445,32 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope return ctrl.Result{}, nil } -func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service, userData string) (*compute.InstanceStatus, error) { +func (r *OpenStackMachineReconciler) getOrCreatePorts(logger logr.Logger, clusterName string, openStackCluster *infrav1.OpenStackCluster, machinePorts []infrav1.PortOpts, trunkEnabled bool, securityGroups []infrav1.SecurityGroupFilter, instanceTags []string, instanceName string, openStackMachine *infrav1.OpenStackMachine, networkingService *networking.Service) (*[]infrav1.PortStatus, error) { + // TODO(emilien) implement portsStatus where we check if the machine has ports created and part of the OpenStackMachine.Status + // Check `GetInstanceStatusByName` as it's done for instances. For each port found in Status, we might want get the port name and verify there is no duplicate. + // If Status is empty, we create the ports and add them to the Status. + // If Status is not empty, we try to adopt the existing ports by checking if the ports are still there and if not, we create them and add them to the Status. + + // For now, we create the ports. 
+ var portsStatus *[]infrav1.PortStatus + var err error + if openStackMachine.Status.PortsStatus == nil { + logger.Info("Creating ports for OpenStackMachine", "name", openStackMachine.Name) + portsStatus, err = networkingService.CreatePorts(openStackMachine, clusterName, openStackCluster, machinePorts, trunkEnabled, securityGroups, instanceTags, instanceName) + if err != nil { + // Add Conditions + conditions.MarkFalse(openStackMachine, infrav1.PortsReadyCondition, infrav1.PortsCreateFailedReason, clusterv1.ConditionSeverityError, err.Error()) + return nil, fmt.Errorf("create ports: %w", err) + } + openStackMachine.Status.PortsStatus = *portsStatus + } else { + portsStatus = &openStackMachine.Status.PortsStatus + } + + return portsStatus, nil +} + +func (r *OpenStackMachineReconciler) getOrCreateInstance(logger logr.Logger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, computeService *compute.Service, userData string, portIDs []string) (*compute.InstanceStatus, error) { instanceStatus, err := computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name) if err != nil { logger.Info("Unable to get OpenStack instance", "name", openStackMachine.Name) @@ -441,7 +481,7 @@ func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *cl if instanceStatus == nil { instanceSpec := machineToInstanceSpec(openStackCluster, machine, openStackMachine, userData) logger.Info("Machine does not exist, creating Machine", "name", openStackMachine.Name) - instanceStatus, err = computeService.CreateInstance(openStackMachine, openStackCluster, instanceSpec, cluster.Name) + instanceStatus, err = computeService.CreateInstance(openStackMachine, openStackCluster, instanceSpec, cluster.Name, portIDs) if err != nil { conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceCreateFailedReason, clusterv1.ConditionSeverityError, err.Error()) return nil, fmt.Errorf("create OpenStack instance: %w", 
err) @@ -472,6 +512,19 @@ func machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine * instanceSpec.FailureDomain = *machine.Spec.FailureDomain } + instanceSpec.Tags = getInstanceTags(openStackMachine, openStackCluster) + + instanceSpec.SecurityGroups = getManagedSecurityGroups(openStackCluster, machine, openStackMachine) + + instanceSpec.Ports = openStackMachine.Spec.Ports + + return &instanceSpec +} + +// getInstanceTags returns the tags that should be applied to the instance. +// The tags are a combination of the tags specified on the OpenStackMachine and +// the ones specified on the OpenStackCluster. +func getInstanceTags(openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster) []string { machineTags := []string{} // Append machine specific tags @@ -494,31 +547,31 @@ func machineToInstanceSpec(openStackCluster *infrav1.OpenStackCluster, machine * } machineTags = deduplicate(machineTags) - instanceSpec.Tags = machineTags + return machineTags +} - instanceSpec.SecurityGroups = openStackMachine.Spec.SecurityGroups - if openStackCluster.Spec.ManagedSecurityGroups { - var managedSecurityGroup string - if util.IsControlPlaneMachine(machine) { - if openStackCluster.Status.ControlPlaneSecurityGroup != nil { - managedSecurityGroup = openStackCluster.Status.ControlPlaneSecurityGroup.ID - } - } else { - if openStackCluster.Status.WorkerSecurityGroup != nil { - managedSecurityGroup = openStackCluster.Status.WorkerSecurityGroup.ID - } +// getManagedSecurityGroups returns a combination of OpenStackMachine.Spec.SecurityGroups +// and the security group managed by the OpenStackCluster whether it's a control plane or a worker machine. 
+func getManagedSecurityGroups(openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) []infrav1.SecurityGroupFilter { + machineSpecSecurityGroups := openStackMachine.Spec.SecurityGroups + var managedSecurityGroup string + if util.IsControlPlaneMachine(machine) { + if openStackCluster.Status.ControlPlaneSecurityGroup != nil { + managedSecurityGroup = openStackCluster.Status.ControlPlaneSecurityGroup.ID } - - if managedSecurityGroup != "" { - instanceSpec.SecurityGroups = append(instanceSpec.SecurityGroups, infrav1.SecurityGroupFilter{ - ID: managedSecurityGroup, - }) + } else { + if openStackCluster.Status.WorkerSecurityGroup != nil { + managedSecurityGroup = openStackCluster.Status.WorkerSecurityGroup.ID } } - instanceSpec.Ports = openStackMachine.Spec.Ports + if managedSecurityGroup != "" { + machineSpecSecurityGroups = append(machineSpecSecurityGroups, infrav1.SecurityGroupFilter{ + ID: managedSecurityGroup, + }) + } - return &instanceSpec + return machineSpecSecurityGroups } func (r *OpenStackMachineReconciler) reconcileLoadBalancerMember(scope scope.Scope, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, instanceNS *compute.InstanceNetworkStatus, clusterName string) error { diff --git a/controllers/openstackmachine_controller_test.go b/controllers/openstackmachine_controller_test.go index 0d9045c221..f9ae4991d2 100644 --- a/controllers/openstackmachine_controller_test.go +++ b/controllers/openstackmachine_controller_test.go @@ -17,6 +17,7 @@ limitations under the License. package controllers import ( + "reflect" "testing" . 
"github.com/onsi/gomega" @@ -254,3 +255,203 @@ func Test_machineToInstanceSpec(t *testing.T) { }) } } + +func Test_getInstanceTags(t *testing.T) { + tests := []struct { + name string + openStackMachine func() *infrav1.OpenStackMachine + openStackCluster func() *infrav1.OpenStackCluster + wantMachineTags []string + }{ + { + name: "No tags", + openStackMachine: func() *infrav1.OpenStackMachine { + return &infrav1.OpenStackMachine{ + Spec: infrav1.OpenStackMachineSpec{}, + } + }, + openStackCluster: func() *infrav1.OpenStackCluster { + return &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{}, + } + }, + wantMachineTags: []string{}, + }, + { + name: "Machine tags only", + openStackMachine: func() *infrav1.OpenStackMachine { + return &infrav1.OpenStackMachine{ + Spec: infrav1.OpenStackMachineSpec{ + Tags: []string{"machine-tag1", "machine-tag2"}, + }, + } + }, + openStackCluster: func() *infrav1.OpenStackCluster { + return &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{}, + } + }, + wantMachineTags: []string{"machine-tag1", "machine-tag2"}, + }, + { + name: "Cluster tags only", + openStackMachine: func() *infrav1.OpenStackMachine { + return &infrav1.OpenStackMachine{ + Spec: infrav1.OpenStackMachineSpec{}, + } + }, + openStackCluster: func() *infrav1.OpenStackCluster { + return &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + Tags: []string{"cluster-tag1", "cluster-tag2"}, + }, + } + }, + wantMachineTags: []string{"cluster-tag1", "cluster-tag2"}, + }, + { + name: "Machine and cluster tags", + openStackMachine: func() *infrav1.OpenStackMachine { + return &infrav1.OpenStackMachine{ + Spec: infrav1.OpenStackMachineSpec{ + Tags: []string{"machine-tag1", "machine-tag2"}, + }, + } + }, + openStackCluster: func() *infrav1.OpenStackCluster { + return &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + Tags: []string{"cluster-tag1", "cluster-tag2"}, + }, + } + }, + wantMachineTags: []string{"machine-tag1", 
"machine-tag2", "cluster-tag1", "cluster-tag2"}, + }, + { + name: "Duplicate tags", + openStackMachine: func() *infrav1.OpenStackMachine { + return &infrav1.OpenStackMachine{ + Spec: infrav1.OpenStackMachineSpec{ + Tags: []string{"tag1", "tag2", "tag1"}, + }, + } + }, + openStackCluster: func() *infrav1.OpenStackCluster { + return &infrav1.OpenStackCluster{ + Spec: infrav1.OpenStackClusterSpec{ + Tags: []string{"tag2", "tag3", "tag3"}, + }, + } + }, + wantMachineTags: []string{"tag1", "tag2", "tag3"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotMachineTags := getInstanceTags(tt.openStackMachine(), tt.openStackCluster()) + if !reflect.DeepEqual(gotMachineTags, tt.wantMachineTags) { + t.Errorf("getInstanceTags() = %v, want %v", gotMachineTags, tt.wantMachineTags) + } + }) + } +} + +func Test_getManagedSecurityGroups(t *testing.T) { + tests := []struct { + name string + openStackCluster func() *infrav1.OpenStackCluster + machine func() *clusterv1.Machine + openStackMachine func() *infrav1.OpenStackMachine + wantSecurityGroups []infrav1.SecurityGroupFilter + }{ + { + name: "Control plane machine with control plane security group", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Status.ControlPlaneSecurityGroup = &infrav1.SecurityGroup{ID: controlPlaneSecurityGroupUUID} + return c + }, + machine: func() *clusterv1.Machine { + m := getDefaultMachine() + m.Labels = map[string]string{ + clusterv1.MachineControlPlaneLabel: "true", + } + return m + }, + openStackMachine: getDefaultOpenStackMachine, + wantSecurityGroups: []infrav1.SecurityGroupFilter{ + {ID: controlPlaneSecurityGroupUUID}, + }, + }, + { + name: "Worker machine with worker security group", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Status.WorkerSecurityGroup = &infrav1.SecurityGroup{ID: workerSecurityGroupUUID} + return c + }, + machine: getDefaultMachine, + 
openStackMachine: getDefaultOpenStackMachine, + wantSecurityGroups: []infrav1.SecurityGroupFilter{ + {ID: workerSecurityGroupUUID}, + }, + }, + { + name: "Control plane machine without control plane security group", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Status.ControlPlaneSecurityGroup = nil + return c + }, + machine: func() *clusterv1.Machine { + m := getDefaultMachine() + m.Labels = map[string]string{ + clusterv1.MachineControlPlaneLabel: "true", + } + return m + }, + openStackMachine: getDefaultOpenStackMachine, + wantSecurityGroups: []infrav1.SecurityGroupFilter{}, + }, + { + name: "Worker machine without worker security group", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Status.WorkerSecurityGroup = nil + return c + }, + machine: getDefaultMachine, + openStackMachine: getDefaultOpenStackMachine, + wantSecurityGroups: []infrav1.SecurityGroupFilter{}, + }, + { + name: "Machine with additional security groups", + openStackCluster: func() *infrav1.OpenStackCluster { + c := getDefaultOpenStackCluster() + c.Status.ControlPlaneSecurityGroup = &infrav1.SecurityGroup{ID: controlPlaneSecurityGroupUUID} + c.Status.WorkerSecurityGroup = &infrav1.SecurityGroup{ID: workerSecurityGroupUUID} + return c + }, + machine: getDefaultMachine, + openStackMachine: func() *infrav1.OpenStackMachine { + m := getDefaultOpenStackMachine() + m.Spec.SecurityGroups = []infrav1.SecurityGroupFilter{{ID: extraSecurityGroupUUID}} + return m + }, + wantSecurityGroups: []infrav1.SecurityGroupFilter{ + {ID: extraSecurityGroupUUID}, + {ID: workerSecurityGroupUUID}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotMachineSecurity := getManagedSecurityGroups(tt.openStackCluster(), tt.machine(), tt.openStackMachine()) + if !reflect.DeepEqual(gotMachineSecurity, tt.wantSecurityGroups) { + t.Errorf("getManagedSecurityGroups() = %v, want %v", 
gotMachineSecurity, tt.wantSecurityGroups) + } + }) + } +} diff --git a/controllers/suite_test.go b/controllers/suite_test.go index b0edc98d5f..1cb8a8ba32 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -151,7 +151,7 @@ var _ = Describe("When calling getOrCreate", func() { openStackMachine := &infrav1.OpenStackMachine{} mockScopeFactory.ComputeClient.EXPECT().ListServers(gomock.Any()).Return(nil, errors.New("Test error when listing servers")) - instanceStatus, err := reconsiler.getOrCreate(logger, cluster, openStackCluster, machine, openStackMachine, computeService, "") + instanceStatus, err := reconsiler.getOrCreateInstance(logger, cluster, openStackCluster, machine, openStackMachine, computeService, "", []string{}) Expect(err).To(HaveOccurred()) Expect(instanceStatus).To(BeNil()) conditions := openStackMachine.GetConditions() diff --git a/pkg/clients/networking.go b/pkg/clients/networking.go index 23ad3c8fdf..e5f609c74d 100644 --- a/pkg/clients/networking.go +++ b/pkg/clients/networking.go @@ -36,6 +36,11 @@ import ( "sigs.k8s.io/cluster-api-provider-openstack/pkg/metrics" ) +// PortExt is the base gophercloud Port with extensions used by PortStatus. 
+type PortExt struct { + ports.Port +} + type NetworkClient interface { ListFloatingIP(opts floatingips.ListOptsBuilder) ([]floatingips.FloatingIP, error) CreateFloatingIP(opts floatingips.CreateOptsBuilder) (*floatingips.FloatingIP, error) diff --git a/pkg/cloud/services/compute/instance.go b/pkg/cloud/services/compute/instance.go index c6b70af87e..09a531b2bd 100644 --- a/pkg/cloud/services/compute/instance.go +++ b/pkg/cloud/services/compute/instance.go @@ -90,7 +90,6 @@ func (s *Service) normalizePortTarget(port *infrav1.PortOpts, openStackCluster * if fixedIP.Subnet == nil { continue } - subnet, err := networkingService.GetSubnetByFilter(fixedIP.Subnet) if err != nil { // Multiple matches might be ok later when we restrict matches to a single network @@ -221,8 +220,8 @@ func (s *Service) constructPorts(openStackCluster *infrav1.OpenStackCluster, ins return ports, nil } -func (s *Service) CreateInstance(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string) (*InstanceStatus, error) { - return s.createInstanceImpl(eventObject, openStackCluster, instanceSpec, clusterName, retryIntervalInstanceStatus) +func (s *Service) CreateInstance(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, portIDs []string) (*InstanceStatus, error) { + return s.createInstanceImpl(eventObject, openStackCluster, instanceSpec, clusterName, retryIntervalInstanceStatus, portIDs) } func (s *Service) getAndValidateFlavor(flavorName string) (*flavors.Flavor, error) { @@ -234,7 +233,7 @@ func (s *Service) getAndValidateFlavor(flavorName string) (*flavors.Flavor, erro return f, nil } -func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, retryInterval time.Duration) (*InstanceStatus, error) { +func (s *Service) createInstanceImpl(eventObject runtime.Object, 
openStackCluster *infrav1.OpenStackCluster, instanceSpec *InstanceSpec, clusterName string, retryInterval time.Duration, portIDs []string) (*InstanceStatus, error) { var server *clients.ServerExt portList := []servers.Network{} @@ -254,41 +253,50 @@ func (s *Service) createInstanceImpl(eventObject runtime.Object, openStackCluste return } + // TODO(emilien) we probably want to move that in port management if err := s.deletePorts(eventObject, portList); err != nil { s.scope.Logger().V(4).Error(err, "Failed to clean up ports after failure") } }() - ports, err := s.constructPorts(openStackCluster, instanceSpec) - if err != nil { - return nil, err - } - - networkingService, err := s.getNetworkingService() - if err != nil { - return nil, err - } - - securityGroups, err := networkingService.GetSecurityGroups(instanceSpec.SecurityGroups) - if err != nil { - return nil, fmt.Errorf("error getting security groups: %v", err) - } - - for i := range ports { - portOpts := &ports[i] - iTags := []string{} - if len(instanceSpec.Tags) > 0 { - iTags = instanceSpec.Tags + if len(portIDs) == 0 { + ports, err := s.constructPorts(openStackCluster, instanceSpec) + if err != nil { + return nil, err } - portName := networking.GetPortName(instanceSpec.Name, portOpts, i) - port, err := networkingService.GetOrCreatePort(eventObject, clusterName, portName, portOpts, securityGroups, iTags) + + networkingService, err := s.getNetworkingService() if err != nil { return nil, err } - portList = append(portList, servers.Network{ - Port: port.ID, - }) + securityGroups, err := networkingService.GetSecurityGroups(instanceSpec.SecurityGroups) + if err != nil { + return nil, fmt.Errorf("error getting security groups: %v", err) + } + + for i := range ports { + portOpts := &ports[i] + iTags := []string{} + if len(instanceSpec.Tags) > 0 { + iTags = instanceSpec.Tags + } + portName := networking.GetPortName(instanceSpec.Name, portOpts, i) + port, err := networkingService.GetOrCreatePort(eventObject, 
clusterName, portName, portOpts, securityGroups, iTags) + if err != nil { + return nil, err + } + + portList = append(portList, servers.Network{ + Port: port.ID, + }) + } + } else { + for _, portID := range portIDs { + portList = append(portList, servers.Network{ + Port: portID, + }) + } } instanceCreateTimeout := getTimeout("CLUSTER_API_OPENSTACK_INSTANCE_CREATE_TIMEOUT", timeoutInstanceCreate) diff --git a/pkg/cloud/services/compute/instance_test.go b/pkg/cloud/services/compute/instance_test.go index cafc9e3edc..b8e33b8c8d 100644 --- a/pkg/cloud/services/compute/instance_test.go +++ b/pkg/cloud/services/compute/instance_test.go @@ -48,7 +48,6 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7" "sigs.k8s.io/cluster-api-provider-openstack/pkg/clients" "sigs.k8s.io/cluster-api-provider-openstack/pkg/clients/mock" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking" "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" ) @@ -77,47 +76,6 @@ func (m *gomegaMockMatcher) Matches(x interface{}) bool { return success } -func Test_getPortName(t *testing.T) { - type args struct { - instanceName string - opts *infrav1.PortOpts - netIndex int - } - tests := []struct { - name string - args args - want string - }{ - { - name: "with nil PortOpts", - args: args{"test-1-instance", nil, 2}, - want: "test-1-instance-2", - }, - { - name: "with PortOpts name suffix", - args: args{"test-1-instance", &infrav1.PortOpts{NameSuffix: "foo"}, 4}, - want: "test-1-instance-foo", - }, - { - name: "without PortOpts name suffix", - args: args{"test-1-instance", &infrav1.PortOpts{}, 4}, - want: "test-1-instance-4", - }, - { - name: "with PortOpts name suffix", - args: args{"test-1-instance", &infrav1.PortOpts{NameSuffix: "foo2", Network: &infrav1.NetworkFilter{ID: "bar"}, DisablePortSecurity: pointer.Bool(true)}, 4}, - want: "test-1-instance-foo2", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := 
networking.GetPortName(tt.args.instanceName, tt.args.opts, tt.args.netIndex); got != tt.want { - t.Errorf("getPortName() = %v, want %v", got, tt.want) - } - }) - } -} - func TestService_getImageID(t *testing.T) { const imageIDA = "ce96e584-7ebc-46d6-9e55-987d72e3806c" const imageIDB = "8f536889-5198-42d7-8314-cb78f4f4755c" @@ -976,7 +934,7 @@ func TestService_ReconcileInstance(t *testing.T) { } // Call CreateInstance with a reduced retry interval to speed up the test - _, err = s.createInstanceImpl(&infrav1.OpenStackMachine{}, getDefaultOpenStackCluster(), tt.getInstanceSpec(), "cluster-name", time.Nanosecond) + _, err = s.createInstanceImpl(&infrav1.OpenStackMachine{}, getDefaultOpenStackCluster(), tt.getInstanceSpec(), "cluster-name", time.Nanosecond, []string{}) if (err != nil) != tt.wantErr { t.Errorf("Service.CreateInstance() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/pkg/cloud/services/networking/port.go b/pkg/cloud/services/networking/port.go index e3e9b0d8dd..56fb2efc51 100644 --- a/pkg/cloud/services/networking/port.go +++ b/pkg/cloud/services/networking/port.go @@ -18,6 +18,7 @@ package networking import ( "context" + "errors" "fmt" "strings" "time" @@ -330,3 +331,211 @@ func GetPortName(instanceName string, opts *infrav1.PortOpts, netIndex int) stri } return fmt.Sprintf("%s-%d", instanceName, netIndex) } + +func (s *Service) CreatePorts(eventObject runtime.Object, clusterName string, openStackCluster *infrav1.OpenStackCluster, ports []infrav1.PortOpts, trunkEnabled bool, securityGroups []infrav1.SecurityGroupFilter, instanceTags []string, instanceName string) (*[]infrav1.PortStatus, error) { + return s.createPortsImpl(eventObject, clusterName, openStackCluster, ports, trunkEnabled, securityGroups, instanceTags, instanceName) +} + +func (s *Service) createPortsImpl(eventObject runtime.Object, clusterName string, openStackCluster *infrav1.OpenStackCluster, ports []infrav1.PortOpts, trunkEnabled bool, securityGroups 
[]infrav1.SecurityGroupFilter, instanceTags []string, instanceName string) (*[]infrav1.PortStatus, error) { + instancePorts, err := s.constructPorts(openStackCluster, ports, trunkEnabled) + if err != nil { + return nil, err + } + + instanceSecurityGroups, err := s.GetSecurityGroups(securityGroups) + if err != nil { + return nil, fmt.Errorf("error getting security groups: %v", err) + } + + portsStatus := make([]infrav1.PortStatus, 0, len(instancePorts)) + + for i := range instancePorts { + portOpts := &instancePorts[i] + iTags := []string{} + if len(instanceTags) > 0 { + iTags = instanceTags + } + portName := GetPortName(instanceName, portOpts, i) + port, err := s.GetOrCreatePort(eventObject, clusterName, portName, portOpts, instanceSecurityGroups, iTags) + if err != nil { + record.Warnf(eventObject, "FailedCreatePort", "Failed to create port; name=%s err=%v", portName, err) + return nil, err + } + record.Eventf(eventObject, "SuccessfulCreatePort", "Created port; id=%s", port.ID) + + portsStatus = append(portsStatus, infrav1.PortStatus{ + ID: port.ID, + }) + } + + return &portsStatus, nil +} + +func (s *Service) constructPorts(openStackCluster *infrav1.OpenStackCluster, ports []infrav1.PortOpts, trunkEnabled bool) ([]infrav1.PortOpts, error) { + // Ensure user-specified ports have all required fields + ports, err := s.normalizePorts(ports, openStackCluster, trunkEnabled) + if err != nil { + return nil, err + } + + // no networks or ports found in the spec, so create a port on the cluster network + if len(ports) == 0 { + port := infrav1.PortOpts{ + Network: &infrav1.NetworkFilter{ + ID: openStackCluster.Status.Network.ID, + }, + Trunk: &trunkEnabled, + } + for _, subnet := range openStackCluster.Status.Network.Subnets { + port.FixedIPs = append(port.FixedIPs, infrav1.FixedIP{ + Subnet: &infrav1.SubnetFilter{ + ID: subnet.ID, + }, + }) + } + ports = []infrav1.PortOpts{port} + } + + // trunk support is required if any port has trunk enabled + portUsesTrunk := func() 
bool { + for _, port := range ports { + if port.Trunk != nil && *port.Trunk { + return true + } + } + return false + } + if portUsesTrunk() { + trunkSupported, err := s.isTrunkExtSupported() + if err != nil { + return nil, err + } + if !trunkSupported { + return nil, fmt.Errorf("there is no trunk support. please ensure that the trunk extension is enabled in your OpenStack deployment") + } + } + + return ports, nil + +} + +func (s *Service) normalizePorts(ports []infrav1.PortOpts, openStackCluster *infrav1.OpenStackCluster, trunkEnabled bool) ([]infrav1.PortOpts, error) { + normalizedPorts := make([]infrav1.PortOpts, 0, len(ports)) + for i := range ports { + // Deep copy the port to avoid mutating the original + port := ports[i].DeepCopy() + + // No Trunk field specified for the port, inherit the machine default + if port.Trunk == nil { + port.Trunk = &trunkEnabled + } + + if err := s.normalizePortTarget(port, openStackCluster, i); err != nil { + return nil, err + } + + normalizedPorts = append(normalizedPorts, *port) + } + return normalizedPorts, nil +} + +func (s *Service) normalizePortTarget(port *infrav1.PortOpts, openStackCluster *infrav1.OpenStackCluster, portIdx int) error { + // Treat no Network and empty Network the same + noNetwork := port.Network == nil || (*port.Network == infrav1.NetworkFilter{}) + + // No network or subnets defined: use cluster defaults + if noNetwork && len(port.FixedIPs) == 0 { + port.Network = &infrav1.NetworkFilter{ + ID: openStackCluster.Status.Network.ID, + } + for _, subnet := range openStackCluster.Status.Network.Subnets { + port.FixedIPs = append(port.FixedIPs, infrav1.FixedIP{ + Subnet: &infrav1.SubnetFilter{ + ID: subnet.ID, + }, + }) + } + + return nil + } + + // No network, but fixed IPs are defined(we handled the no fixed + // IPs case above): try to infer network from a subnet + if noNetwork { + s.scope.Logger().V(4).Info("No network defined for port, attempting to infer from subnet", "port", portIdx) + + // Look for a 
unique subnet defined in FixedIPs. If we find one + // we can use it to infer the network ID. We don't need to worry + // here about the case where different FixedIPs have different + // networks because that will cause an error later when we try + // to create the port. + networkID, err := func() (string, error) { + for i, fixedIP := range port.FixedIPs { + if fixedIP.Subnet == nil { + continue + } + + subnet, err := s.GetSubnetByFilter(fixedIP.Subnet) + if err != nil { + // Multiple matches might be ok later when we restrict matches to a single network + if errors.Is(err, ErrMultipleMatches) { + s.scope.Logger().V(4).Info("Couldn't infer network from subnet", "subnetIndex", i, "err", err) + continue + } + + return "", err + } + + // Cache the subnet ID in the FixedIP + fixedIP.Subnet.ID = subnet.ID + return subnet.NetworkID, nil + } + + // TODO: This is a spec error: it should set the machine to failed + return "", fmt.Errorf("port %d has no network and unable to infer from fixed IPs", portIdx) + }() + if err != nil { + return err + } + + port.Network = &infrav1.NetworkFilter{ + ID: networkID, + } + + return nil + } + + // Nothing to do if network ID is already set + if port.Network.ID != "" { + return nil + } + + // Network is defined by Filter + netIDs, err := s.GetNetworkIDsByFilter(port.Network.ToListOpt()) + if err != nil { + return err + } + + // TODO: These are spec errors: they should set the machine to failed + if len(netIDs) > 1 { + return fmt.Errorf("network filter for port %d returns more than one result", portIdx) + } else if len(netIDs) == 0 { + return fmt.Errorf("network filter for port %d returns no networks", portIdx) + } + + port.Network.ID = netIDs[0] + + return nil +} + +// isTrunkExtSupported verifies trunk setup on the OpenStack deployment. 
+func (s *Service) isTrunkExtSupported() (trunkSupported bool, err error) {
+	trunkSupport, err := s.GetTrunkSupport()
+	if err != nil {
+		return false, fmt.Errorf("there was an issue verifying whether trunk support is available, Please try again later: %v", err)
+	}
+	if !trunkSupport {
+		return false, nil
+	}
+	return true, nil
+}
diff --git a/pkg/cloud/services/networking/port_test.go b/pkg/cloud/services/networking/port_test.go
index d0e2f9ec91..77d8a50c4b 100644
--- a/pkg/cloud/services/networking/port_test.go
+++ b/pkg/cloud/services/networking/port_test.go
@@ -19,17 +19,22 @@ package networking import ( "testing" + "github.com/go-logr/logr" "github.com/golang/mock/gomock" + "github.com/google/go-cmp/cmp" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" . 
"github.com/onsi/gomega" + "k8s.io/utils/pointer" infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha7" "sigs.k8s.io/cluster-api-provider-openstack/pkg/clients/mock" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope" ) func Test_GetOrCreatePort(t *testing.T) { @@ -618,3 +623,403 @@ func Test_GarbageCollectErrorInstancesPort(t *testing.T) { func pointerTo(b bool) *bool { return &b } + +func TestService_normalizePorts(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + const ( + defaultNetworkID = "3c66f3ca-2d26-4d9d-ae3b-568f54129773" + defaultSubnetID = "d8dbba89-8c39-4192-a571-e702fca35bac" + + networkID = "afa54944-1443-4132-9ef5-ce37eb4d6ab6" + subnetID = "d786e715-c299-4a97-911d-640c10fc0392" + ) + + openStackCluster := &infrav1.OpenStackCluster{ + Status: infrav1.OpenStackClusterStatus{ + Network: &infrav1.NetworkStatusWithSubnets{ + NetworkStatus: infrav1.NetworkStatus{ + ID: defaultNetworkID, + }, + Subnets: []infrav1.Subnet{ + {ID: defaultSubnetID}, + }, + }, + }, + } + + tests := []struct { + name string + ports []infrav1.PortOpts + instanceTrunk bool + expectNetwork func(m *mock.MockNetworkClientMockRecorder) + want []infrav1.PortOpts + wantErr bool + }{ + { + name: "No ports: no ports", + ports: []infrav1.PortOpts{}, + want: []infrav1.PortOpts{}, + }, + { + name: "Nil network, no fixed IPs: cluster defaults", + ports: []infrav1.PortOpts{ + { + Network: nil, + FixedIPs: nil, + }, + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: defaultNetworkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: defaultSubnetID, + }, + }, + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "Empty network, no fixed IPs: cluster defaults", + ports: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{}, + FixedIPs: nil, + }, + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: defaultNetworkID, + }, + FixedIPs: []infrav1.FixedIP{ 
+ { + Subnet: &infrav1.SubnetFilter{ + ID: defaultSubnetID, + }, + }, + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "Port inherits trunk from instance", + ports: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{}, + FixedIPs: nil, + }, + }, + instanceTrunk: true, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: defaultNetworkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: defaultSubnetID, + }, + }, + }, + Trunk: pointer.Bool(true), + }, + }, + }, + { + name: "Port overrides trunk from instance", + ports: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{}, + FixedIPs: nil, + Trunk: pointer.Bool(true), + }, + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: defaultNetworkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: defaultSubnetID, + }, + }, + }, + Trunk: pointer.Bool(true), + }, + }, + }, + { + name: "Network defined by ID: unchanged", + ports: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + }, + }, + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "Network defined by filter: add ID from network lookup", + ports: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + Name: "test-network", + }, + }, + }, + expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.ListNetwork(networks.ListOpts{Name: "test-network"}).Return([]networks.Network{ + {ID: networkID}, + }, nil) + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + Name: "test-network", + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "No network, fixed IP has subnet by ID: add ID from subnet", + ports: []infrav1.PortOpts{ + { + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: subnetID, + }, + }, + }, + }, + }, + expectNetwork: func(m 
*mock.MockNetworkClientMockRecorder) { + m.GetSubnet(subnetID).Return(&subnets.Subnet{ID: subnetID, NetworkID: networkID}, nil) + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: subnetID, + }, + }, + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "No network, fixed IP has subnet by filter: add ID from subnet", + ports: []infrav1.PortOpts{ + { + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet", + }, + }, + }, + }, + }, + expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.ListSubnet(subnets.ListOpts{Name: "test-subnet"}).Return([]subnets.Subnet{ + {ID: subnetID, NetworkID: networkID}, + }, nil) + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + ID: subnetID, + Name: "test-subnet", + }, + }, + }, + Trunk: pointer.Bool(false), + }, + }, + }, + { + name: "No network, fixed IP subnet returns no matches: error", + ports: []infrav1.PortOpts{ + { + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet", + }, + }, + }, + }, + }, + expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.ListSubnet(subnets.ListOpts{Name: "test-subnet"}).Return([]subnets.Subnet{}, nil) + }, + wantErr: true, + }, + { + name: "No network, only fixed IP subnet returns multiple matches: error", + ports: []infrav1.PortOpts{ + { + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet", + }, + }, + }, + }, + }, + expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.ListSubnet(subnets.ListOpts{Name: "test-subnet"}).Return([]subnets.Subnet{ + {ID: subnetID, NetworkID: networkID}, + {ID: "8008494c-301e-4e5c-951b-a8ab568447fd", NetworkID: "5d48bfda-db28-42ee-8374-50e13d1fe5ea"}, + }, nil) + }, + wantErr: true, + }, + { + name: 
"No network, first fixed IP subnet returns multiple matches: used ID from second fixed IP", + ports: []infrav1.PortOpts{ + { + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet1", + }, + }, + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet2", + }, + }, + }, + }, + }, + expectNetwork: func(m *mock.MockNetworkClientMockRecorder) { + m.ListSubnet(subnets.ListOpts{Name: "test-subnet1"}).Return([]subnets.Subnet{ + {ID: subnetID, NetworkID: networkID}, + {ID: "8008494c-301e-4e5c-951b-a8ab568447fd", NetworkID: "5d48bfda-db28-42ee-8374-50e13d1fe5ea"}, + }, nil) + m.ListSubnet(subnets.ListOpts{Name: "test-subnet2"}).Return([]subnets.Subnet{ + {ID: subnetID, NetworkID: networkID}, + }, nil) + }, + want: []infrav1.PortOpts{ + { + Network: &infrav1.NetworkFilter{ + ID: networkID, + }, + FixedIPs: []infrav1.FixedIP{ + { + Subnet: &infrav1.SubnetFilter{ + Name: "test-subnet1", + }, + }, + { + Subnet: &infrav1.SubnetFilter{ + ID: subnetID, + Name: "test-subnet2", + }, + }, + }, + Trunk: pointer.Bool(false), + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + mockClient := mock.NewMockNetworkClient(mockCtrl) + if tt.expectNetwork != nil { + tt.expectNetwork(mockClient.EXPECT()) + } + s := Service{ + client: mockClient, + scope: scope.NewMockScopeFactory(mockCtrl, "", logr.Discard()), + } + + got, err := s.normalizePorts(tt.ports, openStackCluster, tt.instanceTrunk) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + return + } + + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(got).To(Equal(tt.want), cmp.Diff(got, tt.want)) + }) + } +} + +func Test_getPortName(t *testing.T) { + type args struct { + instanceName string + opts *infrav1.PortOpts + netIndex int + } + tests := []struct { + name string + args args + want string + }{ + { + name: "with nil PortOpts", + args: args{"test-1-instance", nil, 2}, + want: "test-1-instance-2", + }, + { + name: "with PortOpts name suffix", + 
args: args{"test-1-instance", &infrav1.PortOpts{NameSuffix: "foo"}, 4}, + want: "test-1-instance-foo", + }, + { + name: "without PortOpts name suffix", + args: args{"test-1-instance", &infrav1.PortOpts{}, 4}, + want: "test-1-instance-4", + }, + { + name: "with PortOpts name suffix", + args: args{"test-1-instance", &infrav1.PortOpts{NameSuffix: "foo2", Network: &infrav1.NetworkFilter{ID: "bar"}, DisablePortSecurity: pointer.Bool(true)}, 4}, + want: "test-1-instance-foo2", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := GetPortName(tt.args.instanceName, tt.args.opts, tt.args.netIndex); got != tt.want { + t.Errorf("getPortName() = %v, want %v", got, tt.want) + } + }) + } +}