diff --git a/controllers/vspheremachine_controller.go b/controllers/vspheremachine_controller.go index 26ffb1394c..a189492f7e 100644 --- a/controllers/vspheremachine_controller.go +++ b/controllers/vspheremachine_controller.go @@ -31,6 +31,7 @@ import ( apitypes "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/klog/v2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterutilv1 "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" @@ -61,7 +62,11 @@ import ( "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" ) -const hostInfoErrStr = "host info cannot be used as a label value" +const ( + hostInfoErrStr = "host info cannot be used as a label value" + // VSphereMachineControllerName is the name of the vSphere machine controller. + VSphereMachineControllerName = "vspheremachine-controller" +) // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspheremachines,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspheremachines/status,verbs=get;update;patch @@ -97,15 +102,8 @@ func AddMachineControllerToManager(ctx context.Context, controllerManagerContext controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerManagerContext.Namespace, controllerManagerContext.Name, controllerNameShort) } - // Build the controller context. - controllerContext := &capvcontext.ControllerContext{ - ControllerManagerContext: controllerManagerContext, - Name: controllerNameShort, - Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)), - Logger: controllerManagerContext.Logger.WithName(controllerNameShort), - } - builder := ctrl.NewControllerManagedBy(mgr). + Named(VSphereMachineControllerName). // Watch the controlled, infrastructure resource. For(controlledType). WithOptions(options). 
@@ -126,15 +124,16 @@ func AddMachineControllerToManager(ctx context.Context, controllerManagerContext WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), controllerManagerContext.WatchFilterValue)) r := &machineReconciler{ - ControllerContext: controllerContext, - VMService: &services.VimMachineService{}, - supervisorBased: supervisorBased, + Client: controllerManagerContext.Client, + Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)), + VMService: &services.VimMachineService{Client: controllerManagerContext.Client}, + supervisorBased: supervisorBased, } if supervisorBased { // Watch any VirtualMachine resources owned by this VSphereMachine builder.Owns(&vmoprv1.VirtualMachine{}) - r.VMService = &vmoperator.VmopMachineService{} + r.VMService = &vmoperator.VmopMachineService{Client: controllerManagerContext.Client} networkProvider, err := inframanager.GetNetworkProvider(ctx, controllerManagerContext.Client, controllerManagerContext.NetworkProvider) if err != nil { return errors.Wrap(err, "failed to create a network provider") @@ -160,7 +159,7 @@ func AddMachineControllerToManager(ctx context.Context, controllerManagerContext &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(r.clusterToVSphereMachines), ctrlbldr.WithPredicates( - predicates.ClusterUnpausedAndInfrastructureReady(r.Logger), + predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)), ), ) } @@ -169,7 +168,8 @@ func AddMachineControllerToManager(ctx context.Context, controllerManagerContext } type machineReconciler struct { - *capvcontext.ControllerContext + Client client.Client + Recorder record.Recorder VMService services.VSphereMachineService networkProvider services.NetworkProvider supervisorBased bool @@ -178,11 +178,11 @@ type machineReconciler struct { // Reconcile ensures the back-end state reflects the Kubernetes resource state intent. func (r *machineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { var machineContext capvcontext.MachineContext - logger := ctrl.LoggerFrom(ctx) - logger.V(4).Info("Starting Reconcile") + log := ctrl.LoggerFrom(ctx) + log.V(4).Info("Starting Reconcile") // Fetch VSphereMachine object and populate the machine context - machineContext, err := r.VMService.FetchVSphereMachine(r.Client, req.NamespacedName) + machineContext, err := r.VMService.FetchVSphereMachine(ctx, req.NamespacedName) if err != nil { if apierrors.IsNotFound(err) { return reconcile.Result{}, nil @@ -191,15 +191,17 @@ func (r *machineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ } // Fetch the CAPI Machine and CAPI Cluster. - machine, err := clusterutilv1.GetOwnerMachine(r, r.Client, machineContext.GetObjectMeta()) + machine, err := clusterutilv1.GetOwnerMachine(ctx, r.Client, machineContext.GetObjectMeta()) if err != nil { return reconcile.Result{}, err } if machine == nil { - logger.V(2).Info("waiting on Machine controller to set OwnerRef on infra machine") + log.Info("Waiting on Machine controller to set OwnerRef on VSphereMachine") return reconcile.Result{}, nil } + log = log.WithValues("Machine", klog.KObj(machine)) + ctx = ctrl.LoggerInto(ctx, log) cluster := r.fetchCAPICluster(ctx, machine, machineContext.GetVSphereMachine()) // Create the patch helper. 
@@ -212,11 +214,9 @@ func (r *machineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ machineContext.GetObjectMeta().Name) } machineContext.SetBaseMachineContext(&capvcontext.BaseMachineContext{ - ControllerContext: r.ControllerContext, - Cluster: cluster, - Machine: machine, - Logger: logger, - PatchHelper: patchHelper, + Cluster: cluster, + Machine: machine, + PatchHelper: patchHelper, }) // always patch the VSphereMachine object defer func() { @@ -228,7 +228,7 @@ func (r *machineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ) // Patch the VSphereMachine resource. - if err := machineContext.Patch(); err != nil { + if err := machineContext.Patch(ctx); err != nil { reterr = kerrors.NewAggregate([]error{reterr, err}) } }() @@ -243,9 +243,9 @@ func (r *machineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ } // Fetch the VSphereCluster and update the machine context - machineContext, err = r.VMService.FetchVSphereCluster(r.Client, cluster, machineContext) + machineContext, err = r.VMService.FetchVSphereCluster(ctx, cluster, machineContext) if err != nil { - logger.Info("unable to retrieve VSphereCluster", "error", err) + log.Error(err, "unable to retrieve VSphereCluster") return reconcile.Result{}, nil } @@ -261,10 +261,11 @@ func (r *machineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ func (r *machineReconciler) reconcileDelete(ctx context.Context, machineCtx capvcontext.MachineContext) (reconcile.Result, error) { log := ctrl.LoggerFrom(ctx) + log.Info("Handling deleted VSphereMachine") conditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") - if err := r.VMService.ReconcileDelete(machineCtx); err != nil { + if err := r.VMService.ReconcileDelete(ctx, machineCtx); err != nil { if apierrors.IsNotFound(err) { // The VM is deleted so remove the finalizer. ctrlutil.RemoveFinalizer(machineCtx.GetVSphereMachine(), infrav1.MachineFinalizer) @@ -280,14 +281,15 @@ func (r *machineReconciler) reconcileDelete(ctx context.Context, machineCtx capv func (r *machineReconciler) reconcileNormal(ctx context.Context, machineCtx capvcontext.MachineContext) (reconcile.Result, error) { log := ctrl.LoggerFrom(ctx) - machineFailed, err := r.VMService.SyncFailureReason(machineCtx) + + machineFailed, err := r.VMService.SyncFailureReason(ctx, machineCtx) if err != nil && !apierrors.IsNotFound(err) { return reconcile.Result{}, err } // If the VSphereMachine is in an error state, return early. if machineFailed { - log.Info("Error state detected, skipping reconciliation") + log.Error(err, "Error state detected, skipping reconciliation") return reconcile.Result{}, nil } @@ -318,7 +320,7 @@ func (r *machineReconciler) reconcileNormal(ctx context.Context, machineCtx capv return reconcile.Result{}, nil } - requeue, err := r.VMService.ReconcileNormal(machineCtx) + requeue, err := r.VMService.ReconcileNormal(ctx, machineCtx) if err != nil { return reconcile.Result{}, err } else if requeue { @@ -343,7 +345,8 @@ func (r *machineReconciler) reconcileNormal(ctx context.Context, machineCtx capv // which would be added onto the node by the CAPI controllers. 
func (r *machineReconciler) patchMachineLabelsWithHostInfo(ctx context.Context, machineCtx capvcontext.MachineContext) error { log := ctrl.LoggerFrom(ctx) - hostInfo, err := r.VMService.GetHostInfo(machineCtx) + + hostInfo, err := r.VMService.GetHostInfo(ctx, machineCtx) if err != nil { return err } @@ -366,7 +369,7 @@ func (r *machineReconciler) patchMachineLabelsWithHostInfo(ctx context.Context, labels[constants.ESXiHostInfoLabel] = info machine.Labels = labels - return patchHelper.Patch(r, machine) + return patchHelper.Patch(ctx, machine) } func (r *machineReconciler) clusterToVSphereMachines(ctx context.Context, a client.Object) []reconcile.Request { @@ -389,13 +392,14 @@ func (r *machineReconciler) clusterToVSphereMachines(ctx context.Context, a clie func (r *machineReconciler) fetchCAPICluster(ctx context.Context, machine *clusterv1.Machine, vsphereMachine metav1.Object) *clusterv1.Cluster { log := ctrl.LoggerFrom(ctx) - cluster, err := clusterutilv1.GetClusterFromMetadata(r, r.Client, machine.ObjectMeta) + cluster, err := clusterutilv1.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta) if err != nil { log.Info("Machine is missing cluster label or cluster does not exist") return nil } + log = log.WithValues("Cluster", klog.KObj(cluster)) if annotations.IsPaused(cluster, vsphereMachine) { - log.V(4).Info("VSphereMachine %s/%s linked to a cluster that is paused", vsphereMachine.GetNamespace(), vsphereMachine.GetName()) + log.V(4).Info("VSphereMachine is linked to a cluster that is paused") return nil } diff --git a/pkg/context/fake/fake_machine_context.go b/pkg/context/fake/fake_machine_context.go index 74bdaa8c4b..0949d7cba9 100644 --- a/pkg/context/fake/fake_machine_context.go +++ b/pkg/context/fake/fake_machine_context.go @@ -46,7 +46,6 @@ func NewMachineContext(clusterCtx *capvcontext.ClusterContext, controllerCtx *ca ControllerContext: controllerCtx, Cluster: clusterCtx.Cluster, Machine: &machine, - Logger: controllerCtx.Logger.WithName(vsphereMachine.Name), }, VSphereCluster: clusterCtx.VSphereCluster, VSphereMachine: &vsphereMachine, diff --git a/pkg/context/interfaces.go b/pkg/context/interfaces.go index efe002ef2c..83c8b52026 100644 --- a/pkg/context/interfaces.go +++ b/pkg/context/interfaces.go @@ -17,7 +17,8 @@ limitations under the License. package context import ( - "github.com/go-logr/logr" + "context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" @@ -27,8 +28,7 @@ import ( // MachineContext is the context used in VSphereMachine reconciliation. type MachineContext interface { String() string - Patch() error - GetLogger() logr.Logger + Patch(ctx context.Context) error GetVSphereMachine() VSphereMachine GetObjectMeta() metav1.ObjectMeta GetCluster() *clusterv1.Cluster diff --git a/pkg/context/machine_context.go b/pkg/context/machine_context.go index 370af6e92c..9d1966070f 100644 --- a/pkg/context/machine_context.go +++ b/pkg/context/machine_context.go @@ -17,9 +17,9 @@ limitations under the License. package context import ( + "context" "fmt" - "github.com/go-logr/logr" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/patch" @@ -29,11 +29,10 @@ import ( // BaseMachineContext contains information about a CAPI Machine for VSphereMachine reconciliation. 
type BaseMachineContext struct { - *ControllerContext - Logger logr.Logger - Cluster *clusterv1.Cluster - Machine *clusterv1.Machine - PatchHelper *patch.Helper + ControllerContext *ControllerContext + Cluster *clusterv1.Cluster + Machine *clusterv1.Machine + PatchHelper *patch.Helper } // GetCluster returns the cluster for the BaseMachineContext. @@ -46,11 +45,6 @@ func (c *BaseMachineContext) GetMachine() *clusterv1.Machine { return c.Machine } -// GetLogger returns this context's logger. -func (c *BaseMachineContext) GetLogger() logr.Logger { - return c.Logger -} - // VIMMachineContext is a Go context used with a VSphereMachine. type VIMMachineContext struct { *BaseMachineContext @@ -64,8 +58,8 @@ func (c *VIMMachineContext) String() string { } // Patch updates the object and its status on the API server. -func (c *VIMMachineContext) Patch() error { - return c.PatchHelper.Patch(c, c.VSphereMachine) +func (c *VIMMachineContext) Patch(ctx context.Context) error { + return c.PatchHelper.Patch(ctx, c.VSphereMachine) } // GetVSphereMachine returns the VSphereMachine for the VIMMachineContext. diff --git a/pkg/context/vmware/machine_context.go b/pkg/context/vmware/machine_context.go index 694ad07dc5..f460bbb95b 100644 --- a/pkg/context/vmware/machine_context.go +++ b/pkg/context/vmware/machine_context.go @@ -17,22 +17,23 @@ limitations under the License. package vmware import ( + "context" "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" ) // VMModifier allows a function to be passed to VM creation to modify its spec // The hook is loosely typed so as to allow for different VirtualMachine backends. type VMModifier func(runtime.Object) (runtime.Object, error) // SupervisorMachineContext is a Go context used with a VSphereMachine. type SupervisorMachineContext struct { - *context.BaseMachineContext + *capvcontext.BaseMachineContext VSphereCluster *vmwarev1.VSphereCluster VSphereMachine *vmwarev1.VSphereMachine VMModifiers []VMModifier @@ -44,12 +45,12 @@ func (c *SupervisorMachineContext) String() string { } // Patch updates the object and its status on the API server. -func (c *SupervisorMachineContext) Patch() error { - return c.PatchHelper.Patch(c, c.VSphereMachine) +func (c *SupervisorMachineContext) Patch(ctx context.Context) error { + return c.PatchHelper.Patch(ctx, c.VSphereMachine) } // GetVSphereMachine returns the VSphereMachine from the SupervisorMachineContext. -func (c *SupervisorMachineContext) GetVSphereMachine() context.VSphereMachine { +func (c *SupervisorMachineContext) GetVSphereMachine() capvcontext.VSphereMachine { return c.VSphereMachine } @@ -67,6 +68,6 @@ func (c *SupervisorMachineContext) GetClusterContext() *ClusterContext { } // SetBaseMachineContext sets the BaseMachineContext for the SupervisorMachineContext. 
-func (c *SupervisorMachineContext) SetBaseMachineContext(base *context.BaseMachineContext) { +func (c *SupervisorMachineContext) SetBaseMachineContext(base *capvcontext.BaseMachineContext) { c.BaseMachineContext = base } diff --git a/pkg/services/interfaces.go b/pkg/services/interfaces.go index 314455409b..e3b7991968 100644 --- a/pkg/services/interfaces.go +++ b/pkg/services/interfaces.go @@ -23,7 +23,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" @@ -33,12 +32,12 @@ import ( // VSphereMachineService is used for vsphere VM lifecycle and syncing with VSphereMachine types. type VSphereMachineService interface { - FetchVSphereMachine(client client.Client, name types.NamespacedName) (capvcontext.MachineContext, error) - FetchVSphereCluster(client client.Client, cluster *clusterv1.Cluster, machineContext capvcontext.MachineContext) (capvcontext.MachineContext, error) - ReconcileDelete(machineCtx capvcontext.MachineContext) error - SyncFailureReason(machineCtx capvcontext.MachineContext) (bool, error) - ReconcileNormal(machineCtx capvcontext.MachineContext) (bool, error) - GetHostInfo(machineCtx capvcontext.MachineContext) (string, error) + FetchVSphereMachine(ctx context.Context, name types.NamespacedName) (capvcontext.MachineContext, error) + FetchVSphereCluster(ctx context.Context, cluster *clusterv1.Cluster, machineContext capvcontext.MachineContext) (capvcontext.MachineContext, error) + ReconcileDelete(ctx context.Context, machineCtx capvcontext.MachineContext) error + SyncFailureReason(ctx context.Context, machineCtx capvcontext.MachineContext) (bool, error) + ReconcileNormal(ctx context.Context, machineCtx capvcontext.MachineContext) (bool, error) + GetHostInfo(ctx context.Context, machineCtx capvcontext.MachineContext) (string, error) } // VirtualMachineService is a service for creating/updating/deleting virtual diff --git a/pkg/services/services_suite_test.go b/pkg/services/services_suite_test.go index f972f67a55..ec08ac3c33 100644 --- a/pkg/services/services_suite_test.go +++ b/pkg/services/services_suite_test.go @@ -21,8 +21,11 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + ctrl "sigs.k8s.io/controller-runtime" ) +var ctx = ctrl.SetupSignalHandler() + func TestCAPVServices(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "CAPV services tests") diff --git a/pkg/services/vimmachine.go b/pkg/services/vimmachine.go index 7b0426afd6..43db16e6a5 100644 --- a/pkg/services/vimmachine.go +++ b/pkg/services/vimmachine.go @@ -28,10 +28,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" "k8s.io/utils/integer" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterutilv1 "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -41,18 +43,20 @@ import ( ) // VimMachineService reconciles VSphere VMs. -type VimMachineService struct{} +type VimMachineService struct { + Client client.Client +} // FetchVSphereMachine returns a new MachineContext containing the vsphereMachine. 
-func (v *VimMachineService) FetchVSphereMachine(c client.Client, name types.NamespacedName) (capvcontext.MachineContext, error) { +func (v *VimMachineService) FetchVSphereMachine(ctx context.Context, name types.NamespacedName) (capvcontext.MachineContext, error) { vsphereMachine := &infrav1.VSphereMachine{} - err := c.Get(context.Background(), name, vsphereMachine) + err := v.Client.Get(ctx, name, vsphereMachine) return &capvcontext.VIMMachineContext{VSphereMachine: vsphereMachine}, err } // FetchVSphereCluster adds the VSphereCluster associated with the passed cluster to the machineContext. -func (v *VimMachineService) FetchVSphereCluster(c client.Client, cluster *clusterv1.Cluster, machineContext capvcontext.MachineContext) (capvcontext.MachineContext, error) { +func (v *VimMachineService) FetchVSphereCluster(ctx context.Context, cluster *clusterv1.Cluster, machineContext capvcontext.MachineContext) (capvcontext.MachineContext, error) { vimMachineCtx, ok := machineContext.(*capvcontext.VIMMachineContext) if !ok { return nil, errors.New("received unexpected VIMMachineContext type") @@ -62,20 +66,20 @@ func (v *VimMachineService) FetchVSphereCluster(c client.Client, cluster *cluste Namespace: machineContext.GetObjectMeta().Namespace, Name: cluster.Spec.InfrastructureRef.Name, } - err := c.Get(context.Background(), vsphereClusterName, vsphereCluster) + err := v.Client.Get(ctx, vsphereClusterName, vsphereCluster) vimMachineCtx.VSphereCluster = vsphereCluster return vimMachineCtx, err } // ReconcileDelete reconciles delete events for the VSphere VM. -func (v *VimMachineService) ReconcileDelete(machineCtx capvcontext.MachineContext) error { +func (v *VimMachineService) ReconcileDelete(ctx context.Context, machineCtx capvcontext.MachineContext) error { vimMachineCtx, ok := machineCtx.(*capvcontext.VIMMachineContext) if !ok { return errors.New("received unexpected VIMMachineContext type") } - vm, err := v.findVSphereVM(vimMachineCtx) + vm, err := v.findVSphereVM(ctx, vimMachineCtx) // Attempt to find the associated VSphereVM resource. if err != nil { return err @@ -84,7 +88,7 @@ func (v *VimMachineService) ReconcileDelete(machineCtx capvcontext.MachineContex if vm != nil && vm.GetDeletionTimestamp().IsZero() { // If the VSphereVM was found and it's not already enqueued for // deletion, go ahead and attempt to delete it. - if err := vimMachineCtx.Client.Delete(vimMachineCtx, vm); err != nil { + if err := v.Client.Delete(ctx, vm); err != nil { return err } } @@ -96,13 +100,13 @@ func (v *VimMachineService) ReconcileDelete(machineCtx capvcontext.MachineContex } // SyncFailureReason returns true if the VSphere Machine has failed. -func (v *VimMachineService) SyncFailureReason(machineCtx capvcontext.MachineContext) (bool, error) { +func (v *VimMachineService) SyncFailureReason(ctx context.Context, machineCtx capvcontext.MachineContext) (bool, error) { vimMachineCtx, ok := machineCtx.(*capvcontext.VIMMachineContext) if !ok { return false, errors.New("received unexpected VIMMachineContext type") } - vsphereVM, err := v.findVSphereVM(vimMachineCtx) + vsphereVM, err := v.findVSphereVM(ctx, vimMachineCtx) if err != nil { return false, err } @@ -116,25 +120,27 @@ func (v *VimMachineService) SyncFailureReason(machineCtx capvcontext.MachineCont } // ReconcileNormal reconciles create and update events for the VSphere VM. 
-func (v *VimMachineService) ReconcileNormal(machineCtx capvcontext.MachineContext) (bool, error) { +func (v *VimMachineService) ReconcileNormal(ctx context.Context, machineCtx capvcontext.MachineContext) (bool, error) { + log := ctrl.LoggerFrom(ctx) vimMachineCtx, ok := machineCtx.(*capvcontext.VIMMachineContext) if !ok { return false, errors.New("received unexpected VIMMachineContext type") } - vsphereVM, err := v.findVSphereVM(vimMachineCtx) + vsphereVM, err := v.findVSphereVM(ctx, vimMachineCtx) if err != nil && !apierrors.IsNotFound(err) { return false, err } - vm, err := v.createOrPatchVSphereVM(vimMachineCtx, vsphereVM) + // findVSphereVM returns a nil VSphereVM when it does not exist yet, so only enrich the logger once it is set. + if vsphereVM != nil { + log = log.WithValues("VSphereVM", klog.KObj(vsphereVM)) + ctx = ctrl.LoggerInto(ctx, log) + } + vm, err := v.createOrPatchVSphereVM(ctx, vimMachineCtx, vsphereVM) if err != nil { - vimMachineCtx.Logger.Error(err, "error creating or patching VM", "vsphereVM", vsphereVM) - return false, err + log.Error(err, "error creating or patching VM") + return false, err } // Wait for the VM's ready state. if !vm.Status.Ready { - vimMachineCtx.Logger.Info("waiting for ready state") + log.Info("Waiting for ready state") // VSphereMachine wraps a VSphereVM, so we are mirroring status from the underlying VSphereVM // in order to provide evidence about machine provisioning while provisioning is actually happening. conditions.SetMirror(vimMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vm) @@ -142,20 +148,20 @@ func (v *VimMachineService) ReconcileNormal(machineCtx capvcontext.MachineContex return false, nil } // Reconcile the VSphereMachine's provider ID using the VM's BIOS UUID. - if ok, err := v.reconcileProviderID(vimMachineCtx, vm); !ok { + if ok, err := v.reconcileProviderID(ctx, vimMachineCtx, vm); !ok { if err != nil { return false, errors.Wrapf(err, "unexpected error while reconciling provider ID for %s", vimMachineCtx) } - vimMachineCtx.Logger.Info("provider ID is not reconciled") + log.Info("Provider ID is not reconciled") return true, nil } // Reconcile the VSphereMachine's node addresses from the VM's IP addresses. - if ok, err := v.reconcileNetwork(vimMachineCtx, vm); !ok { + if ok, err := v.reconcileNetwork(ctx, vimMachineCtx, vm); !ok { if err != nil { return false, errors.Wrapf(err, "unexpected error while reconciling network for %s", vimMachineCtx) } - vimMachineCtx.Logger.Info("network is not reconciled") + log.Info("Network is not reconciled") conditions.MarkFalse(vimMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForNetworkAddressesReason, clusterv1.ConditionSeverityInfo, "") return true, nil } @@ -165,14 +171,15 @@ func (v *VimMachineService) ReconcileNormal(machineCtx capvcontext.MachineContex } // GetHostInfo returns the hostname or IP address of the infrastructure host for the VSphere VM. 
-func (v *VimMachineService) GetHostInfo(c capvcontext.MachineContext) (string, error) { - vimMachineCtx, ok := c.(*capvcontext.VIMMachineContext) +func (v *VimMachineService) GetHostInfo(ctx context.Context, machineCtx capvcontext.MachineContext) (string, error) { + log := ctrl.LoggerFrom(ctx) + vimMachineCtx, ok := machineCtx.(*capvcontext.VIMMachineContext) if !ok { return "", errors.New("received unexpected VIMMachineContext type") } vsphereVM := &infrav1.VSphereVM{} - if err := vimMachineCtx.Client.Get(vimMachineCtx, client.ObjectKey{ + if err := v.Client.Get(ctx, client.ObjectKey{ Namespace: vimMachineCtx.VSphereMachine.Namespace, Name: generateVMObjectName(vimMachineCtx, vimMachineCtx.Machine.Name), }, vsphereVM); err != nil { @@ -182,11 +189,11 @@ if conditions.IsTrue(vsphereVM, infrav1.VMProvisionedCondition) { return vsphereVM.Status.Host, nil } - vimMachineCtx.Logger.V(4).Info("VMProvisionedCondition is set to false", "vsphereVM", vsphereVM.Name) + log.V(4).Info("VMProvisionedCondition is set to false", "VSphereVM", klog.KObj(vsphereVM)) return "", nil } -func (v *VimMachineService) findVSphereVM(vimMachineCtx *capvcontext.VIMMachineContext) (*infrav1.VSphereVM, error) { +func (v *VimMachineService) findVSphereVM(ctx context.Context, vimMachineCtx *capvcontext.VIMMachineContext) (*infrav1.VSphereVM, error) { // Get ready to find the associated VSphereVM resource. vm := &infrav1.VSphereVM{} vmKey := types.NamespacedName{ Namespace: vimMachineCtx.VSphereMachine.Namespace, Name: generateVMObjectName(vimMachineCtx, vimMachineCtx.Machine.Name), } // Attempt to find the associated VSphereVM resource. 
- if err := vimMachineCtx.Client.Get(vimMachineCtx, vmKey, vm); err != nil { + if err := v.Client.Get(ctx, vmKey, vm); err != nil { return nil, err } return vm, nil } -func (v *VimMachineService) reconcileProviderID(vimMachineCtx *capvcontext.VIMMachineContext, vm *infrav1.VSphereVM) (bool, error) { +func (v *VimMachineService) reconcileProviderID(ctx context.Context, vimMachineCtx *capvcontext.VIMMachineContext, vm *infrav1.VSphereVM) (bool, error) { + log := ctrl.LoggerFrom(ctx).WithValues("VSphereVM", klog.KRef(vm.Namespace, vm.Name)) biosUUID := vm.Spec.BiosUUID if biosUUID == "" { - vimMachineCtx.Logger.Info("spec.biosUUID is empty", - "vmKind", vm.Kind, - "vmNamespace", vm.Namespace, - "vmName", vm.Name) + log.Info("spec.biosUUID is empty") return false, nil } providerID := infrautilv1.ConvertUUIDToProviderID(biosUUID) if providerID == "" { - return false, errors.Errorf("invalid BIOS UUID %s from %s %s/%s for %s", - biosUUID, - vm.Kind, - vm.Namespace, - vm.Name, - vimMachineCtx) + return false, errors.Errorf("invalid BIOS UUID %s for %s", biosUUID, vimMachineCtx) } if vimMachineCtx.VSphereMachine.Spec.ProviderID == nil || *vimMachineCtx.VSphereMachine.Spec.ProviderID != providerID { vimMachineCtx.VSphereMachine.Spec.ProviderID = &providerID - vimMachineCtx.Logger.Info("updated provider ID", "provider-id", providerID) + log.Info("Updated provider ID", "providerID", providerID) } return true, nil } //nolint:nestif -func (v *VimMachineService) reconcileNetwork(vimMachineCtx *capvcontext.VIMMachineContext, vm *infrav1.VSphereVM) (bool, error) { +func (v *VimMachineService) reconcileNetwork(ctx context.Context, vimMachineCtx *capvcontext.VIMMachineContext, vm *infrav1.VSphereVM) (bool, error) { + log := ctrl.LoggerFrom(ctx) var errs []error networkStatusListOfIfaces := vm.Status.Network var networkStatusList []infrav1.NetworkStatus for i, networkStatusListMemberIface := range networkStatusListOfIfaces { if buf, err := json.Marshal(networkStatusListMemberIface); err != nil { - vimMachineCtx.Logger.Error(err, + log.Error(err, "unsupported data for member of status.network list", "index", i) errs = append(errs, err) @@ -246,7 +247,7 @@ func (v *VimMachineService) reconcileNetwork(vimMachineCtx *capvcontext.VIMMachi errs = append(errs, err) } if err != nil { - vimMachineCtx.Logger.Error(err, + log.Error(err, "unsupported data for member of status.network list", "index", i, "data", string(buf)) errs = append(errs, err) @@ -268,14 +269,15 @@ func (v *VimMachineService) reconcileNetwork(vimMachineCtx *capvcontext.VIMMachi vimMachineCtx.VSphereMachine.Status.Addresses = machineAddresses if len(vimMachineCtx.VSphereMachine.Status.Addresses) == 0 { - vimMachineCtx.Logger.Info("waiting on IP addresses") + log.Info("Waiting on IP addresses") return false, kerrors.NewAggregate(errs) } return true, nil } -func (v *VimMachineService) createOrPatchVSphereVM(vimMachineCtx *capvcontext.VIMMachineContext, vsphereVM *infrav1.VSphereVM) (*infrav1.VSphereVM, error) { +func (v *VimMachineService) createOrPatchVSphereVM(ctx context.Context, vimMachineCtx *capvcontext.VIMMachineContext, vsphereVM *infrav1.VSphereVM) (*infrav1.VSphereVM, error) { + log := ctrl.LoggerFrom(ctx) // Create or update the VSphereVM resource. 
vm := &infrav1.VSphereVM{ ObjectMeta: metav1.ObjectMeta{ @@ -323,7 +325,7 @@ func (v *VimMachineService) createOrPatchVSphereVM(vimMachineCtx *capvcontext.VI vimMachineCtx.VSphereMachine.Spec.VirtualMachineCloneSpec.DeepCopyInto(&vm.Spec.VirtualMachineCloneSpec) // If Failure Domain is present on CAPI machine, use that to override the vm clone spec. - if overrideFunc, ok := v.generateOverrideFunc(vimMachineCtx); ok { + if overrideFunc, ok := v.generateOverrideFunc(ctx, vimMachineCtx); ok { overrideFunc(vm) } @@ -347,53 +349,22 @@ func (v *VimMachineService) createOrPatchVSphereVM(vimMachineCtx *capvcontext.VI return nil } - vmKey := types.NamespacedName{ - Namespace: vm.Namespace, - Name: vm.Name, - } - result, err := ctrlutil.CreateOrPatch(vimMachineCtx, vimMachineCtx.Client, vm, mutateFn) + result, err := ctrlutil.CreateOrPatch(ctx, v.Client, vm, mutateFn) if err != nil { - vimMachineCtx.Logger.Error( - err, - "failed to CreateOrPatch VSphereVM", - "namespace", - vm.Namespace, - "name", - vm.Name, - ) + log.Error(err, "failed to CreateOrPatch VSphereVM") return nil, err } switch result { case ctrlutil.OperationResultNone: - vimMachineCtx.Logger.Info( - "no update required for vm", - "vm", - vmKey, - ) + log.Info("No update required for vm") case ctrlutil.OperationResultCreated: - vimMachineCtx.Logger.Info( - "created vm", - "vm", - vmKey, - ) + log.Info("Created vm") case ctrlutil.OperationResultUpdated: - vimMachineCtx.Logger.Info( - "updated vm", - "vm", - vmKey, - ) + log.Info("Updated vm") case ctrlutil.OperationResultUpdatedStatus: - vimMachineCtx.Logger.Info( - "updated vm and vm status", - "vm", - vmKey, - ) + log.Info("Updated vm and vm status") case ctrlutil.OperationResultUpdatedStatusOnly: - vimMachineCtx.Logger.Info( - "updated vm status", - "vm", - vmKey, - ) + log.Info("Updated vm status") } return vm, nil @@ -413,7 +384,8 @@ func generateVMObjectName(vimMachineCtx *capvcontext.VIMMachineContext, machineN // with the values from the FailureDomain (if any) set on the owner CAPI machine. 
// //nolint:nestif -func (v *VimMachineService) generateOverrideFunc(vimMachineCtx *capvcontext.VIMMachineContext) (func(vm *infrav1.VSphereVM), bool) { +func (v *VimMachineService) generateOverrideFunc(ctx context.Context, vimMachineCtx *capvcontext.VIMMachineContext) (func(vm *infrav1.VSphereVM), bool) { + log := ctrl.LoggerFrom(ctx) failureDomainName := vimMachineCtx.Machine.Spec.FailureDomain if failureDomainName == nil { return nil, false @@ -421,14 +393,14 @@ // Use the failureDomain name to fetch the vSphereDeploymentZone object var vsphereDeploymentZone infrav1.VSphereDeploymentZone - if err := vimMachineCtx.Client.Get(vimMachineCtx, client.ObjectKey{Name: *failureDomainName}, &vsphereDeploymentZone); err != nil { - vimMachineCtx.Logger.Error(err, "unable to fetch vsphere deployment zone", "name", *failureDomainName) + if err := v.Client.Get(ctx, client.ObjectKey{Name: *failureDomainName}, &vsphereDeploymentZone); err != nil { + log.Error(err, "unable to fetch VSphereDeploymentZone", "VSphereDeploymentZone", klog.KRef("", *failureDomainName)) return nil, false } var vsphereFailureDomain infrav1.VSphereFailureDomain - if err := vimMachineCtx.Client.Get(vimMachineCtx, client.ObjectKey{Name: vsphereDeploymentZone.Spec.FailureDomain}, &vsphereFailureDomain); err != nil { - vimMachineCtx.Logger.Error(err, "unable to fetch failure domain", "name", vsphereDeploymentZone.Spec.FailureDomain) + if err := v.Client.Get(ctx, client.ObjectKey{Name: vsphereDeploymentZone.Spec.FailureDomain}, &vsphereFailureDomain); err != nil { + log.Error(err, "unable to fetch VSphereFailureDomain", "VSphereFailureDomain", klog.KRef("", vsphereDeploymentZone.Spec.FailureDomain)) return nil, false } diff --git a/pkg/services/vimmachine_test.go b/pkg/services/vimmachine_test.go index 04074dcad1..6734af5d73 100644 --- a/pkg/services/vimmachine_test.go +++ b/pkg/services/vimmachine_test.go @@ -68,12 +68,12 @@ var _ = Describe("VimMachineService_GenerateOverrideFunc", func() { BeforeEach(func() { controllerCtx = fake.NewControllerContext(fake.NewControllerManagerContext(deplZone("one"), deplZone("two"), failureDomain("one"), failureDomain("two"))) machineCtx = fake.NewMachineContext(fake.NewClusterContext(controllerCtx), controllerCtx) - vimMachineService = &VimMachineService{} + vimMachineService = &VimMachineService{controllerCtx.Client} }) Context("When Failure Domain is not present", func() { It("does not generate an override function", func() { - _, ok := vimMachineService.generateOverrideFunc(machineCtx) + _, ok := vimMachineService.generateOverrideFunc(ctx, machineCtx) Expect(ok).To(BeFalse()) }) }) @@ -84,12 +84,12 @@ var _ = Describe("VimMachineService_GenerateOverrideFunc", func() { }) It("generates an override function", func() { - _, ok := vimMachineService.generateOverrideFunc(machineCtx) + _, ok := vimMachineService.generateOverrideFunc(ctx, machineCtx) Expect(ok).To(BeTrue()) }) It("uses the deployment zone placement constraint & failure domains topology for VM values", func() { - overrideFunc, ok := vimMachineService.generateOverrideFunc(machineCtx) + overrideFunc, ok := vimMachineService.generateOverrideFunc(ctx, machineCtx) Expect(ok).To(BeTrue()) vm := &infrav1.VSphereVM{Spec: infrav1.VSphereVMSpec{}} @@ -108,7 +108,7 @@ var _ = Describe("VimMachineService_GenerateOverrideFunc", func() { }) 
It("fails to generate an override function", func() { - overrideFunc, ok := vimMachineService.generateOverrideFunc(machineCtx) + overrideFunc, ok := vimMachineService.generateOverrideFunc(ctx, machineCtx) Expect(ok).To(BeFalse()) Expect(overrideFunc).To(BeNil()) }) @@ -125,7 +125,7 @@ var _ = Describe("VimMachineService_GenerateOverrideFunc", func() { }, } - overrideFunc, ok := vimMachineService.generateOverrideFunc(machineCtx) + overrideFunc, ok := vimMachineService.generateOverrideFunc(ctx, machineCtx) Expect(ok).To(BeTrue()) overrideFunc(vm) @@ -147,7 +147,7 @@ var _ = Describe("VimMachineService_GenerateOverrideFunc", func() { }, } - overrideFunc, ok := vimMachineService.generateOverrideFunc(machineCtx) + overrideFunc, ok := vimMachineService.generateOverrideFunc(ctx, machineCtx) Expect(ok).To(BeTrue()) overrideFunc(vm) @@ -169,7 +169,7 @@ var _ = Describe("VimMachineService_GenerateOverrideFunc", func() { }, } - overrideFunc, ok := vimMachineService.generateOverrideFunc(machineCtx) + overrideFunc, ok := vimMachineService.generateOverrideFunc(ctx, machineCtx) Expect(ok).To(BeTrue()) overrideFunc(vm) @@ -216,9 +216,10 @@ var _ = Describe("VimMachineService_GetHostInfo", func() { BeforeEach(func() { controllerCtx = fake.NewControllerContext(fake.NewControllerManagerContext(getVSphereVM(hostAddr, corev1.ConditionTrue))) machineCtx = fake.NewMachineContext(fake.NewClusterContext(controllerCtx), controllerCtx) + vimMachineService = &VimMachineService{controllerCtx.Client} }) It("Fetches host address from the VSphereVM object", func() { - host, err := vimMachineService.GetHostInfo(machineCtx) + host, err := vimMachineService.GetHostInfo(ctx, machineCtx) Expect(err).NotTo(HaveOccurred()) Expect(host).To(Equal(hostAddr)) }) @@ -228,9 +229,10 @@ var _ = Describe("VimMachineService_GetHostInfo", func() { BeforeEach(func() { controllerCtx = fake.NewControllerContext(fake.NewControllerManagerContext(getVSphereVM(hostAddr, corev1.ConditionFalse))) machineCtx = fake.NewMachineContext(fake.NewClusterContext(controllerCtx), controllerCtx) + vimMachineService = &VimMachineService{controllerCtx.Client} }) It("returns empty string", func() { - host, err := vimMachineService.GetHostInfo(machineCtx) + host, err := vimMachineService.GetHostInfo(ctx, machineCtx) Expect(err).NotTo(HaveOccurred()) Expect(host).To(BeEmpty()) }) @@ -242,7 +244,7 @@ var _ = Describe("VimMachineService_createOrPatchVSphereVM", func() { var ( controllerCtx *context.ControllerContext machineCtx *context.VIMMachineContext - vimMachineService = &VimMachineService{} + vimMachineService *VimMachineService hostAddr = "1.2.3.4" fakeLongClusterName = "fake-long-clustername" ) @@ -268,13 +270,14 @@ var _ = Describe("VimMachineService_createOrPatchVSphereVM", func() { controllerCtx = fake.NewControllerContext(fake.NewControllerManagerContext(getVSphereVM(hostAddr, corev1.ConditionTrue))) machineCtx = fake.NewMachineContext(fake.NewClusterContext(controllerCtx), controllerCtx) machineCtx.Machine.SetName(fakeLongClusterName) + vimMachineService = &VimMachineService{controllerCtx.Client} Context("When VSphereMachine OS is Windows", func() { BeforeEach(func() { machineCtx.VSphereMachine.Spec.OS = infrav1.Windows }) It("returns a renamed vspherevm object", func() { - vm, err := vimMachineService.createOrPatchVSphereVM(machineCtx, getVSphereVM(hostAddr, corev1.ConditionTrue)) + vm, err := vimMachineService.createOrPatchVSphereVM(ctx, machineCtx, getVSphereVM(hostAddr, corev1.ConditionTrue)) vmName := vm.Name Expect(err).NotTo(HaveOccurred()) 
Expect(vmName).To(Equal("fake-long-rname")) @@ -286,7 +289,7 @@ var _ = Describe("VimMachineService_createOrPatchVSphereVM", func() { machineCtx.VSphereMachine.Spec.OS = infrav1.Linux }) It("returns the same vspherevm name", func() { - vm, err := vimMachineService.createOrPatchVSphereVM(machineCtx, getVSphereVM(hostAddr, corev1.ConditionTrue)) + vm, err := vimMachineService.createOrPatchVSphereVM(ctx, machineCtx, getVSphereVM(hostAddr, corev1.ConditionTrue)) vmName := vm.Name Expect(err).NotTo(HaveOccurred()) Expect(vmName).To(Equal(fakeLongClusterName)) diff --git a/pkg/services/vmoperator/vmoperator_suite_test.go b/pkg/services/vmoperator/vmoperator_suite_test.go index 8c95addd31..9d21406d3e 100644 --- a/pkg/services/vmoperator/vmoperator_suite_test.go +++ b/pkg/services/vmoperator/vmoperator_suite_test.go @@ -21,8 +21,11 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + ctrl "sigs.k8s.io/controller-runtime" ) +var ctx = ctrl.SetupSignalHandler() + func TestCAPVServices(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "VMOperator Services Tests") diff --git a/pkg/services/vmoperator/vmopmachine.go b/pkg/services/vmoperator/vmopmachine.go index d4cb214db7..b2b81f65ff 100644 --- a/pkg/services/vmoperator/vmopmachine.go +++ b/pkg/services/vmoperator/vmopmachine.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -42,18 +43,20 @@ import ( ) // VmopMachineService reconciles VM Operator VM. -type VmopMachineService struct{} +type VmopMachineService struct { + Client client.Client +} // FetchVSphereMachine returns a MachineContext with a VSphereMachine for the passed NamespacedName. -func (v *VmopMachineService) FetchVSphereMachine(client client.Client, name types.NamespacedName) (capvcontext.MachineContext, error) { +func (v *VmopMachineService) FetchVSphereMachine(ctx context.Context, name types.NamespacedName) (capvcontext.MachineContext, error) { vsphereMachine := &vmwarev1.VSphereMachine{} - err := client.Get(context.Background(), name, vsphereMachine) + err := v.Client.Get(ctx, name, vsphereMachine) return &vmware.SupervisorMachineContext{VSphereMachine: vsphereMachine}, err } // FetchVSphereCluster adds the VSphereCluster for the cluster to the MachineContext. 
-func (v *VmopMachineService) FetchVSphereCluster(c client.Client, cluster *clusterv1.Cluster, machineContext capvcontext.MachineContext) (capvcontext.MachineContext, error) { - ctx, ok := machineContext.(*vmware.SupervisorMachineContext) +func (v *VmopMachineService) FetchVSphereCluster(ctx context.Context, cluster *clusterv1.Cluster, machineContext capvcontext.MachineContext) (capvcontext.MachineContext, error) { + machineCtx, ok := machineContext.(*vmware.SupervisorMachineContext) if !ok { return nil, errors.New("received unexpected SupervisorMachineContext type") } @@ -63,33 +66,34 @@ func (v *VmopMachineService) FetchVSphereCluster(c client.Client, cluster *clust Namespace: machineContext.GetObjectMeta().Namespace, Name: cluster.Spec.InfrastructureRef.Name, } - err := c.Get(context.Background(), key, vsphereCluster) + err := v.Client.Get(ctx, key, vsphereCluster) - ctx.VSphereCluster = vsphereCluster - return ctx, err + machineCtx.VSphereCluster = vsphereCluster + return machineCtx, err } // ReconcileDelete reconciles delete events for VM Operator VM. -func (v *VmopMachineService) ReconcileDelete(machineCtx capvcontext.MachineContext) error { +func (v *VmopMachineService) ReconcileDelete(ctx context.Context, machineCtx capvcontext.MachineContext) error { + log := ctrl.LoggerFrom(ctx) supervisorMachineCtx, ok := machineCtx.(*vmware.SupervisorMachineContext) if !ok { return errors.New("received unexpected SupervisorMachineContext type") } - supervisorMachineCtx.Logger.V(2).Info("Destroying VM") + log.V(2).Info("Destroying VM") // If debug logging is enabled, report the number of vms in the cluster before and after the reconcile - if supervisorMachineCtx.Logger.V(5).Enabled() { - vms, err := getVirtualMachinesInCluster(supervisorMachineCtx) - supervisorMachineCtx.Logger.Info("Trace Destroy PRE: VirtualMachines", "vmcount", len(vms), "error", err) + if log.V(5).Enabled() { + vms, err := v.getVirtualMachinesInCluster(ctx, supervisorMachineCtx) + log.Info("Trace Destroy PRE: VirtualMachines", "vmcount", len(vms), "error", err) defer func() { - vms, err := getVirtualMachinesInCluster(supervisorMachineCtx) - supervisorMachineCtx.Logger.Info("Trace Destroy POST: VirtualMachines", "vmcount", len(vms), "error", err) + vms, err := v.getVirtualMachinesInCluster(ctx, supervisorMachineCtx) + log.Info("Trace Destroy POST: VirtualMachines", "vmcount", len(vms), "error", err) }() } // First, check to see if it's already deleted vmopVM := vmoprv1.VirtualMachine{} - if err := supervisorMachineCtx.Client.Get(supervisorMachineCtx, types.NamespacedName{Namespace: supervisorMachineCtx.Machine.Namespace, Name: supervisorMachineCtx.Machine.Name}, &vmopVM); err != nil { + if err := v.Client.Get(ctx, types.NamespacedName{Namespace: supervisorMachineCtx.Machine.Namespace, Name: supervisorMachineCtx.Machine.Name}, &vmopVM); err != nil { if apierrors.IsNotFound(err) { supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateNotFound return err @@ -105,7 +109,7 @@ func (v *VmopMachineService) ReconcileDelete(machineCtx capvcontext.MachineConte } // If none of the above are true, Delete the VM - if err := supervisorMachineCtx.Client.Delete(supervisorMachineCtx, &vmopVM); err != nil { + if err := v.Client.Delete(ctx, &vmopVM); err != nil { if apierrors.IsNotFound(err) { supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateNotFound return err @@ -119,7 +123,7 @@ func (v *VmopMachineService) ReconcileDelete(machineCtx capvcontext.MachineConte } // SyncFailureReason 
returns true if there is a Failure on the VM Operator VM. -func (v *VmopMachineService) SyncFailureReason(machineCtx capvcontext.MachineContext) (bool, error) { +func (v *VmopMachineService) SyncFailureReason(_ context.Context, machineCtx capvcontext.MachineContext) (bool, error) { supervisorMachineCtx, ok := machineCtx.(*vmware.SupervisorMachineContext) if !ok { return false, errors.New("received unexpected SupervisorMachineContext type") @@ -129,23 +133,23 @@ func (v *VmopMachineService) SyncFailureReason(machineCtx capvcontext.MachineCon } // ReconcileNormal reconciles create and update events for VM Operator VMs. -func (v *VmopMachineService) ReconcileNormal(machineCtx capvcontext.MachineContext) (bool, error) { +func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx capvcontext.MachineContext) (bool, error) { + log := ctrl.LoggerFrom(ctx) supervisorMachineCtx, ok := machineCtx.(*vmware.SupervisorMachineContext) if !ok { return false, errors.New("received unexpected SupervisorMachineContext type") } supervisorMachineCtx.VSphereMachine.Spec.FailureDomain = supervisorMachineCtx.Machine.Spec.FailureDomain - - supervisorMachineCtx.Logger.V(2).Info("Reconciling VM") + log.V(2).Info("Reconciling VM") // If debug logging is enabled, report the number of vms in the cluster before and after the reconcile - if supervisorMachineCtx.Logger.V(5).Enabled() { - vms, err := getVirtualMachinesInCluster(supervisorMachineCtx) - supervisorMachineCtx.Logger.Info("Trace ReconcileVM PRE: VirtualMachines", "vmcount", len(vms), "error", err) + if log.V(5).Enabled() { + vms, err := v.getVirtualMachinesInCluster(ctx, supervisorMachineCtx) + log.Info("Trace ReconcileVM PRE: VirtualMachines", "vmcount", len(vms), "error", err) defer func() { - vms, err := getVirtualMachinesInCluster(supervisorMachineCtx) - supervisorMachineCtx.Logger.Info("Trace ReconcileVM POST: VirtualMachines", "vmcount", len(vms), "error", err) + vms, err := v.getVirtualMachinesInCluster(ctx, supervisorMachineCtx) + log.Info("Trace ReconcileVM POST: VirtualMachines", "vmcount", len(vms), "error", err) }() } @@ -156,7 +160,7 @@ func (v *VmopMachineService) ReconcileNormal(machineCtx capvcontext.MachineConte vmOperatorVM := v.newVMOperatorVM(supervisorMachineCtx) // Reconcile the VM Operator VirtualMachine. 
- if err := v.reconcileVMOperatorVM(supervisorMachineCtx, vmOperatorVM); err != nil { + if err := v.reconcileVMOperatorVM(ctx, supervisorMachineCtx, vmOperatorVM); err != nil { conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.VMCreationFailedReason, clusterv1.ConditionSeverityWarning, fmt.Sprintf("failed to create or update VirtualMachine: %v", err)) // TODO: what to do if AlreadyExists error @@ -184,7 +188,7 @@ func (v *VmopMachineService) ReconcileNormal(machineCtx capvcontext.MachineConte // * A BIOS UUID if vmOperatorVM.Status.Phase != vmoprv1.Created { conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.VMProvisionStartedReason, clusterv1.ConditionSeverityInfo, "") - supervisorMachineCtx.Logger.Info(fmt.Sprintf("vm is not yet created: %s", supervisorMachineCtx)) + log.Info(fmt.Sprintf("vm is not yet created: %s", supervisorMachineCtx)) return true, nil } // Mark the VM as created @@ -192,7 +196,7 @@ func (v *VmopMachineService) ReconcileNormal(machineCtx capvcontext.MachineConte if vmOperatorVM.Status.PowerState != vmoprv1.VirtualMachinePoweredOn { conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.PoweringOnReason, clusterv1.ConditionSeverityInfo, "") - supervisorMachineCtx.Logger.Info(fmt.Sprintf("vm is not yet powered on: %s", supervisorMachineCtx)) + log.Info(fmt.Sprintf("vm is not yet powered on: %s", supervisorMachineCtx)) return true, nil } // Mark the VM as poweredOn @@ -200,13 +204,13 @@ func (v *VmopMachineService) ReconcileNormal(machineCtx capvcontext.MachineConte if vmOperatorVM.Status.VmIp == "" { conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.WaitingForNetworkAddressReason, clusterv1.ConditionSeverityInfo, "") - supervisorMachineCtx.Logger.Info(fmt.Sprintf("vm does not have an IP address: %s", supervisorMachineCtx)) + log.Info(fmt.Sprintf("vm does not have an IP address: %s", supervisorMachineCtx)) return true, nil } if vmOperatorVM.Status.BiosUUID == "" { conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.WaitingForBIOSUUIDReason, clusterv1.ConditionSeverityInfo, "") - supervisorMachineCtx.Logger.Info(fmt.Sprintf("vm does not have a BIOS UUID: %s", supervisorMachineCtx)) + log.Info(fmt.Sprintf("vm does not have a BIOS UUID: %s", supervisorMachineCtx)) return true, nil } @@ -214,11 +218,11 @@ func (v *VmopMachineService) ReconcileNormal(machineCtx capvcontext.MachineConte supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateReady if ok := v.reconcileNetwork(supervisorMachineCtx, vmOperatorVM); !ok { - supervisorMachineCtx.Logger.Info("ip not yet assigned") + log.Info("ip not yet assigned") return true, nil } - v.reconcileProviderID(supervisorMachineCtx, vmOperatorVM) + v.reconcileProviderID(ctx, supervisorMachineCtx, vmOperatorVM) // Mark the VSphereMachine as Ready supervisorMachineCtx.VSphereMachine.Status.Ready = true @@ -227,14 +231,14 @@ func (v *VmopMachineService) ReconcileNormal(machineCtx capvcontext.MachineConte } // GetHostInfo returns the hostname or IP address of the infrastructure host for the VM Operator VM. 
-func (v VmopMachineService) GetHostInfo(machineCtx capvcontext.MachineContext) (string, error) { +func (v VmopMachineService) GetHostInfo(ctx context.Context, machineCtx capvcontext.MachineContext) (string, error) { supervisorMachineCtx, ok := machineCtx.(*vmware.SupervisorMachineContext) if !ok { return "", errors.New("received unexpected SupervisorMachineContext type") } vmOperatorVM := &vmoprv1.VirtualMachine{} - if err := supervisorMachineCtx.Client.Get(supervisorMachineCtx, client.ObjectKey{ + if err := v.Client.Get(ctx, client.ObjectKey{ Name: supervisorMachineCtx.Machine.Name, Namespace: supervisorMachineCtx.Machine.Namespace, }, vmOperatorVM); err != nil { @@ -257,7 +261,7 @@ func (v VmopMachineService) newVMOperatorVM(supervisorMachineCtx *vmware.Supervi } } -func (v VmopMachineService) reconcileVMOperatorVM(supervisorMachineCtx *vmware.SupervisorMachineContext, vmOperatorVM *vmoprv1.VirtualMachine) error { +func (v VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervisorMachineCtx *vmware.SupervisorMachineContext, vmOperatorVM *vmoprv1.VirtualMachine) error { // All Machine resources should define the version of Kubernetes to use. if supervisorMachineCtx.Machine.Spec.Version == nil || *supervisorMachineCtx.Machine.Spec.Version == "" { return errors.Errorf( @@ -272,7 +276,7 @@ func (v VmopMachineService) reconcileVMOperatorVM(supervisorMachineCtx *vmware.S dataSecretName = *dsn } - _, err := ctrlutil.CreateOrPatch(supervisorMachineCtx, supervisorMachineCtx.Client, vmOperatorVM, func() error { + _, err := ctrlutil.CreateOrPatch(ctx, v.Client, vmOperatorVM, func() error { // Define a new VM Operator virtual machine. // NOTE: Set field-by-field in order to preserve changes made directly // to the VirtualMachine spec by other sources (e.g. the cloud provider) @@ -308,7 +312,7 @@ func (v VmopMachineService) reconcileVMOperatorVM(supervisorMachineCtx *vmware.S addResourcePolicyAnnotations(supervisorMachineCtx, vmOperatorVM) - if err := addVolumes(supervisorMachineCtx, vmOperatorVM); err != nil { + if err := v.addVolumes(ctx, supervisorMachineCtx, vmOperatorVM); err != nil { return err } @@ -327,7 +331,7 @@ func (v VmopMachineService) reconcileVMOperatorVM(supervisorMachineCtx *vmware.S } // Make sure the VSphereMachine owns the VM Operator VirtualMachine. 
- if err := ctrlutil.SetControllerReference(supervisorMachineCtx.VSphereMachine, vmOperatorVM, supervisorMachineCtx.Scheme); err != nil { + if err := ctrlutil.SetControllerReference(supervisorMachineCtx.VSphereMachine, vmOperatorVM, v.Client.Scheme()); err != nil { return errors.Wrapf(err, "failed to mark %s %s/%s as owner of %s %s/%s", supervisorMachineCtx.VSphereMachine.GroupVersionKind(), supervisorMachineCtx.VSphereMachine.Namespace, @@ -352,28 +356,29 @@ func (v *VmopMachineService) reconcileNetwork(supervisorMachineCtx *vmware.Super return true } -func (v *VmopMachineService) reconcileProviderID(supervisorMachineCtx *vmware.SupervisorMachineContext, vm *vmoprv1.VirtualMachine) { +func (v *VmopMachineService) reconcileProviderID(ctx context.Context, supervisorMachineCtx *vmware.SupervisorMachineContext, vm *vmoprv1.VirtualMachine) { + log := ctrl.LoggerFrom(ctx) providerID := fmt.Sprintf("vsphere://%s", vm.Status.BiosUUID) if supervisorMachineCtx.VSphereMachine.Spec.ProviderID == nil || *supervisorMachineCtx.VSphereMachine.Spec.ProviderID != providerID { supervisorMachineCtx.VSphereMachine.Spec.ProviderID = &providerID - supervisorMachineCtx.Logger.Info("Updated provider ID for machine", "machine", supervisorMachineCtx.VSphereMachine.Name, "provider-id", providerID) + log.Info("Updated provider ID", "providerID", providerID) } if supervisorMachineCtx.VSphereMachine.Status.ID == nil || *supervisorMachineCtx.VSphereMachine.Status.ID != vm.Status.BiosUUID { supervisorMachineCtx.VSphereMachine.Status.ID = &vm.Status.BiosUUID - supervisorMachineCtx.Logger.Info("Updated VM ID for machine", "machine", supervisorMachineCtx.VSphereMachine.Name, "vm-id", vm.Status.BiosUUID) + log.Info("Updated VM ID", "vmID", vm.Status.BiosUUID) } } // getVirtualMachinesInCluster returns all VMOperator VirtualMachine objects in the current cluster. // First filter by clusterSelectorKey. If the result is empty, fall back to legacyClusterSelectorKey. 
-func getVirtualMachinesInCluster(supervisorMachineCtx *vmware.SupervisorMachineContext) ([]*vmoprv1.VirtualMachine, error) { +func (v *VmopMachineService) getVirtualMachinesInCluster(ctx context.Context, supervisorMachineCtx *vmware.SupervisorMachineContext) ([]*vmoprv1.VirtualMachine, error) { labels := map[string]string{clusterSelectorKey: supervisorMachineCtx.Cluster.Name} vmList := &vmoprv1.VirtualMachineList{} - if err := supervisorMachineCtx.Client.List( - supervisorMachineCtx, vmList, + if err := v.Client.List( + ctx, vmList, client.InNamespace(supervisorMachineCtx.Cluster.Namespace), client.MatchingLabels(labels)); err != nil { return nil, errors.Wrapf( @@ -384,8 +389,8 @@ func getVirtualMachinesInCluster(supervisorMachineC // If the list is empty, fall back to using legacy labels for filtering if len(vmList.Items) == 0 { legacyLabels := map[string]string{legacyClusterSelectorKey: supervisorMachineCtx.Cluster.Name} - if err := supervisorMachineCtx.Client.List( - supervisorMachineCtx, vmList, + if err := v.Client.List( + ctx, vmList, client.InNamespace(supervisorMachineCtx.Cluster.Namespace), client.MatchingLabels(legacyLabels)); err != nil { return nil, errors.Wrapf( @@ -445,7 +450,7 @@ func addVolume(vm *vmoprv1.VirtualMachine, name string) { }) } -func addVolumes(supervisorMachineCtx *vmware.SupervisorMachineContext, vm *vmoprv1.VirtualMachine) error { +func (v *VmopMachineService) addVolumes(ctx context.Context, supervisorMachineCtx *vmware.SupervisorMachineContext, vm *vmoprv1.VirtualMachine) error { nvolumes := len(supervisorMachineCtx.VSphereMachine.Spec.Volumes) if nvolumes == 0 { return nil @@ -492,11 +497,11 @@ } } - if _, err := ctrlutil.CreateOrPatch(supervisorMachineCtx, supervisorMachineCtx.Client, pvc, func() error { + if _, err := ctrlutil.CreateOrPatch(ctx, v.Client, pvc, func() error { if err := ctrlutil.SetOwnerReference( supervisorMachineCtx.VSphereMachine, pvc, - supervisorMachineCtx.Scheme, + v.Client.Scheme(), ); err != nil { return errors.Wrapf( err, diff --git a/pkg/services/vmoperator/vmopmachine_test.go b/pkg/services/vmoperator/vmopmachine_test.go index 29fd13407e..1d17954be5 100644 --- a/pkg/services/vmoperator/vmopmachine_test.go +++ b/pkg/services/vmoperator/vmopmachine_test.go @@ -17,6 +17,7 @@ limitations under the License. package vmoperator import ( + "context" "time" . 
"github.com/onsi/ginkgo/v2" @@ -37,13 +38,13 @@ import ( "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" ) -func getReconciledVM(ctx *vmware.SupervisorMachineContext) *vmoprv1.VirtualMachine { +func getReconciledVM(ctx context.Context, vmService VmopMachineService, supervisorMachineContext *vmware.SupervisorMachineContext) *vmoprv1.VirtualMachine { vm := &vmoprv1.VirtualMachine{} nsname := types.NamespacedName{ - Namespace: ctx.Machine.Namespace, - Name: ctx.Machine.Name, + Namespace: supervisorMachineContext.Machine.Namespace, + Name: supervisorMachineContext.Machine.Name, } - err := ctx.Client.Get(ctx, nsname, vm) + err := vmService.Client.Get(ctx, nsname, vm) if apierrors.IsNotFound(err) { return nil } @@ -51,8 +52,8 @@ func getReconciledVM(ctx *vmware.SupervisorMachineContext) *vmoprv1.VirtualMachi return vm } -func updateReconciledVM(ctx *vmware.SupervisorMachineContext, vm *vmoprv1.VirtualMachine) { - err := ctx.Client.Status().Update(ctx, vm) +func updateReconciledVM(ctx context.Context, vmService VmopMachineService, vm *vmoprv1.VirtualMachine) { + err := vmService.Client.Status().Update(ctx, vm) Expect(err).ShouldNot(HaveOccurred()) } @@ -83,16 +84,15 @@ var _ = Describe("VirtualMachine tests", func() { expectedConditions clusterv1.Conditions expectedRequeue bool - cluster *clusterv1.Cluster - vsphereCluster *vmwarev1.VSphereCluster - machine *clusterv1.Machine - vsphereMachine *vmwarev1.VSphereMachine - ctx *vmware.SupervisorMachineContext + cluster *clusterv1.Cluster + vsphereCluster *vmwarev1.VSphereCluster + machine *clusterv1.Machine + vsphereMachine *vmwarev1.VSphereMachine + supervisorMachineContext *vmware.SupervisorMachineContext // vm vmwarev1.VirtualMachine - vmopVM *vmoprv1.VirtualMachine - - vmService = VmopMachineService{} + vmopVM *vmoprv1.VirtualMachine + vmService VmopMachineService ) BeforeEach(func() { @@ -109,15 +109,16 @@ var _ = Describe("VirtualMachine tests", func() { machine = util.CreateMachine(machineName, clusterName, k8sVersion, controlPlaneLabelTrue) vsphereMachine = util.CreateVSphereMachine(machineName, clusterName, className, imageName, storageClass, controlPlaneLabelTrue) clusterContext, controllerCtx := util.CreateClusterContext(cluster, vsphereCluster) - ctx = util.CreateMachineContext(clusterContext, machine, vsphereMachine) - ctx.ControllerContext = controllerCtx + supervisorMachineContext = util.CreateMachineContext(clusterContext, machine, vsphereMachine) + supervisorMachineContext.ControllerContext = controllerCtx + vmService = VmopMachineService{Client: controllerCtx.Client} }) Context("Reconcile VirtualMachine", func() { - verifyOutput := func(ctx *vmware.SupervisorMachineContext) { + verifyOutput := func(machineContext *vmware.SupervisorMachineContext) { Expect(err != nil).Should(Equal(expectReconcileError)) Expect(requeue).Should(Equal(expectedRequeue)) - vsphereMachine := ctx.VSphereMachine + vsphereMachine := machineContext.VSphereMachine Expect(vsphereMachine).ShouldNot(BeNil()) Expect(vsphereMachine.Name).Should(Equal(machineName)) @@ -129,11 +130,11 @@ var _ = Describe("VirtualMachine tests", func() { Expect(vsphereMachine.Status.IPAddr).Should(Equal(expectedVMIP)) Expect(vsphereMachine.Status.VMStatus).Should(Equal(expectedState)) - vmopVM = getReconciledVM(ctx) + vmopVM = getReconciledVM(ctx, vmService, machineContext) Expect(vmopVM != nil).Should(Equal(expectVMOpVM)) if vmopVM != nil { - vms, _ := getVirtualMachinesInCluster(ctx) + vms, _ := vmService.getVirtualMachinesInCluster(ctx, machineContext) 
@@ -83,16 +84,15 @@ var _ = Describe("VirtualMachine tests", func() {
 		expectedConditions clusterv1.Conditions
 		expectedRequeue    bool

-		cluster        *clusterv1.Cluster
-		vsphereCluster *vmwarev1.VSphereCluster
-		machine        *clusterv1.Machine
-		vsphereMachine *vmwarev1.VSphereMachine
-		ctx            *vmware.SupervisorMachineContext
+		cluster                  *clusterv1.Cluster
+		vsphereCluster           *vmwarev1.VSphereCluster
+		machine                  *clusterv1.Machine
+		vsphereMachine           *vmwarev1.VSphereMachine
+		supervisorMachineContext *vmware.SupervisorMachineContext

 		// vm      vmwarev1.VirtualMachine
-		vmopVM *vmoprv1.VirtualMachine
-
-		vmService = VmopMachineService{}
+		vmopVM    *vmoprv1.VirtualMachine
+		vmService VmopMachineService
 	)

 	BeforeEach(func() {
@@ -109,15 +109,16 @@ var _ = Describe("VirtualMachine tests", func() {
 		machine = util.CreateMachine(machineName, clusterName, k8sVersion, controlPlaneLabelTrue)
 		vsphereMachine = util.CreateVSphereMachine(machineName, clusterName, className, imageName, storageClass, controlPlaneLabelTrue)
 		clusterContext, controllerCtx := util.CreateClusterContext(cluster, vsphereCluster)
-		ctx = util.CreateMachineContext(clusterContext, machine, vsphereMachine)
-		ctx.ControllerContext = controllerCtx
+		supervisorMachineContext = util.CreateMachineContext(clusterContext, machine, vsphereMachine)
+		supervisorMachineContext.ControllerContext = controllerCtx
+		vmService = VmopMachineService{Client: controllerCtx.Client}
 	})

 	Context("Reconcile VirtualMachine", func() {
-		verifyOutput := func(ctx *vmware.SupervisorMachineContext) {
+		verifyOutput := func(machineContext *vmware.SupervisorMachineContext) {
 			Expect(err != nil).Should(Equal(expectReconcileError))
 			Expect(requeue).Should(Equal(expectedRequeue))
-			vsphereMachine := ctx.VSphereMachine
+			vsphereMachine := machineContext.VSphereMachine
 			Expect(vsphereMachine).ShouldNot(BeNil())
 			Expect(vsphereMachine.Name).Should(Equal(machineName))
@@ -129,11 +130,11 @@ var _ = Describe("VirtualMachine tests", func() {
 			Expect(vsphereMachine.Status.IPAddr).Should(Equal(expectedVMIP))
 			Expect(vsphereMachine.Status.VMStatus).Should(Equal(expectedState))

-			vmopVM = getReconciledVM(ctx)
+			vmopVM = getReconciledVM(ctx, vmService, machineContext)
 			Expect(vmopVM != nil).Should(Equal(expectVMOpVM))

 			if vmopVM != nil {
-				vms, _ := getVirtualMachinesInCluster(ctx)
+				vms, _ := vmService.getVirtualMachinesInCluster(ctx, machineContext)
 				Expect(vms).Should(HaveLen(1))
 				Expect(vmopVM.Spec.ImageName).To(Equal(expectedImageName))
 				Expect(vmopVM.Spec.ClassName).To(Equal(className))
@@ -150,7 +151,7 @@ var _ = Describe("VirtualMachine tests", func() {
 			}

 			for _, expectedCondition := range expectedConditions {
-				c := conditions.Get(ctx.VSphereMachine, expectedCondition.Type)
+				c := conditions.Get(machineContext.VSphereMachine, expectedCondition.Type)
 				Expect(c).NotTo(BeNil())
 				Expect(c.Status).To(Equal(expectedCondition.Status))
 				Expect(c.Reason).To(Equal(expectedCondition.Reason))
@@ -188,8 +189,8 @@ var _ = Describe("VirtualMachine tests", func() {
 			// Do the bare minimum that will cause a vmoperator VirtualMachine to be created
 			// Note that the VM returned is not a vmoperator type, but is intentionally implementation agnostic
 			By("VirtualMachine is created")
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)

 			// Provide valid bootstrap data.
 			By("bootstrap data is created")
@@ -203,7 +204,7 @@ var _ = Describe("VirtualMachine tests", func() {
 					"value": []byte(bootstrapData),
 				},
 			}
-			Expect(ctx.Client.Create(ctx, secret)).To(Succeed())
+			Expect(vmService.Client.Create(ctx, secret)).To(Succeed())
 			machine.Spec.Bootstrap.DataSecretName = &secretName

 			// we expect the reconciliation waiting for VM to be created
@@ -211,40 +212,40 @@ var _ = Describe("VirtualMachine tests", func() {
 			expectedConditions[0].Message = ""
 			expectReconcileError = false
 			expectedRequeue = true
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)

 			// Simulate VMOperator creating a vSphere VM
 			By("vSphere VM is created")
-			vmopVM = getReconciledVM(ctx)
+			vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext)
 			vmopVM.Status.Phase = vmoprv1.Created
-			updateReconciledVM(ctx, vmopVM)
+			updateReconciledVM(ctx, vmService, vmopVM)
 			expectedState = vmwarev1.VirtualMachineStateCreated
 			// we expect the reconciliation waiting for VM to be powered on
 			expectedConditions[0].Reason = vmwarev1.PoweringOnReason
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)

 			// Simulate VMOperator powering on the VM
 			By("VirtualMachine is powered on")
-			vmopVM = getReconciledVM(ctx)
+			vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext)
 			vmopVM.Status.PowerState = vmoprv1.VirtualMachinePoweredOn
-			updateReconciledVM(ctx, vmopVM)
+			updateReconciledVM(ctx, vmService, vmopVM)
 			expectedState = vmwarev1.VirtualMachineStatePoweredOn
 			// we expect the reconciliation waiting for VM to have an IP
 			expectedConditions[0].Reason = vmwarev1.WaitingForNetworkAddressReason
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)

 			// Simulate VMOperator assigning an IP address
 			By("VirtualMachine has an IP address")
-			vmopVM = getReconciledVM(ctx)
+			vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext)
 			vmopVM.Status.VmIp = vmIP
-			updateReconciledVM(ctx, vmopVM)
+			updateReconciledVM(ctx, vmService, vmopVM)
 			// we expect the reconciliation waiting for VM to have a BIOS UUID
 			expectedConditions[0].Reason = vmwarev1.WaitingForBIOSUUIDReason
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)

 			// Simulate VMOperator assigning a BIOS UUID
 			By("VirtualMachine has Bios UUID")
@@ -254,14 +255,14 @@ var _ = Describe("VirtualMachine tests", func() {
 			expectedVMIP = vmIP
 			expectedState = vmwarev1.VirtualMachineStateReady

-			vmopVM = getReconciledVM(ctx)
+			vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext)
 			vmopVM.Status.BiosUUID = biosUUID
-			updateReconciledVM(ctx, vmopVM)
+			updateReconciledVM(ctx, vmService, vmopVM)
 			// we expect the reconciliation succeeds
 			expectedConditions[0].Status = corev1.ConditionTrue
 			expectedConditions[0].Reason = ""
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)

 			Expect(vmopVM.Spec.ReadinessProbe).To(BeNil())

@@ -269,7 +270,7 @@ var _ = Describe("VirtualMachine tests", func() {
 			By("With VM Modifier")
 			modifiedImage := "modified-image"
 			expectedImageName = modifiedImage
-			ctx.VMModifiers = []vmware.VMModifier{
+			supervisorMachineContext.VMModifiers = []vmware.VMModifier{
 				func(obj runtime.Object) (runtime.Object, error) {
 					// No need to check the type. We know this will be a VirtualMachine
 					vm, _ := obj.(*vmoprv1.VirtualMachine)
@@ -277,8 +278,8 @@ var _ = Describe("VirtualMachine tests", func() {
 					return vm, nil
 				},
 			}
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)
 		})

 		Specify("Reconcile will add a probe once the cluster reports that the control plane is ready", func() {
@@ -302,7 +303,7 @@ var _ = Describe("VirtualMachine tests", func() {
 					"value": []byte(bootstrapData),
 				},
 			}
-			Expect(ctx.Client.Create(ctx, secret)).To(Succeed())
+			Expect(vmService.Client.Create(ctx, secret)).To(Succeed())
 			machine.Spec.Bootstrap.DataSecretName = &secretName

 			expectedConditions = append(expectedConditions, clusterv1.Condition{
@@ -311,37 +312,37 @@ var _ = Describe("VirtualMachine tests", func() {
 				Reason:  vmwarev1.VMProvisionStartedReason,
 				Message: "",
 			})
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)

 			// Simulate VMOperator creating a vSphere VM
 			By("vSphere VM is created")
-			vmopVM = getReconciledVM(ctx)
+			vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext)
 			vmopVM.Status.Phase = vmoprv1.Created
-			updateReconciledVM(ctx, vmopVM)
+			updateReconciledVM(ctx, vmService, vmopVM)
 			expectedState = vmwarev1.VirtualMachineStateCreated
 			expectedConditions[0].Reason = vmwarev1.PoweringOnReason
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)

 			// Simulate VMOperator powering on the VM
 			By("VirtualMachine is powered on")
-			vmopVM = getReconciledVM(ctx)
+			vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext)
 			vmopVM.Status.PowerState = vmoprv1.VirtualMachinePoweredOn
-			updateReconciledVM(ctx, vmopVM)
+			updateReconciledVM(ctx, vmService, vmopVM)
 			expectedState = vmwarev1.VirtualMachineStatePoweredOn
 			expectedConditions[0].Reason = vmwarev1.WaitingForNetworkAddressReason
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)

 			// Simulate VMOperator assigning an IP address
 			By("VirtualMachine has an IP address")
-			vmopVM = getReconciledVM(ctx)
+			vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext)
 			vmopVM.Status.VmIp = vmIP
-			updateReconciledVM(ctx, vmopVM)
+			updateReconciledVM(ctx, vmService, vmopVM)
 			expectedConditions[0].Reason = vmwarev1.WaitingForBIOSUUIDReason
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)

 			// Simulate VMOperator assigning a BIOS UUID
 			By("VirtualMachine has Bios UUID")
@@ -352,11 +353,11 @@ var _ = Describe("VirtualMachine tests", func() {
 			expectedState = vmwarev1.VirtualMachineStateReady
 			expectedConditions[0].Status = corev1.ConditionTrue
 			expectedConditions[0].Reason = ""
-			vmopVM = getReconciledVM(ctx)
+			vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext)
 			vmopVM.Status.BiosUUID = biosUUID
-			updateReconciledVM(ctx, vmopVM)
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			updateReconciledVM(ctx, vmService, vmopVM)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)

 			Expect(vmopVM.Spec.ReadinessProbe).To(BeNil())

@@ -364,11 +365,11 @@ var _ = Describe("VirtualMachine tests", func() {
 			// Set the control plane to be ready so that the new VM will have a probe
 			cluster.Status.ControlPlaneReady = true
-			vmopVM = getReconciledVM(ctx)
+			vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext)
 			vmopVM.Status.VmIp = vmIP
-			updateReconciledVM(ctx, vmopVM)
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			updateReconciledVM(ctx, vmService, vmopVM)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)

 			Expect(vmopVM.Spec.ReadinessProbe.TCPSocket.Port.IntValue()).To(Equal(defaultAPIBindPort))
 		})
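The final assertion above checks that, once cluster.Status.ControlPlaneReady is true, the reconciler attaches a TCP readiness probe pointed at the API server bind port. The asserted shape is roughly the following (illustrative sketch; assumes the VM Operator v1alpha1 probe types and intstr from k8s.io/apimachinery/pkg/util/intstr, with 6443 standing in for defaultAPIBindPort):

vm.Spec.ReadinessProbe = &vmoprv1.Probe{
	TCPSocket: &vmoprv1.TCPSocketAction{
		Port: intstr.FromInt(6443), // stand-in for defaultAPIBindPort
	},
}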
@@ -386,8 +387,8 @@ var _ = Describe("VirtualMachine tests", func() {
 				Reason:  vmwarev1.VMCreationFailedReason,
 				Message: missingK8SVersionFailure,
 			})
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)
 		})

 		Specify("Reconcile machine when vm prerequisites check fails", func() {
@@ -401,11 +402,11 @@ var _ = Describe("VirtualMachine tests", func() {
 					"value": []byte(bootstrapData),
 				},
 			}
-			Expect(ctx.Client.Create(ctx, secret)).To(Succeed())
+			Expect(vmService.Client.Create(ctx, secret)).To(Succeed())
 			machine.Spec.Bootstrap.DataSecretName = &secretName

-			requeue, err = vmService.ReconcileNormal(ctx)
-			vmopVM = getReconciledVM(ctx)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext)
 			errMessage := "TestVirtualMachineClassBinding not found"
 			vmopVM.Status.Conditions = append(vmopVM.Status.Conditions, vmoprv1.Condition{
 				Type:    vmoprv1.VirtualMachinePrereqReadyCondition,
@@ -415,8 +416,8 @@ var _ = Describe("VirtualMachine tests", func() {
 				Message: errMessage,
 			})

-			updateReconciledVM(ctx, vmopVM)
-			requeue, err = vmService.ReconcileNormal(ctx)
+			updateReconciledVM(ctx, vmService, vmopVM)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)

 			expectedImageName = imageName
 			expectReconcileError = true
@@ -428,7 +429,7 @@ var _ = Describe("VirtualMachine tests", func() {
 				Reason:  vmoprv1.VirtualMachineClassBindingNotFoundReason,
 				Message: errMessage,
 			})
-			verifyOutput(ctx)
+			verifyOutput(supervisorMachineContext)
 		})

 		Specify("Preserve changes made by other sources", func() {
@@ -442,7 +443,7 @@ var _ = Describe("VirtualMachine tests", func() {
 					"value": []byte(bootstrapData),
 				},
 			}
-			Expect(ctx.Client.Create(ctx, secret)).To(Succeed())
+			Expect(vmService.Client.Create(ctx, secret)).To(Succeed())
 			machine.Spec.Bootstrap.DataSecretName = &secretName

 			expectReconcileError = false
@@ -451,8 +452,8 @@ var _ = Describe("VirtualMachine tests", func() {
 			expectedRequeue = true

 			By("VirtualMachine is created")
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)

 			vmVolume := vmoprv1.VirtualMachineVolume{
 				Name: "test",
@@ -465,14 +466,14 @@ var _ = Describe("VirtualMachine tests", func() {
 			}

 			By("Updating the Volumes field")
-			vmopVM = getReconciledVM(ctx)
+			vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext)
 			vmopVM.Spec.Volumes = []vmoprv1.VirtualMachineVolume{vmVolume}
-			updateReconciledVM(ctx, vmopVM)
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			updateReconciledVM(ctx, vmService, vmopVM)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)

 			By("Checking that the Volumes field is still set after the reconcile")
-			vmopVM = getReconciledVM(ctx)
+			vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext)
 			Expect(vmopVM.Spec.Volumes).To(HaveLen(1))
 			Expect(vmopVM.Spec.Volumes[0]).To(BeEquivalentTo(vmVolume))
@@ -500,11 +501,11 @@ var _ = Describe("VirtualMachine tests", func() {
 			}

 			By("VirtualMachine is created")
-			requeue, err = vmService.ReconcileNormal(ctx)
-			verifyOutput(ctx)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+			verifyOutput(supervisorMachineContext)

 			By("Checking that the Volumes field is set after the reconcile")
-			vmopVM = getReconciledVM(ctx)
+			vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext)

 			Expect(vmopVM.Spec.Volumes).To(HaveLen(2))
@@ -531,34 +532,34 @@ var _ = Describe("VirtualMachine tests", func() {
 		verifyDeleteFunc := func() bool {
 			// Our reconcile loop calls DestroyVM until it gets the answer it's looking for
-			_ = vmService.ReconcileDelete(ctx)
-			Expect(ctx.VSphereMachine).ShouldNot(BeNil())
-			if ctx.VSphereMachine.Status.VMStatus == vmwarev1.VirtualMachineStateNotFound {
+			_ = vmService.ReconcileDelete(ctx, supervisorMachineContext)
+			Expect(supervisorMachineContext.VSphereMachine).ShouldNot(BeNil())
+			if supervisorMachineContext.VSphereMachine.Status.VMStatus == vmwarev1.VirtualMachineStateNotFound {
 				// If the state is NotFound, check that the VM really has gone
-				Expect(getReconciledVM(ctx)).Should(BeNil())
+				Expect(getReconciledVM(ctx, vmService, supervisorMachineContext)).Should(BeNil())
 				return true
 			}
 			// If the state is not NotFound, it must be Deleting
-			Expect(ctx.VSphereMachine.Status.VMStatus).Should(Equal(vmwarev1.VirtualMachineStateDeleting))
+			Expect(supervisorMachineContext.VSphereMachine.Status.VMStatus).Should(Equal(vmwarev1.VirtualMachineStateDeleting))
 			return false
 		}

 		BeforeEach(func() {
-			requeue, err = vmService.ReconcileNormal(ctx)
+			requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
 		})

 		// Test expects DestroyVM to return NotFound eventually
 		Specify("Delete VirtualMachine with no delay", func() {
-			Expect(getReconciledVM(ctx)).ShouldNot(BeNil())
+			Expect(getReconciledVM(ctx, vmService, supervisorMachineContext)).ShouldNot(BeNil())
 			Eventually(verifyDeleteFunc, timeout, interval).Should(BeTrue())
 		})

 		Context("With finalizers", func() {
 			JustBeforeEach(func() {
-				vmopVM := getReconciledVM(ctx)
+				vmopVM := getReconciledVM(ctx, vmService, supervisorMachineContext)
 				Expect(vmopVM).ShouldNot(BeNil())
-				vmopVM.Finalizers = append(ctx.VSphereMachine.Finalizers, "test-finalizer")
-				Expect(ctx.Client.Update(ctx, vmopVM)).To(Succeed())
+				vmopVM.Finalizers = append(supervisorMachineContext.VSphereMachine.Finalizers, "test-finalizer")
+				Expect(vmService.Client.Update(ctx, vmopVM)).To(Succeed())
 			})

 			// Test never removes the finalizer and expects DestroyVM to never return NotFound
@@ -568,14 +569,14 @@ var _ = Describe("VirtualMachine tests", func() {

 		// Check that DestroyVM does not update VirtualMachine more than once
 		Specify("DestroyVM does not continue to update the VirtualMachine", func() {
-			_ = vmService.ReconcileDelete(ctx)
-			vmopVM := getReconciledVM(ctx)
+			_ = vmService.ReconcileDelete(ctx, supervisorMachineContext)
+			vmopVM := getReconciledVM(ctx, vmService, supervisorMachineContext)
 			Expect(vmopVM).ShouldNot(BeNil())
 			deleteTimestamp := vmopVM.GetDeletionTimestamp()
 			Expect(deleteTimestamp).ShouldNot(BeNil())

-			_ = vmService.ReconcileDelete(ctx)
-			vmopVM = getReconciledVM(ctx)
+			_ = vmService.ReconcileDelete(ctx, supervisorMachineContext)
+			vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext)
 			Expect(vmopVM).ShouldNot(BeNil())
 			Expect(vmopVM.GetDeletionTimestamp()).To(Equal(deleteTimestamp))
diff --git a/pkg/util/testutil.go b/pkg/util/testutil.go
index 71a4eff57e..e3ab7c7f7e 100644
--- a/pkg/util/testutil.go
+++ b/pkg/util/testutil.go
@@ -179,10 +179,6 @@ func CreateMachineContext(clusterContext *vmware.ClusterContext, machine *cluste
 	// Build the machine context.
 	return &vmware.SupervisorMachineContext{
 		BaseMachineContext: &capvcontext.BaseMachineContext{
-			// TODO(sbueringer): This SupervisorMachineContext is only used in tests and the Logger
-			// will be removed anyway once we refactor the machine controllers, so it's fine if the
-			// logger is not perfect.
-			Logger:  klog.Background().WithName(vsphereMachine.Name),
 			Machine: machine,
 			Cluster: clusterContext.Cluster,
 		},
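With the Logger field dropped from BaseMachineContext, nothing in the machine context carries a logger anymore; reconcile-path code is expected to pull one from the request context, as reconcileProviderID now does. A minimal sketch of that controller-runtime pattern (illustrative only; ctrl is sigs.k8s.io/controller-runtime and log is a logr.Logger):

log := ctrl.LoggerFrom(ctx)                    // logger injected by the controller runtime
log = log.WithValues("providerID", providerID) // attach structured key/value pairs as needed
ctx = ctrl.LoggerInto(ctx, log)                // propagate the enriched logger to callees
log.V(4).Info("Reconciling VirtualMachine")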