migrating loadbalancer.go and openstackmachine_controller.go to structured logging
Ana Runova committed Aug 8, 2023
1 parent 434cfe2 commit 6915c37
Showing 2 changed files with 12 additions and 12 deletions.
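The change throughout follows the logr convention: the log message becomes a constant string, and all variable data moves into key-value pairs instead of being interpolated with fmt.Sprintf. A minimal before/after sketch, assuming a logr-backed logger (here the go-logr/stdr adapter; the subnet ID and name values are illustrative, not from the repository):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	// stdr wraps the standard library logger in a logr.Logger.
	logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags))

	subnetID := "subnet-1234" // illustrative value
	name := "k8s-clusterapi-lb"

	// Before: variable data interpolated into the message string,
	// so log aggregators cannot filter on it as a field.
	logger.Info(fmt.Sprintf("Creating load balancer in subnet: %q", subnetID), "name", name)

	// After: constant message; variable data as structured key-value pairs.
	logger.Info("Creating load balancer in subnet", "subnetID", subnetID, "name", name)
}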
10 changes: 5 additions & 5 deletions controllers/openstackmachine_controller.go
@@ -359,12 +359,12 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope

switch instanceStatus.State() {
case infrav1.InstanceStateActive:
scope.Logger().Info("Machine instance state is ACTIVE", "instance-id", instanceStatus.ID())
scope.Logger().Info("Machine instance state is ACTIVE", "id", instanceStatus.ID())
conditions.MarkTrue(openStackMachine, infrav1.InstanceReadyCondition)
openStackMachine.Status.Ready = true
case infrav1.InstanceStateError:
// Error is unexpected, thus we report error and never retry
scope.Logger().Info("Machine instance state is ERROR", "instance-id", instanceStatus.ID())
scope.Logger().Info("Machine instance state is ERROR", "id", instanceStatus.ID())
err = fmt.Errorf("instance state %q is unexpected", instanceStatus.State())
openStackMachine.SetFailure(capierrors.UpdateMachineError, err)
conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceStateErrorReason, clusterv1.ConditionSeverityError, "")
@@ -377,7 +377,7 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope
default:
// The other state is normal (for example, migrating, shutoff) but we don't want to proceed until it's ACTIVE
// due to potential conflict or unexpected actions
scope.Logger().Info("Waiting for instance to become ACTIVE", "instance-id", instanceStatus.ID(), "status", instanceStatus.State())
scope.Logger().Info("Waiting for instance to become ACTIVE", "id", instanceStatus.ID(), "status", instanceStatus.State())
conditions.MarkUnknown(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, "Instance state is not handled: %s", instanceStatus.State())
return ctrl.Result{RequeueAfter: waitForInstanceBecomeActiveToReconcile}, nil
}
@@ -410,7 +410,7 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope
}

if fp.PortID != "" {
scope.Logger().Info("Floating IP already associated to a port:", "id", fp.ID, "fixed ip", fp.FixedIP, "portID", port.ID)
scope.Logger().Info("Floating IP already associated to a port", "id", fp.ID, "fixedIP", fp.FixedIP, "portID", port.ID)
} else {
err = networkingService.AssociateFloatingIP(openStackMachine, fp, port.ID)
if err != nil {
@@ -433,7 +433,7 @@ func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *cl

if instanceStatus == nil {
instanceSpec := machineToInstanceSpec(openStackCluster, machine, openStackMachine, userData)
logger.Info("Machine not exist, Creating Machine", "Machine", openStackMachine.Name)
logger.Info("Machine does not exist, creating Machine", "name", openStackMachine.Name)
instanceStatus, err = computeService.CreateInstance(openStackMachine, openStackCluster, instanceSpec, cluster.Name, false)
if err != nil {
conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
14 changes: 7 additions & 7 deletions pkg/cloud/services/loadbalancer/loadbalancer.go
@@ -169,7 +169,7 @@ func (s *Service) getOrCreateLoadBalancer(openStackCluster *infrav1.OpenStackClu
return lb, nil
}

s.scope.Logger().Info(fmt.Sprintf("Creating load balancer in subnet: %q", subnetID), "name", loadBalancerName)
s.scope.Logger().Info("Creating load balancer in subnet", "subnetID", subnetID, "name", loadBalancerName)

lbCreateOpts := loadbalancers.CreateOpts{
Name: loadBalancerName,
@@ -199,7 +199,7 @@ func (s *Service) getOrCreateListener(openStackCluster *infrav1.OpenStackCluster
return listener, nil
}

s.scope.Logger().Info("Creating load balancer listener", "name", listenerName, "lb-id", lbID)
s.scope.Logger().Info("Creating load balancer listener", "name", listenerName, "loadBalancerID", lbID)

listenerCreateOpts := listeners.CreateOpts{
Name: listenerName,
@@ -267,7 +267,7 @@ func (s *Service) getOrUpdateAllowedCIDRS(openStackCluster *infrav1.OpenStackClu
listener.AllowedCIDRs = capostrings.Unique(listener.AllowedCIDRs)

if !reflect.DeepEqual(allowedCIDRs, listener.AllowedCIDRs) {
s.scope.Logger().Info("CIDRs do not match, start to update listener", "expected CIDRs", allowedCIDRs, "load balancer existing CIDR", listener.AllowedCIDRs)
s.scope.Logger().Info("CIDRs do not match, updating listener", "expectedCIDRs", allowedCIDRs, "currentCIDRs", listener.AllowedCIDRs)
listenerUpdateOpts := listeners.UpdateOpts{
AllowedCIDRs: &allowedCIDRs,
}
@@ -316,7 +316,7 @@ func (s *Service) getOrCreatePool(openStackCluster *infrav1.OpenStackCluster, po
return pool, nil
}

s.scope.Logger().Info(fmt.Sprintf("Creating load balancer pool for listener %q", listenerID), "name", poolName, "lb-id", lbID)
s.scope.Logger().Info("Creating load balancer pool for listener", "loadBalancerID", lbID, "listenerID", listenerID, "name", poolName)

method := pools.LBMethodRoundRobin

@@ -356,7 +356,7 @@ func (s *Service) getOrCreateMonitor(openStackCluster *infrav1.OpenStackCluster,
return nil
}

s.scope.Logger().Info(fmt.Sprintf("Creating load balancer monitor for pool %q", poolID), "name", monitorName, "lb-id", lbID)
s.scope.Logger().Info("Creating load balancer monitor for pool", "loadBalancerID", lbID, "name", monitorName, "poolID", poolID)

monitorCreateOpts := monitors.CreateOpts{
Name: monitorName,
@@ -400,7 +400,7 @@ func (s *Service) ReconcileLoadBalancerMember(openStackCluster *infrav1.OpenStac
}

loadBalancerName := getLoadBalancerName(clusterName)
s.scope.Logger().Info("Reconciling load balancer member", "name", loadBalancerName)
s.scope.Logger().Info("Reconciling load balancer member", "loadBalancerName", loadBalancerName)

lbID := openStackCluster.Status.APIServerLoadBalancer.ID
portList := []int{int(openStackCluster.Spec.ControlPlaneEndpoint.Port)}
@@ -429,7 +429,7 @@ func (s *Service) ReconcileLoadBalancerMember(openStackCluster *infrav1.OpenStac
continue
}

s.scope.Logger().Info("Deleting load balancer member (because the IP of the machine changed)", "name", name)
s.scope.Logger().Info("Deleting load balancer member because the IP of the machine changed", "name", name)

// lb member changed so let's delete it so we can create it again with the correct IP
err = s.waitForLoadBalancerActive(lbID)
