From 2d74c2c142a24325aa252b5568ba58aa066a3709 Mon Sep 17 00:00:00 2001
From: Amy Chen
Date: Tue, 25 Jun 2019 16:20:45 -0700
Subject: [PATCH] Add logger to actuators

---
 actuators/cluster.go     | 17 ++++-----
 actuators/machine.go     | 76 ++++++++++++++++++----------------
 cmd/capd-manager/main.go | 21 ++++++++---
 go.mod                   |  3 +-
 logger/logger.go         | 16 +++++++++
 5 files changed, 76 insertions(+), 57 deletions(-)
 create mode 100644 logger/logger.go

diff --git a/actuators/cluster.go b/actuators/cluster.go
index e87f64e..9f92eb3 100644
--- a/actuators/cluster.go
+++ b/actuators/cluster.go
@@ -17,39 +17,34 @@ limitations under the License.
 package actuators
 
 import (
-	"fmt"
-
+	"github.com/go-logr/logr"
 	"sigs.k8s.io/cluster-api-provider-docker/kind/actions"
 	clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
 )
 
 // Cluster defines a cluster actuator object
 type Cluster struct {
-}
-
-// NewClusterActuator returns a new cluster actuator object
-func NewClusterActuator() *Cluster {
-	return &Cluster{}
+	Log logr.Logger
 }
 
 // Reconcile setups an external load balancer for the cluster if needed
 func (c *Cluster) Reconcile(cluster *clusterv1.Cluster) error {
 	elb, err := getExternalLoadBalancerNode(cluster.Name)
 	if err != nil {
-		fmt.Printf("%+v\n", err)
+		c.Log.Error(err, "Error getting external load balancer node")
 		return err
 	}
 	if elb != nil {
-		fmt.Println("External Load Balancer already exists. Nothing to do for this cluster.")
+		c.Log.Info("External Load Balancer already exists. Nothing to do for this cluster.")
 		return nil
 	}
-	fmt.Printf("The cluster named %q has been created! Setting up some infrastructure.\n", cluster.Name)
+	c.Log.Info("Cluster has been created! Setting up some infrastructure", "cluster-name", cluster.Name)
 	_, err = actions.SetUpLoadBalancer(cluster.Name)
 	return err
 }
 
 // Delete can be used to delete a cluster
 func (c *Cluster) Delete(cluster *clusterv1.Cluster) error {
-	fmt.Println("Cluster delete is not implemented.")
+	c.Log.Info("Cluster delete is not implemented.")
 	return nil
 }
diff --git a/actuators/machine.go b/actuators/machine.go
index 08aa6c9..e94b639 100644
--- a/actuators/machine.go
+++ b/actuators/machine.go
@@ -22,6 +22,7 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/go-logr/logr"
 	apicorev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -45,51 +46,44 @@ const (
 type Machine struct {
 	Core       corev1.CoreV1Interface
 	ClusterAPI v1alpha1.ClusterV1alpha1Interface
-}
-
-// NewMachineActuator returns a new machine actuator object
-func NewMachineActuator(clusterapi v1alpha1.ClusterV1alpha1Interface, core corev1.CoreV1Interface) *Machine {
-	return &Machine{
-		Core:       core,
-		ClusterAPI: clusterapi,
-	}
+	Log        logr.Logger
 }
 
 // Create creates a machine for a given cluster
 // Note: have to print all the errors because cluster-api swallows them
 func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clusterv1.Machine) error {
 	old := machine.DeepCopy()
-	fmt.Printf("Creating a machine for cluster %q\n", c.Name)
+	m.Log.Info("Creating a machine for cluster", "cluster-name", c.Name)
 	clusterExists, err := cluster.IsKnown(c.Name)
 	if err != nil {
-		fmt.Printf("%+v", err)
+		m.Log.Error(err, "Error finding cluster-name", "cluster", c.Name)
 		return err
 	}
 	// If there's no cluster, requeue the request until there is one
 	if !clusterExists {
-		fmt.Println("There is no cluster yet, waiting for a cluster before creating machines")
+		m.Log.Info("There is no cluster yet, waiting for a cluster before creating machines")
 		return &capierror.RequeueAfterError{RequeueAfter: 30 * time.Second}
 	}
 	controlPlanes, err := actions.ListControlPlanes(c.Name)
 	if err != nil {
-		fmt.Printf("%+v\n", err)
+		m.Log.Error(err, "Error listing control planes")
 		return err
 	}
-	fmt.Printf("Is there a cluster? %v\n", clusterExists)
+	m.Log.Info("Is there a cluster?", "cluster-exists", clusterExists)
 	setValue := getRole(machine)
-	fmt.Printf("This node has a role of %q\n", setValue)
+	m.Log.Info("This node has a role", "role", setValue)
 	if setValue == clusterAPIControlPlaneSetLabel {
 		if len(controlPlanes) > 0 {
-			fmt.Println("Adding a control plane")
+			m.Log.Info("Adding a control plane")
 			controlPlaneNode, err := actions.AddControlPlane(c.Name, machine.GetName(), machine.Spec.Versions.ControlPlane)
 			if err != nil {
-				fmt.Printf("%+v", err)
+				m.Log.Error(err, "Error adding control plane")
 				return err
 			}
 			nodeUID, err := actions.GetNodeRefUID(c.GetName(), controlPlaneNode.Name())
 			if err != nil {
-				fmt.Printf("%+v", err)
+				m.Log.Error(err, "Error getting node reference UID")
 				return err
 			}
 			providerID := providerID(controlPlaneNode.Name())
@@ -97,42 +91,42 @@ func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clu
 			return m.save(old, machine, getNodeRef(controlPlaneNode.Name(), nodeUID))
 		}
 
-		fmt.Println("Creating a brand new cluster")
+		m.Log.Info("Creating a brand new cluster")
 		elb, err := getExternalLoadBalancerNode(c.Name)
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error getting external load balancer node")
 			return err
 		}
 		lbip, err := elb.IP()
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error getting node IP address")
 			return err
 		}
 		controlPlaneNode, err := actions.CreateControlPlane(c.Name, machine.GetName(), lbip, machine.Spec.Versions.ControlPlane)
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error creating control plane")
 			return err
 		}
 		nodeUID, err := actions.GetNodeRefUID(c.GetName(), controlPlaneNode.Name())
 		if err != nil {
-			fmt.Printf("%+v", err)
+			m.Log.Error(err, "Error getting node reference UID")
 			return err
 		}
 		// set the machine's providerID
 		providerID := providerID(controlPlaneNode.Name())
 		machine.Spec.ProviderID = &providerID
 		if err := m.save(old, machine, getNodeRef(controlPlaneNode.Name(), nodeUID)); err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error setting machine's provider ID")
 			return err
 		}
 		s, err := kubeconfigToSecret(c.Name, c.Namespace)
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error converting kubeconfig to a secret")
 			return err
 		}
 		// Save the secret to the management cluster
 		if _, err := m.Core.Secrets(machine.GetNamespace()).Create(s); err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error saving secret to management cluster")
 			return err
 		}
 		return nil
@@ -140,21 +134,21 @@ func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clu
 
 	// If there are no control plane then we should hold off on joining workers
 	if len(controlPlanes) == 0 {
-		fmt.Printf("Sending machine %q back since there is no cluster to join\n", machine.Name)
+		m.Log.Info("Sending machine back since there is no cluster to join", "machine", machine.Name)
 		return &capierror.RequeueAfterError{RequeueAfter: 30 * time.Second}
 	}
-	fmt.Println("Creating a new worker node")
+	m.Log.Info("Creating a new worker node")
 	worker, err := actions.AddWorker(c.Name, machine.GetName(), machine.Spec.Versions.Kubelet)
 	if err != nil {
-		fmt.Printf("%+v", err)
+		m.Log.Error(err, "Error creating new worker node")
 		return err
 	}
 	providerID := providerID(worker.Name())
 	machine.Spec.ProviderID = &providerID
 	nodeUID, err := actions.GetNodeRefUID(c.GetName(), worker.Name())
 	if err != nil {
-		fmt.Printf("%+v", err)
+		m.Log.Error(err, "Error getting node reference ID")
 		return err
 	}
 	return m.save(old, machine, getNodeRef(worker.Name(), nodeUID))
@@ -169,10 +163,10 @@ func (m *Machine) Delete(ctx context.Context, cluster *clusterv1.Cluster, machin
 	if exists {
 		setValue := getRole(machine)
 		if setValue == clusterAPIControlPlaneSetLabel {
-			fmt.Printf("Deleting a control plane: %q\n", machine.GetName())
+			m.Log.Info("Deleting a control plane", "machine", machine.GetName())
 			return actions.DeleteControlPlane(cluster.Name, machine.GetName())
 		}
-		fmt.Printf("Deleting a worker: %q\n", machine.GetName())
+		m.Log.Info("Deleting a worker", "machine", machine.GetName())
 		return actions.DeleteWorker(cluster.Name, machine.GetName())
 	}
 	return nil
@@ -180,7 +174,7 @@ func (m *Machine) Delete(ctx context.Context, cluster *clusterv1.Cluster, machin
 
 // Update updates a machine
 func (m *Machine) Update(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
-	fmt.Println("Update machine is not implemented yet.")
+	m.Log.Info("Update machine is not implemented yet")
 	return nil
 }
 
@@ -197,41 +191,41 @@ func (m *Machine) Exists(ctx context.Context, cluster *clusterv1.Cluster, machin
 		fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, cluster.Name),
 		fmt.Sprintf("name=^%s$", machine.GetName()),
 	}
-	fmt.Printf("using labels: %v\n", labels)
+	m.Log.Info("using labels", "labels", labels)
 	nodeList, err := nodes.List(labels...)
 	if err != nil {
 		return false, err
 	}
-	fmt.Printf("found nodes: %v\n", nodeList)
+	m.Log.Info("found nodes", "nodes", nodeList)
 	return len(nodeList) >= 1, nil
 }
 
 // patches the object and saves the status.
 func (m *Machine) save(oldMachine, newMachine *clusterv1.Machine, noderef *apicorev1.ObjectReference) error {
-	fmt.Println("updating machine")
+	m.Log.Info("updating machine")
 	p, err := patch.NewJSONPatch(oldMachine, newMachine)
 	if err != nil {
-		fmt.Printf("%+v\n", err)
+		m.Log.Error(err, "Error updating machine")
 		return err
 	}
-	fmt.Println("Patches for machine", p)
+	m.Log.Info("Patches for machine", "patches", p)
 	if len(p) != 0 {
 		pb, err := json.MarshalIndent(p, "", " ")
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error marshalling machine")
 			return err
 		}
 		newMachine, err = m.ClusterAPI.Machines(oldMachine.Namespace).Patch(newMachine.Name, types.JSONPatchType, pb)
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error patching machine")
 			return err
 		}
-		fmt.Println("updated machine")
+		m.Log.Info("updated machine")
 	}
 	// set the noderef after so we don't try and patch it in during the first update
 	newMachine.Status.NodeRef = noderef
 	if _, err := m.ClusterAPI.Machines(oldMachine.Namespace).UpdateStatus(newMachine); err != nil {
-		fmt.Printf("%+v\n", err)
+		m.Log.Error(err, "Error setting node reference")
 		return err
 	}
 	return nil
diff --git a/cmd/capd-manager/main.go b/cmd/capd-manager/main.go
index de37527..f015a6e 100644
--- a/cmd/capd-manager/main.go
+++ b/cmd/capd-manager/main.go
@@ -21,7 +21,9 @@ import (
 	"time"
 
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/klog/klogr"
 	"sigs.k8s.io/cluster-api-provider-docker/actuators"
+	"sigs.k8s.io/cluster-api-provider-docker/logger"
 	"sigs.k8s.io/cluster-api/pkg/apis"
 	"sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
 	"sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset"
@@ -57,8 +59,19 @@ func main() {
 		panic(err)
 	}
 
-	clusterActuator := actuators.NewClusterActuator()
-	machineActuator := actuators.NewMachineActuator(cs.ClusterV1alpha1(), k8sclientset.CoreV1())
+	clusterLogger := logger.Log{}
+	clusterLogger.Logger = klogr.New().WithName("[cluster-actuator]")
+	clusterActuator := actuators.Cluster{
+		Log: clusterLogger,
+	}
+
+	machineLogger := logger.Log{}
+	machineLogger.Logger = klogr.New().WithName("[machine-actuator]")
+	machineActuator := actuators.Machine{
+		Core:       k8sclientset.CoreV1(),
+		ClusterAPI: cs.ClusterV1alpha1(),
+		Log:        machineLogger,
+	}
 
 	// Register our cluster deployer (the interface is in clusterctl and we define the Deployer interface on the actuator)
 	common.RegisterClusterProvisioner("docker", clusterActuator)
@@ -66,10 +79,10 @@
 		panic(err)
 	}
 
-	if err := capimachine.AddWithActuator(mgr, machineActuator); err != nil {
+	if err := capimachine.AddWithActuator(mgr, &machineActuator); err != nil {
 		panic(err)
 	}
-	if err := capicluster.AddWithActuator(mgr, clusterActuator); err != nil {
+	if err := capicluster.AddWithActuator(mgr, &clusterActuator); err != nil {
 		panic(err)
 	}
 	fmt.Println("starting the controller...!")
diff --git a/go.mod b/go.mod
index 24ceaab..b41ff9a 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.12
 
 require (
 	github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 // indirect
-	github.com/go-logr/logr v0.1.0 // indirect
+	github.com/go-logr/logr v0.1.0
 	github.com/go-logr/zapr v0.1.1 // indirect
 	github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect
 	github.com/google/btree v1.0.0 // indirect
@@ -25,6 +25,7 @@ require (
 	k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d
 	k8s.io/client-go v11.0.0+incompatible
 	k8s.io/cluster-bootstrap v0.0.0-20181213155137-5f9271efc2e7 // indirect
+	k8s.io/klog v0.3.0
 	k8s.io/kubernetes v1.13.1
 	sigs.k8s.io/cluster-api v0.0.0-20190607141803-aacb0c613ffb
 	sigs.k8s.io/controller-runtime v0.1.10
diff --git a/logger/logger.go b/logger/logger.go
new file mode 100644
index 0000000..4661e41
--- /dev/null
+++ b/logger/logger.go
@@ -0,0 +1,16 @@
+package logger
+
+import (
+	"fmt"
+
+	"github.com/go-logr/logr"
+)
+
+// Log is a logr.Logger wrapper that attaches the error's stacktrace to every logged error
+type Log struct {
+	logr.Logger
+}
+
+func (k Log) Error(err error, msg string, keysAndValues ...interface{}) {
+	k.Logger.Error(err, msg, append([]interface{}{"stacktrace", fmt.Sprintf("%+v", err)}, keysAndValues...)...)
+}
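
Reviewer note (not part of the patch): below is a minimal, untested sketch of how the new logger.Log wrapper is meant to be exercised, wired up the same way cmd/capd-manager/main.go does it with the klogr/logr versions pinned in go.mod above. The cluster and machine names are placeholders for illustration only.

package main

import (
	"errors"

	"k8s.io/klog/klogr"

	"sigs.k8s.io/cluster-api-provider-docker/logger"
)

func main() {
	// Build a named, klogr-backed logger, mirroring what main.go does for each actuator.
	log := logger.Log{}
	log.Logger = klogr.New().WithName("[machine-actuator]")

	// Structured key/value logging replaces the old fmt.Printf calls in the actuators.
	log.Info("Creating a machine for cluster", "cluster-name", "example-cluster")

	// The Log wrapper renders the error with %+v under a "stacktrace" key, so the
	// full context survives even though cluster-api swallows the returned error.
	log.Error(errors.New("boom"), "Error adding control plane", "machine", "example-machine-0")
}

Because the wrapper only prepends the "stacktrace" pair before delegating to the embedded logr.Logger, any key/value pairs passed at the call site still come through unchanged.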