This repository has been archived by the owner on Sep 24, 2021. It is now read-only.

Add logger to actuators #27

Merged
merged 1 commit on Jul 2, 2019
17 changes: 6 additions & 11 deletions actuators/cluster.go
@@ -17,39 +17,34 @@ limitations under the License.
 package actuators
 
 import (
-	"fmt"
-
+	"github.com/go-logr/logr"
 	"sigs.k8s.io/cluster-api-provider-docker/kind/actions"
 	clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
 )
 
 // Cluster defines a cluster actuator object
 type Cluster struct {
-}
-
-// NewClusterActuator returns a new cluster actuator object
-func NewClusterActuator() *Cluster {
-	return &Cluster{}
+	Log logr.Logger
 }
 
 // Reconcile sets up an external load balancer for the cluster if needed
 func (c *Cluster) Reconcile(cluster *clusterv1.Cluster) error {
 	elb, err := getExternalLoadBalancerNode(cluster.Name)
 	if err != nil {
-		fmt.Printf("%+v\n", err)
+		c.Log.Error(err, "Error getting external load balancer node")
 		return err
 	}
 	if elb != nil {
-		fmt.Println("External Load Balancer already exists. Nothing to do for this cluster.")
+		c.Log.Info("External Load Balancer already exists. Nothing to do for this cluster.")
 		return nil
 	}
-	fmt.Printf("The cluster named %q has been created! Setting up some infrastructure.\n", cluster.Name)
+	c.Log.Info("Cluster has been created! Setting up some infrastructure", "cluster-name", cluster.Name)
 	_, err = actions.SetUpLoadBalancer(cluster.Name)
 	return err
 }
 
 // Delete can be used to delete a cluster
 func (c *Cluster) Delete(cluster *clusterv1.Cluster) error {
-	fmt.Println("Cluster delete is not implemented.")
+	c.Log.Info("Cluster delete is not implemented.")
 	return nil
 }
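
The pattern above repeats across the PR: each format-string print becomes a constant message plus key/value pairs, and the logger is injected as a struct field now that the NewClusterActuator constructor is gone. A minimal sketch of the new call pattern (the klogr wiring mirrors cmd/capd-manager/main.go below; the cluster name is a made-up placeholder):

package main

import (
	"errors"

	"k8s.io/klog/klogr"
)

func main() {
	// Any logr.Logger implementation works; klogr is what this PR wires up.
	log := klogr.New().WithName("[cluster-actuator]")

	// Structured logging: the message stays constant and values travel
	// as key/value pairs, so log aggregators can index them.
	log.Info("Cluster has been created! Setting up some infrastructure", "cluster-name", "example-cluster")

	// Error takes the error first, then a message, then optional pairs.
	log.Error(errors.New("dial tcp: connection refused"), "Error getting external load balancer node")
}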
76 changes: 35 additions & 41 deletions actuators/machine.go
@@ -22,6 +22,7 @@ import (
"fmt"
"time"

"github.com/go-logr/logr"
apicorev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -45,116 +46,109 @@ const (
 type Machine struct {
 	Core       corev1.CoreV1Interface
 	ClusterAPI v1alpha1.ClusterV1alpha1Interface
-}
-
-// NewMachineActuator returns a new machine actuator object
-func NewMachineActuator(clusterapi v1alpha1.ClusterV1alpha1Interface, core corev1.CoreV1Interface) *Machine {
-	return &Machine{
-		Core:       core,
-		ClusterAPI: clusterapi,
-	}
+	Log        logr.Logger
 }
 
 // Create creates a machine for a given cluster
 // Note: have to print all the errors because cluster-api swallows them
 func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clusterv1.Machine) error {
 	old := machine.DeepCopy()
-	fmt.Printf("Creating a machine for cluster %q\n", c.Name)
+	m.Log.Info("Creating a machine for cluster", "cluster-name", c.Name)
 	clusterExists, err := cluster.IsKnown(c.Name)
 	if err != nil {
-		fmt.Printf("%+v", err)
+		m.Log.Error(err, "Error finding cluster-name", "cluster", c.Name)
 		return err
 	}
 	// If there's no cluster, requeue the request until there is one
 	if !clusterExists {
-		fmt.Println("There is no cluster yet, waiting for a cluster before creating machines")
+		m.Log.Info("There is no cluster yet, waiting for a cluster before creating machines")
 		return &capierror.RequeueAfterError{RequeueAfter: 30 * time.Second}
 	}
 
 	controlPlanes, err := actions.ListControlPlanes(c.Name)
 	if err != nil {
-		fmt.Printf("%+v\n", err)
+		m.Log.Error(err, "Error listing control planes")
 		return err
 	}
-	fmt.Printf("Is there a cluster? %v\n", clusterExists)
+	m.Log.Info("Is there a cluster?", "cluster-exists", clusterExists)
 	setValue := getRole(machine)
-	fmt.Printf("This node has a role of %q\n", setValue)
+	m.Log.Info("This node has a role", "role", setValue)
 	if setValue == clusterAPIControlPlaneSetLabel {
 		if len(controlPlanes) > 0 {
-			fmt.Println("Adding a control plane")
+			m.Log.Info("Adding a control plane")
 			controlPlaneNode, err := actions.AddControlPlane(c.Name, machine.GetName(), machine.Spec.Versions.ControlPlane)
 			if err != nil {
-				fmt.Printf("%+v", err)
+				m.Log.Error(err, "Error adding control plane")
 				return err
 			}
 			nodeUID, err := actions.GetNodeRefUID(c.GetName(), controlPlaneNode.Name())
 			if err != nil {
-				fmt.Printf("%+v", err)
+				m.Log.Error(err, "Error getting node reference UID")
 				return err
 			}
 			providerID := providerID(controlPlaneNode.Name())
 			machine.Spec.ProviderID = &providerID
 			return m.save(old, machine, getNodeRef(controlPlaneNode.Name(), nodeUID))
 		}
 
-		fmt.Println("Creating a brand new cluster")
+		m.Log.Info("Creating a brand new cluster")
 		elb, err := getExternalLoadBalancerNode(c.Name)
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error getting external load balancer node")
 			return err
 		}
 		lbip, err := elb.IP()
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error getting node IP address")
 			return err
 		}
 		controlPlaneNode, err := actions.CreateControlPlane(c.Name, machine.GetName(), lbip, machine.Spec.Versions.ControlPlane)
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error creating control plane")
 			return err
 		}
 		nodeUID, err := actions.GetNodeRefUID(c.GetName(), controlPlaneNode.Name())
 		if err != nil {
-			fmt.Printf("%+v", err)
+			m.Log.Error(err, "Error getting node reference UID")
 			return err
 		}
 		// set the machine's providerID
 		providerID := providerID(controlPlaneNode.Name())
 		machine.Spec.ProviderID = &providerID
 		if err := m.save(old, machine, getNodeRef(controlPlaneNode.Name(), nodeUID)); err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error setting machine's provider ID")
 			return err
 		}
 		s, err := kubeconfigToSecret(c.Name, c.Namespace)
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error converting kubeconfig to a secret")
 			return err
 		}
 		// Save the secret to the management cluster
 		if _, err := m.Core.Secrets(machine.GetNamespace()).Create(s); err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error saving secret to management cluster")
 			return err
 		}
 		return nil
 	}
 
 	// If there are no control planes then we should hold off on joining workers
 	if len(controlPlanes) == 0 {
-		fmt.Printf("Sending machine %q back since there is no cluster to join\n", machine.Name)
+		m.Log.Info("Sending machine back since there is no cluster to join", "machine", machine.Name)
 		return &capierror.RequeueAfterError{RequeueAfter: 30 * time.Second}
 	}
 
-	fmt.Println("Creating a new worker node")
+	m.Log.Info("Creating a new worker node")
 	worker, err := actions.AddWorker(c.Name, machine.GetName(), machine.Spec.Versions.Kubelet)
 	if err != nil {
-		fmt.Printf("%+v", err)
+		m.Log.Error(err, "Error creating new worker node")
 		return err
 	}
 	providerID := providerID(worker.Name())
 	machine.Spec.ProviderID = &providerID
 	nodeUID, err := actions.GetNodeRefUID(c.GetName(), worker.Name())
 	if err != nil {
-		fmt.Printf("%+v", err)
+		m.Log.Error(err, "Error getting node reference ID")
 		return err
 	}
 	return m.save(old, machine, getNodeRef(worker.Name(), nodeUID))
@@ -169,18 +163,18 @@ func (m *Machine) Delete(ctx context.Context, cluster *clusterv1.Cluster, machin
 	if exists {
 		setValue := getRole(machine)
 		if setValue == clusterAPIControlPlaneSetLabel {
-			fmt.Printf("Deleting a control plane: %q\n", machine.GetName())
+			m.Log.Info("Deleting a control plane", "machine", machine.GetName())
 			return actions.DeleteControlPlane(cluster.Name, machine.GetName())
 		}
-		fmt.Printf("Deleting a worker: %q\n", machine.GetName())
+		m.Log.Info("Deleting a worker", "machine", machine.GetName())
 		return actions.DeleteWorker(cluster.Name, machine.GetName())
 	}
 	return nil
 }
 
 // Update updates a machine
 func (m *Machine) Update(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
-	fmt.Println("Update machine is not implemented yet.")
+	m.Log.Info("Update machine is not implemented yet")
 	return nil
 }

@@ -197,41 +191,41 @@ func (m *Machine) Exists(ctx context.Context, cluster *clusterv1.Cluster, machin
 		fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, cluster.Name),
 		fmt.Sprintf("name=^%s$", machine.GetName()),
 	}
-	fmt.Printf("using labels: %v\n", labels)
+	m.Log.Info("using labels", "labels", labels)
 	nodeList, err := nodes.List(labels...)
 	if err != nil {
 		return false, err
 	}
-	fmt.Printf("found nodes: %v\n", nodeList)
+	m.Log.Info("found nodes", "nodes", nodeList)
 	return len(nodeList) >= 1, nil
 }
 
 // patches the object and saves the status.
 func (m *Machine) save(oldMachine, newMachine *clusterv1.Machine, noderef *apicorev1.ObjectReference) error {
-	fmt.Println("updating machine")
+	m.Log.Info("updating machine")
 	p, err := patch.NewJSONPatch(oldMachine, newMachine)
 	if err != nil {
-		fmt.Printf("%+v\n", err)
+		m.Log.Error(err, "Error updating machine")
 		return err
 	}
-	fmt.Println("Patches for machine", p)
+	m.Log.Info("Patches for machine", "patches", p)
 	if len(p) != 0 {
 		pb, err := json.MarshalIndent(p, "", " ")
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error marshalling machine")
 			return err
 		}
 		newMachine, err = m.ClusterAPI.Machines(oldMachine.Namespace).Patch(newMachine.Name, types.JSONPatchType, pb)
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error patching machine")
 			return err
 		}
-		fmt.Println("updated machine")
+		m.Log.Info("updated machine")
 	}
 	// set the noderef after so we don't try and patch it in during the first update
 	newMachine.Status.NodeRef = noderef
 	if _, err := m.ClusterAPI.Machines(oldMachine.Namespace).UpdateStatus(newMachine); err != nil {
-		fmt.Printf("%+v\n", err)
+		m.Log.Error(err, "Error setting node reference")
 		return err
 	}
 	return nil
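
Every converted call site above leans on one logr convention: the variadic arguments form alternating key/value pairs, and keys must be strings. A short sketch of the pitfall, with hypothetical values:

package main

import "k8s.io/klog/klogr"

func main() {
	log := klogr.New().WithName("[machine-actuator]")
	nodeList := []string{"node-a", "node-b"}

	// Good: one key/value pair; the key is a string.
	log.Info("found nodes", "nodes", nodeList)

	// Bad: a bare value with no key leaves an odd number of arguments;
	// behavior is sink-dependent (garbled output or even a panic), so
	// the call is left commented out:
	// log.Info("found nodes", nodeList)
}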
21 changes: 17 additions & 4 deletions cmd/capd-manager/main.go
@@ -21,7 +21,9 @@ import (
"time"

"k8s.io/client-go/kubernetes"
"k8s.io/klog/klogr"
"sigs.k8s.io/cluster-api-provider-docker/actuators"
"sigs.k8s.io/cluster-api-provider-docker/logger"
"sigs.k8s.io/cluster-api/pkg/apis"
"sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
"sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset"
@@ -57,19 +59,30 @@ func main() {
 		panic(err)
 	}
 
-	clusterActuator := actuators.NewClusterActuator()
-	machineActuator := actuators.NewMachineActuator(cs.ClusterV1alpha1(), k8sclientset.CoreV1())
+	clusterLogger := logger.Log{}
+	clusterLogger.Logger = klogr.New().WithName("[cluster-actuator]")
+	clusterActuator := actuators.Cluster{
+		Log: clusterLogger,
+	}
+
+	machineLogger := logger.Log{}
+	machineLogger.Logger = klogr.New().WithName("[machine-actuator]")
+	machineActuator := actuators.Machine{
+		Core:       k8sclientset.CoreV1(),
+		ClusterAPI: cs.ClusterV1alpha1(),
+		Log:        machineLogger,
+	}
 
 	// Register our cluster deployer (the interface is in clusterctl and we define the Deployer interface on the actuator)
 	common.RegisterClusterProvisioner("docker", clusterActuator)
 	if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
 		panic(err)
 	}
 
-	if err := capimachine.AddWithActuator(mgr, machineActuator); err != nil {
+	if err := capimachine.AddWithActuator(mgr, &machineActuator); err != nil {
 		panic(err)
 	}
-	if err := capicluster.AddWithActuator(mgr, clusterActuator); err != nil {
+	if err := capicluster.AddWithActuator(mgr, &clusterActuator); err != nil {
 		panic(err)
 	}
 	fmt.Println("starting the controller...!")
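
One caveat on the wiring above: klogr emits through klog, which by default buffers to files rather than stderr (inherited glog behavior). A hedged sketch of how a main() could surface the output; the flag names follow klog v0.3.0 and are worth verifying against the pinned version:

package main

import (
	"flag"

	"k8s.io/klog"
	"k8s.io/klog/klogr"
)

func main() {
	// Register klog's flags on a private FlagSet so they don't collide
	// with the manager's own flags.
	klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
	klog.InitFlags(klogFlags)
	_ = klogFlags.Set("logtostderr", "true") // default is file output
	_ = klogFlags.Set("v", "2")              // verbosity threshold for V(n) logs

	log := klogr.New().WithName("[cluster-actuator]")
	log.Info("starting the controller")
}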
3 changes: 2 additions & 1 deletion go.mod
@@ -4,7 +4,7 @@ go 1.12

 require (
 	github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 // indirect
-	github.com/go-logr/logr v0.1.0 // indirect
+	github.com/go-logr/logr v0.1.0
 	github.com/go-logr/zapr v0.1.1 // indirect
 	github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect
 	github.com/google/btree v1.0.0 // indirect
@@ -25,6 +25,7 @@ require (
 	k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d
 	k8s.io/client-go v11.0.0+incompatible
 	k8s.io/cluster-bootstrap v0.0.0-20181213155137-5f9271efc2e7 // indirect
+	k8s.io/klog v0.3.0
 	k8s.io/kubernetes v1.13.1
 	sigs.k8s.io/cluster-api v0.0.0-20190607141803-aacb0c613ffb
 	sigs.k8s.io/controller-runtime v0.1.10
32 changes: 32 additions & 0 deletions logger/logger.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logger
+
+import (
+	"fmt"
+
+	"github.com/go-logr/logr"
+)
+
+// Log is a wrapper to add a stacktrace to the Error message
+type Log struct {
+	logr.Logger
+}
+
+func (k Log) Error(err error, msg string, keysAndValues ...interface{}) {
+	k.Logger.Error(err, msg, append([]interface{}{"stacktrace", fmt.Sprintf("%+v", err)}, keysAndValues...)...)
+}
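
The reason for the wrapper: errors produced with github.com/pkg/errors render their stack trace under the %+v verb, which the old fmt.Printf("%+v", err) calls relied on. Plain logr would drop that trace, so this override re-attaches it as a structured "stacktrace" field before passing any caller-supplied pairs along. A usage sketch (the pkg/errors dependency and the names here are illustrative assumptions):

package main

import (
	"github.com/pkg/errors"
	"k8s.io/klog/klogr"

	"sigs.k8s.io/cluster-api-provider-docker/logger"
)

func main() {
	log := logger.Log{}
	log.Logger = klogr.New().WithName("[cluster-actuator]")

	// pkg/errors attaches a stack trace at creation time.
	err := errors.New("connection refused")

	// The override logs err, the message, a "stacktrace" key rendered
	// via %+v, and then any caller-supplied key/value pairs.
	log.Error(err, "Error getting external load balancer node", "cluster-name", "example-cluster")
}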