
Commit 2592843: Add logger to actuators

Amy Chen committed Jun 26, 2019
1 parent e9226bc commit 2592843
Showing 3 changed files with 43 additions and 53 deletions.
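
Note: this commit removes the NewClusterActuator and NewMachineActuator constructors (see the diffs below), so callers now build the actuators as struct literals and inject any logr.Logger implementation through the new exported Log field. A minimal caller-side sketch, not part of this commit; the zapr backend is assumed only because it already appears in go.mod, and all names here are illustrative:

    package main

    import (
    	"github.com/go-logr/zapr"
    	"go.uber.org/zap"

    	"sigs.k8s.io/cluster-api-provider-docker/actuators"
    )

    func main() {
    	// Build a logr.Logger backed by zap; any logr implementation works.
    	zapLog, err := zap.NewDevelopment()
    	if err != nil {
    		panic(err)
    	}
    	logger := zapr.NewLogger(zapLog)

    	// With the constructors gone, exported fields are set directly.
    	clusterActuator := &actuators.Cluster{
    		Log: logger.WithName("cluster-actuator"),
    	}
    	_ = clusterActuator // wired into the cluster-api controller elsewhere
    }
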
17 changes: 6 additions & 11 deletions actuators/cluster.go
@@ -17,39 +17,34 @@ limitations under the License.
 package actuators
 
 import (
-	"fmt"
-
+	"github.com/go-logr/logr"
 	"sigs.k8s.io/cluster-api-provider-docker/kind/actions"
 	clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
 )
 
 // Cluster defines a cluster actuator object
 type Cluster struct {
-}
-
-// NewClusterActuator returns a new cluster actuator object
-func NewClusterActuator() *Cluster {
-	return &Cluster{}
+	Log logr.Logger
 }
 
 // Reconcile sets up an external load balancer for the cluster if needed
 func (c *Cluster) Reconcile(cluster *clusterv1.Cluster) error {
 	elb, err := getExternalLoadBalancerNode(cluster.Name)
 	if err != nil {
-		fmt.Printf("%+v\n", err)
+		c.Log.Error(err, "Error getting external load balancer node")
 		return err
 	}
 	if elb != nil {
-		fmt.Println("External Load Balancer already exists. Nothing to do for this cluster.")
+		c.Log.Info("External Load Balancer already exists. Nothing to do for this cluster.")
 		return nil
 	}
-	fmt.Printf("The cluster named %q has been created! Setting up some infrastructure.\n", cluster.Name)
+	c.Log.Info("Cluster has been created! Setting up some infrastructure", "cluster-name", cluster.Name)
 	_, err = actions.SetUpLoadBalancer(cluster.Name)
 	return err
 }
 
 // Delete can be used to delete a cluster
 func (c *Cluster) Delete(cluster *clusterv1.Cluster) error {
-	fmt.Println("Cluster delete is not implemented.")
+	c.Log.Info("Cluster delete is not implemented.")
 	return nil
 }
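
The cluster.go changes above show the logr v0.1.0 calling convention that runs through the whole diff: Info takes a constant message plus alternating key/value pairs, and Error takes the error as its first argument, replacing fmt.Printf("%+v\n", err). A standalone sketch under the same assumptions (zapr as the backend, hard-coded values for illustration):

    package main

    import (
    	"errors"

    	"github.com/go-logr/zapr"
    	"go.uber.org/zap"
    )

    func main() {
    	z, _ := zap.NewDevelopment()
    	log := zapr.NewLogger(z)

    	// Variable data moves out of the format string and into key/value pairs.
    	log.Info("Cluster has been created! Setting up some infrastructure",
    		"cluster-name", "my-cluster")

    	// Error takes the error first, then a constant message.
    	log.Error(errors.New("connection refused"),
    		"Error getting external load balancer node")
    }
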
76 changes: 35 additions & 41 deletions actuators/machine.go
@@ -22,6 +22,7 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/go-logr/logr"
 	apicorev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -45,116 +46,109 @@ const (
 type Machine struct {
 	Core       corev1.CoreV1Interface
 	ClusterAPI v1alpha1.ClusterV1alpha1Interface
-}
-
-// NewMachineActuator returns a new machine actuator object
-func NewMachineActuator(clusterapi v1alpha1.ClusterV1alpha1Interface, core corev1.CoreV1Interface) *Machine {
-	return &Machine{
-		Core:       core,
-		ClusterAPI: clusterapi,
-	}
+	Log        logr.Logger
 }
 
 // Create creates a machine for a given cluster
 // Note: have to print all the errors because cluster-api swallows them
 func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clusterv1.Machine) error {
 	old := machine.DeepCopy()
-	fmt.Printf("Creating a machine for cluster %q\n", c.Name)
+	m.Log.Info("Creating a machine for cluster", "cluster-name", c.Name)
 	clusterExists, err := cluster.IsKnown(c.Name)
 	if err != nil {
-		fmt.Printf("%+v", err)
+		m.Log.Error(err, "Error finding cluster", "cluster", c.Name)
 		return err
 	}
 	// If there's no cluster, requeue the request until there is one
 	if !clusterExists {
-		fmt.Println("There is no cluster yet, waiting for a cluster before creating machines")
+		m.Log.Info("There is no cluster yet, waiting for a cluster before creating machines")
 		return &capierror.RequeueAfterError{RequeueAfter: 30 * time.Second}
 	}
 
 	controlPlanes, err := actions.ListControlPlanes(c.Name)
 	if err != nil {
-		fmt.Printf("%+v\n", err)
+		m.Log.Error(err, "Error listing control planes")
 		return err
 	}
-	fmt.Printf("Is there a cluster? %v\n", clusterExists)
+	m.Log.Info("Is there a cluster?", "cluster-exists", clusterExists)
 	setValue := getRole(machine)
-	fmt.Printf("This node has a role of %q\n", setValue)
+	m.Log.Info("This node has a role", "role", setValue)
 	if setValue == clusterAPIControlPlaneSetLabel {
 		if len(controlPlanes) > 0 {
-			fmt.Println("Adding a control plane")
+			m.Log.Info("Adding a control plane")
 			controlPlaneNode, err := actions.AddControlPlane(c.Name, machine.GetName(), machine.Spec.Versions.ControlPlane)
 			if err != nil {
-				fmt.Printf("%+v", err)
+				m.Log.Error(err, "Error adding control plane")
 				return err
 			}
 			nodeUID, err := actions.GetNodeRefUID(c.GetName(), controlPlaneNode.Name())
 			if err != nil {
-				fmt.Printf("%+v", err)
+				m.Log.Error(err, "Error getting node reference UID")
 				return err
 			}
 			providerID := providerID(controlPlaneNode.Name())
 			machine.Spec.ProviderID = &providerID
 			return m.save(old, machine, getNodeRef(controlPlaneNode.Name(), nodeUID))
 		}
 
-		fmt.Println("Creating a brand new cluster")
+		m.Log.Info("Creating a brand new cluster")
 		elb, err := getExternalLoadBalancerNode(c.Name)
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error getting external load balancer node")
 			return err
 		}
 		lbip, err := elb.IP()
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error getting node IP address")
 			return err
 		}
 		controlPlaneNode, err := actions.CreateControlPlane(c.Name, machine.GetName(), lbip, machine.Spec.Versions.ControlPlane)
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error creating control plane")
 			return err
 		}
 		nodeUID, err := actions.GetNodeRefUID(c.GetName(), controlPlaneNode.Name())
 		if err != nil {
-			fmt.Printf("%+v", err)
+			m.Log.Error(err, "Error getting node reference UID")
 			return err
 		}
 		// set the machine's providerID
 		providerID := providerID(controlPlaneNode.Name())
 		machine.Spec.ProviderID = &providerID
 		if err := m.save(old, machine, getNodeRef(controlPlaneNode.Name(), nodeUID)); err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error setting machine's provider ID")
			return err
 		}
 		s, err := kubeconfigToSecret(c.Name, c.Namespace)
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error converting kubeconfig to a secret")
 			return err
 		}
 		// Save the secret to the management cluster
 		if _, err := m.Core.Secrets(machine.GetNamespace()).Create(s); err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error saving secret to management cluster")
 			return err
 		}
 		return nil
 	}
 
 	// If there are no control planes then we should hold off on joining workers
 	if len(controlPlanes) == 0 {
-		fmt.Printf("Sending machine %q back since there is no cluster to join\n", machine.Name)
+		m.Log.Info("Sending machine back since there is no cluster to join", "machine", machine.Name)
 		return &capierror.RequeueAfterError{RequeueAfter: 30 * time.Second}
 	}
 
-	fmt.Println("Creating a new worker node")
+	m.Log.Info("Creating a new worker node")
 	worker, err := actions.AddWorker(c.Name, machine.GetName(), machine.Spec.Versions.Kubelet)
 	if err != nil {
-		fmt.Printf("%+v", err)
+		m.Log.Error(err, "Error creating new worker node")
 		return err
 	}
 	providerID := providerID(worker.Name())
 	machine.Spec.ProviderID = &providerID
 	nodeUID, err := actions.GetNodeRefUID(c.GetName(), worker.Name())
 	if err != nil {
-		fmt.Printf("%+v", err)
+		m.Log.Error(err, "Error getting node reference ID")
 		return err
 	}
 	return m.save(old, machine, getNodeRef(worker.Name(), nodeUID))
@@ -169,18 +163,18 @@ func (m *Machine) Delete(ctx context.Context, cluster *clusterv1.Cluster, machin
 	if exists {
 		setValue := getRole(machine)
 		if setValue == clusterAPIControlPlaneSetLabel {
-			fmt.Printf("Deleting a control plane: %q\n", machine.GetName())
+			m.Log.Info("Deleting a control plane", "machine", machine.GetName())
 			return actions.DeleteControlPlane(cluster.Name, machine.GetName())
 		}
-		fmt.Printf("Deleting a worker: %q\n", machine.GetName())
+		m.Log.Info("Deleting a worker", "machine", machine.GetName())
 		return actions.DeleteWorker(cluster.Name, machine.GetName())
 	}
 	return nil
 }
 
 // Update updates a machine
 func (m *Machine) Update(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
-	fmt.Println("Update machine is not implemented yet.")
+	m.Log.Info("Update machine is not implemented yet")
 	return nil
 }
@@ -197,41 +191,41 @@ func (m *Machine) Exists(ctx context.Context, cluster *clusterv1.Cluster, machin
 		fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, cluster.Name),
 		fmt.Sprintf("name=^%s$", machine.GetName()),
 	}
-	fmt.Printf("using labels: %v\n", labels)
+	m.Log.Info("using labels", "labels", labels)
 	nodeList, err := nodes.List(labels...)
 	if err != nil {
 		return false, err
 	}
-	fmt.Printf("found nodes: %v\n", nodeList)
+	m.Log.Info("found nodes", "nodes", nodeList)
 	return len(nodeList) >= 1, nil
 }
 
 // patches the object and saves the status.
 func (m *Machine) save(old, new *clusterv1.Machine, noderef *apicorev1.ObjectReference) error {
-	fmt.Println("updating machine")
+	m.Log.Info("updating machine")
 	p, err := patch.NewJSONPatch(old, new)
 	if err != nil {
-		fmt.Printf("%+v\n", err)
+		m.Log.Error(err, "Error updating machine")
 		return err
 	}
-	fmt.Println("Patches for machine", p)
+	m.Log.Info("Patches for machine", "patches", p)
 	if len(p) != 0 {
 		pb, err := json.MarshalIndent(p, "", " ")
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error marshalling machine")
 			return err
 		}
 		new, err = m.ClusterAPI.Machines(old.Namespace).Patch(new.Name, types.JSONPatchType, pb)
 		if err != nil {
-			fmt.Printf("%+v\n", err)
+			m.Log.Error(err, "Error patching machine")
 			return err
 		}
-		fmt.Println("updated machine")
+		m.Log.Info("updated machine")
 	}
 	// set the noderef after so we don't try and patch it in during the first update
 	new.Status.NodeRef = noderef
 	if _, err := m.ClusterAPI.Machines(old.Namespace).UpdateStatus(new); err != nil {
-		fmt.Printf("%+v\n", err)
+		m.Log.Error(err, "Error setting node reference")
 		return err
 	}
 	return nil
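
One consequence of dropping the constructors: logr.Logger is an interface in v0.1.0, so the zero value of the new Log fields is nil, and any m.Log.Info call on an unwired actuator panics. A hypothetical guard a caller might add (newMachine and its fallback are illustrative, not part of this commit):

    package main

    import (
    	"github.com/go-logr/logr"
    	"github.com/go-logr/zapr"
    	"go.uber.org/zap"

    	"sigs.k8s.io/cluster-api-provider-docker/actuators"
    )

    // newMachine is a hypothetical helper that restores the removed
    // constructor's guarantee that Log is never nil.
    func newMachine(log logr.Logger) *actuators.Machine {
    	if log == nil {
    		z, _ := zap.NewDevelopment()
    		log = zapr.NewLogger(z) // fall back instead of panicking later
    	}
    	return &actuators.Machine{Log: log}
    }

    func main() {
    	m := newMachine(nil)
    	m.Log.Info("machine actuator ready") // safe: Log is always non-nil
    }
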
3 changes: 2 additions & 1 deletion go.mod
@@ -4,7 +4,7 @@ go 1.12
 
 require (
 	github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 // indirect
-	github.com/go-logr/logr v0.1.0 // indirect
+	github.com/go-logr/logr v0.1.0
 	github.com/go-logr/zapr v0.1.1 // indirect
 	github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect
 	github.com/google/btree v1.0.0 // indirect
@@ -25,6 +25,7 @@ require (
 	k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d
 	k8s.io/client-go v11.0.0+incompatible
 	k8s.io/cluster-bootstrap v0.0.0-20181213155137-5f9271efc2e7 // indirect
+	k8s.io/klog v0.3.0
 	k8s.io/kubernetes v1.13.1
 	sigs.k8s.io/cluster-api v0.0.0-20190607141803-aacb0c613ffb
 	sigs.k8s.io/controller-runtime v0.1.10
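
For context on the go.mod change: the // indirect marker flags a module that no package in the main module imports directly, so once the actuators import github.com/go-logr/logr themselves, go mod tidy drops the marker. k8s.io/klog is promoted to a direct requirement, presumably for wiring a klog-backed logr implementation elsewhere in the module; nothing in this commit shows that wiring. An annotated fragment (the comments are explanatory, not part of the commit):

    require (
    	// Imported directly by actuators/cluster.go and actuators/machine.go.
    	github.com/go-logr/logr v0.1.0
    	// Still only a transitive requirement, hence the marker.
    	github.com/go-logr/zapr v0.1.1 // indirect
    )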
