Skip to content
This repository has been archived by the owner on Sep 24, 2021. It is now read-only.

Commit

Permalink
Merge pull request #62 from chuckha/dl-crds
Browse files Browse the repository at this point in the history
All the fixes for cluster api v0.1.4
k8s-ci-robot authored Jul 5, 2019

Verified

This commit was signed with the committer’s verified signature.
chuckha Chuck Ha
2 parents 87507b3 + c29fe4e commit 7a0f729
Showing 7 changed files with 146 additions and 1,170 deletions.
8 changes: 0 additions & 8 deletions README.md
Original file line number Diff line number Diff line change
@@ -55,14 +55,6 @@ Make sure you have `kubectl`.

`export KUBECONFIG="${HOME}/.kube/kind-config-management"`

1. Install the cluster-api CRDs

`capdctl crds | kubectl apply -f -`

1. Run the capd & capi manager

`capdctl capd -capd-image=<YOUR_REGISTRY>/capd-manager:latest | kubectl apply -f -`

### Create a worker cluster

`kubectl apply -f examples/simple-cluster.yaml`
19 changes: 18 additions & 1 deletion actuators/actuators.go
Original file line number Diff line number Diff line change
@@ -17,6 +17,7 @@ limitations under the License.
package actuators

import (
"bytes"
"fmt"
"io/ioutil"

@@ -71,6 +72,22 @@ func kubeconfigToSecret(clusterName, namespace string) (*v1.Secret, error) {
return nil, errors.WithStack(err)
}

allNodes, err := nodes.List(fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName))
if err != nil {
return nil, errors.WithStack(err)
}

// This is necessary so the management cluster in a container can talk to another container.
// They share the same bridged network and the load balancer does respond on 6443 at its docker IP
// however, the *HOST* is listening on some random port (the one returned from the GetLoadBalancerHostAndPort).
lbip, _, err := actions.GetLoadBalancerHostAndPort(allNodes)
lines := bytes.Split(data, []byte("\n"))
for i, line := range lines {
if bytes.Contains(line, []byte("https://")) {
lines[i] = []byte(fmt.Sprintf(" server: https://%s:%d", lbip, 6443))
}
}

// write it to a secret
return &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
@@ -80,7 +97,7 @@ func kubeconfigToSecret(clusterName, namespace string) (*v1.Secret, error) {
},
Data: map[string][]byte{
// TODO pull in constant from cluster api
"value": data,
"value": bytes.Join(lines, []byte("\n")),
},
}, nil
}
49 changes: 7 additions & 42 deletions actuators/machine.go
Original file line number Diff line number Diff line change
@@ -23,7 +23,6 @@ import (
"time"

"github.com/go-logr/logr"
apicorev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"sigs.k8s.io/cluster-api-provider-docker/kind/actions"
@@ -81,14 +80,9 @@ func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clu
m.Log.Error(err, "Error adding control plane")
return err
}
nodeUID, err := actions.GetNodeRefUID(c.GetName(), controlPlaneNode.Name())
if err != nil {
m.Log.Error(err, "Error getting node reference UID")
return err
}
providerID := providerID(controlPlaneNode.Name())
providerID := actions.ProviderID(controlPlaneNode.Name())
machine.Spec.ProviderID = &providerID
return m.save(old, machine, getNodeRef(controlPlaneNode.Name(), nodeUID))
return m.save(old, machine)
}

m.Log.Info("Creating a brand new cluster")
@@ -107,15 +101,10 @@ func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clu
m.Log.Error(err, "Error creating control plane")
return err
}
nodeUID, err := actions.GetNodeRefUID(c.GetName(), controlPlaneNode.Name())
if err != nil {
m.Log.Error(err, "Error getting node reference UID")
return err
}
// set the machine's providerID
providerID := providerID(controlPlaneNode.Name())
providerID := actions.ProviderID(controlPlaneNode.Name())
machine.Spec.ProviderID = &providerID
if err := m.save(old, machine, getNodeRef(controlPlaneNode.Name(), nodeUID)); err != nil {
if err := m.save(old, machine); err != nil {
m.Log.Error(err, "Error setting machine's provider ID")
return err
}
@@ -144,14 +133,9 @@ func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clu
m.Log.Error(err, "Error creating new worker node")
return err
}
providerID := providerID(worker.Name())
providerID := actions.ProviderID(worker.Name())
machine.Spec.ProviderID = &providerID
nodeUID, err := actions.GetNodeRefUID(c.GetName(), worker.Name())
if err != nil {
m.Log.Error(err, "Error getting node reference ID")
return err
}
return m.save(old, machine, getNodeRef(worker.Name(), nodeUID))
return m.save(old, machine)
}

// Delete returns nil when the machine no longer exists or when a successful delete has happened.
@@ -201,7 +185,7 @@ func (m *Machine) Exists(ctx context.Context, cluster *clusterv1.Cluster, machin
}

// patches the object and saves the status.
func (m *Machine) save(oldMachine, newMachine *clusterv1.Machine, noderef *apicorev1.ObjectReference) error {
func (m *Machine) save(oldMachine, newMachine *clusterv1.Machine) error {
m.Log.Info("updating machine")
p, err := patch.NewJSONPatch(oldMachine, newMachine)
if err != nil {
@@ -222,19 +206,9 @@ func (m *Machine) save(oldMachine, newMachine *clusterv1.Machine, noderef *apico
}
m.Log.Info("updated machine")
}
// set the noderef after so we don't try and patch it in during the first update
newMachine.Status.NodeRef = noderef
if _, err := m.ClusterAPI.Machines(oldMachine.Namespace).UpdateStatus(newMachine); err != nil {
m.Log.Error(err, "Error setting node reference")
return err
}
return nil
}

// providerID returns the docker provider ID for the node with the
// given container name, e.g. "docker:////my-node".
func providerID(name string) string {
	// Plain concatenation; the prefix is fixed so Sprintf is unnecessary.
	return "docker:////" + name
}

// CAPIroleToKindRole converts a CAPI role to kind role
// TODO there is a better way to do this.
func CAPIroleToKindRole(CAPIRole string) string {
@@ -243,12 +217,3 @@ func CAPIroleToKindRole(CAPIRole string) string {
}
return CAPIRole
}

// getNodeRef builds a core/v1 ObjectReference identifying the Node with
// the given name and UID, suitable for use as a machine's NodeRef.
func getNodeRef(name, uid string) *apicorev1.ObjectReference {
	ref := apicorev1.ObjectReference{
		Kind:       "Node",
		APIVersion: apicorev1.SchemeGroupVersion.String(),
		Name:       name,
		UID:        types.UID(uid),
	}
	return &ref
}
5 changes: 5 additions & 0 deletions cmd/capd-manager/main.go
Original file line number Diff line number Diff line change
@@ -17,6 +17,7 @@ limitations under the License.
package main

import (
"flag"
"fmt"
"time"

@@ -35,6 +36,9 @@ import (
)

func main() {
flag.Set("v", "0")
flag.Parse()

cfg, err := config.GetConfig()
if err != nil {
panic(err)
@@ -67,6 +71,7 @@ func main() {

machineLogger := logger.Log{}
machineLogger.Logger = klogr.New().WithName("[machine-actuator]")

machineActuator := actuators.Machine{
Core: k8sclientset.CoreV1(),
ClusterAPI: cs.ClusterV1alpha1(),
Loading

0 comments on commit 7a0f729

Please sign in to comment.