This repository has been archived by the owner on Sep 24, 2021. It is now read-only.

Commit: Merge pull request #2 from chuckha/updates

Several updates

k8s-ci-robot authored Jun 21, 2019
2 parents d81278a + bfe1e2a commit 6372508
Showing 7 changed files with 191 additions and 78 deletions.
5 changes: 1 addition & 4 deletions README.md
@@ -1,7 +1,5 @@
# Cluster API Provider Docker

-A temporary home for CAPD
-
## Manager Container Image

A sample is built and hosted at `gcr.io/kubernetes1-226021/capd-manager:latest`
@@ -31,7 +29,7 @@ docker build -t my-repository/capd-manager:latest .

# Testing out CAPD

-Tested on: Linux, OS X
+Tested on: Linux, works ok on OS X sometimes

Requirements: `kind` > 0.3.0 and `kubectl`

@@ -66,4 +64,3 @@ The kubeconfig is on the management cluster in secrets. Grab it and write it to
`kubectl get secrets -o jsonpath='{.data.kubeconfig}' kubeconfig-my-cluster | base64 --decode > ~/.kube/kind-config-my-cluster`

`kubectl get po --all-namespaces --kubeconfig ~/.kube/kind-config-my-cluster`
-
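For anyone scripting this step, here is a minimal client-go sketch of what the two kubectl one-liners above do: read the `kubeconfig` key out of the `kubeconfig-my-cluster` secret and write it to disk. The secret name and key come from the README; the `default` namespace, the file paths, and the panic-style error handling are assumptions for the example, and the calls use the pre-1.18 client-go API (no context argument) that matches this repo's era.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client against the management (kind) cluster.
	mgmtKubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
	cfg, err := clientcmd.BuildConfigFromFlags("", mgmtKubeconfig)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Secret data arrives base64-decoded, so there is no `base64 --decode` step.
	secret, err := client.CoreV1().Secrets("default").Get("kubeconfig-my-cluster", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}

	out := filepath.Join(os.Getenv("HOME"), ".kube", "kind-config-my-cluster")
	if err := ioutil.WriteFile(out, secret.Data["kubeconfig"], 0600); err != nil {
		panic(err)
	}
	fmt.Println("wrote", out)
}
```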
90 changes: 66 additions & 24 deletions actuators/machine.go
@@ -23,6 +23,7 @@ import (
"time"

"github.com/chuckha/cluster-api-provider-docker/kind/actions"
apicorev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
@@ -78,14 +79,19 @@ func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clu
if setValue == clusterAPIControlPlaneSetLabel {
if len(controlPlanes) > 0 {
fmt.Println("Adding a control plane")
-controlPlaneNode, err := actions.AddControlPlane(c.Name, machine.Spec.Versions.ControlPlane)
+controlPlaneNode, err := actions.AddControlPlane(c.Name, machine.GetName(), machine.Spec.Versions.ControlPlane)
if err != nil {
fmt.Printf("%+v", err)
return err
}
-name := providerID(controlPlaneNode.Name())
-machine.Spec.ProviderID = &name
-return m.save(old, machine)
+nodeUID, err := actions.GetNodeRefUID(c.GetName(), controlPlaneNode.Name())
+if err != nil {
+fmt.Printf("%+v", err)
+return err
+}
+providerID := providerID(controlPlaneNode.Name())
+machine.Spec.ProviderID = &providerID
+return m.save(old, machine, getNodeRef(controlPlaneNode.Name(), nodeUID))
}

fmt.Println("Creating a brand new cluster")
@@ -99,16 +105,20 @@
fmt.Printf("%+v\n", err)
return err
}
-controlPlaneNode, err := actions.CreateControlPlane(c.Name, lbip, machine.Spec.Versions.ControlPlane)
+controlPlaneNode, err := actions.CreateControlPlane(c.Name, machine.GetName(), lbip, machine.Spec.Versions.ControlPlane)
if err != nil {
fmt.Printf("%+v\n", err)
return err
}

+nodeUID, err := actions.GetNodeRefUID(c.GetName(), controlPlaneNode.Name())
+if err != nil {
+fmt.Printf("%+v", err)
+return err
+}
// set the machine's providerID
-name := providerID(controlPlaneNode.Name())
-machine.Spec.ProviderID = &name
-if err := m.save(old, machine); err != nil {
+providerID := providerID(controlPlaneNode.Name())
+machine.Spec.ProviderID = &providerID
+if err := m.save(old, machine, getNodeRef(controlPlaneNode.Name(), nodeUID)); err != nil {
fmt.Printf("%+v\n", err)
return err
}
@@ -132,18 +142,37 @@ func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clu
}

fmt.Println("Creating a new worker node")
-worker, err := actions.AddWorker(c.Name, machine.Spec.Versions.Kubelet)
+worker, err := actions.AddWorker(c.Name, machine.GetName(), machine.Spec.Versions.Kubelet)
if err != nil {
fmt.Printf("%+v", err)
return err
}
-name := providerID(worker.Name())
-machine.Spec.ProviderID = &name
-return m.save(old, machine)
+providerID := providerID(worker.Name())
+machine.Spec.ProviderID = &providerID
+nodeUID, err := actions.GetNodeRefUID(c.GetName(), worker.Name())
+if err != nil {
+fmt.Printf("%+v", err)
+return err
+}
+return m.save(old, machine, getNodeRef(worker.Name(), nodeUID))
}

// Delete returns nil when the machine no longer exists or when a successful delete has happened.
func (m *Machine) Delete(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
-return actions.DeleteNode(cluster.Name, providerNameToLookupID(*machine.Spec.ProviderID))
+exists, err := m.Exists(ctx, cluster, machine)
+if err != nil {
+return err
+}
+if exists {
+setValue := getRole(machine)
+if setValue == clusterAPIControlPlaneSetLabel {
+fmt.Printf("Deleting a control plane: %q\n", machine.GetName())
+return actions.DeleteControlPlane(cluster.Name, machine.GetName())
+}
+fmt.Printf("Deleting a worker: %q\n", machine.GetName())
+return actions.DeleteWorker(cluster.Name, machine.GetName())
+}
+return nil
}

func (m *Machine) Update(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
@@ -152,16 +181,16 @@ func (m *Machine) Update(ctx context.Context, cluster *clusterv1.Cluster, machin
}

func (m *Machine) Exists(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) {
-if machine.Spec.ProviderID == nil {
-return false, nil
+if machine.Spec.ProviderID != nil {
+return true, nil
}
fmt.Println("Looking for a docker container named", providerNameToLookupID(*machine.Spec.ProviderID))

role := getRole(machine)
kindRole := CAPIroleToKindRole(role)
labels := []string{
fmt.Sprintf("label=%s=%s", constants.NodeRoleKey, kindRole),
fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, cluster.Name),
fmt.Sprintf("name=^%s$", providerNameToLookupID(*machine.Spec.ProviderID)),
fmt.Sprintf("name=^%s$", machine.GetName()),
}
fmt.Printf("using labels: %v\n", labels)
nodeList, err := nodes.List(labels...)
@@ -172,7 +201,8 @@ func (m *Machine) Exists(ctx context.Context, cluster *clusterv1.Cluster, machin
return len(nodeList) >= 1, nil
}

-func (m *Machine) save(old, new *clusterv1.Machine) error {
+// patches the object and saves the status.
+func (m *Machine) save(old, new *clusterv1.Machine, noderef *apicorev1.ObjectReference) error {
fmt.Println("updating machine")
p, err := patch.NewJSONPatch(old, new)
if err != nil {
@@ -186,19 +216,22 @@ func (m *Machine) save(old, new *clusterv1.Machine) error {
fmt.Printf("%+v\n", err)
return err
}
-if _, err := m.ClusterAPI.Machines(old.Namespace).Patch(new.Name, types.JSONPatchType, pb); err != nil {
+new, err = m.ClusterAPI.Machines(old.Namespace).Patch(new.Name, types.JSONPatchType, pb)
+if err != nil {
fmt.Printf("%+v\n", err)
return err
}
fmt.Println("updated machine")
}
+// set the noderef after so we don't try and patch it in during the first update
+new.Status.NodeRef = noderef
+if _, err := m.ClusterAPI.Machines(old.Namespace).UpdateStatus(new); err != nil {
+fmt.Printf("%+v\n", err)
+return err
+}
return nil
}

-func providerNameToLookupID(providerName string) string {
-return providerName[len("docker://"):]
-}
-
func providerID(name string) string {
return fmt.Sprintf("docker://%s", name)
}
@@ -210,3 +243,12 @@ func CAPIroleToKindRole(CAPIRole string) string {
}
return CAPIRole
}
+
+func getNodeRef(name, uid string) *apicorev1.ObjectReference {
+return &apicorev1.ObjectReference{
+Kind: "Node",
+APIVersion: apicorev1.SchemeGroupVersion.String(),
+Name: name,
+UID: types.UID(uid),
+}
+}
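The common thread in the machine.go changes above is that every path that creates a node now looks up the node's UID via GetNodeRefUID and hands save() an ObjectReference built by getNodeRef. Below is a small self-contained sketch of that reference shape; the node name and UID are invented, and the final print stands in for the UpdateStatus call.

```go
package main

import (
	"fmt"

	apicorev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
)

// Same shape as getNodeRef in the diff above: a Machine's NodeRef is a plain
// core/v1 ObjectReference naming the backing Node object and its UID.
func getNodeRef(name, uid string) *apicorev1.ObjectReference {
	return &apicorev1.ObjectReference{
		Kind:       "Node",
		APIVersion: apicorev1.SchemeGroupVersion.String(),
		Name:       name,
		UID:        types.UID(uid),
	}
}

func main() {
	// The UID is whatever GetNodeRefUID reads off the workload cluster; this
	// one is invented for the example.
	ref := getNodeRef("my-cluster-control-plane", "f47ac10b-58cc-4372-a567-0e02b2c3d479")

	// Per the "set the noderef after" comment in save(): the spec/metadata
	// JSON patch is applied first, and only then is Status.NodeRef set and
	// pushed via UpdateStatus, keeping the node ref out of the first patch.
	fmt.Printf("nodeRef: kind=%s apiVersion=%s name=%s uid=%s\n",
		ref.Kind, ref.APIVersion, ref.Name, ref.UID)
}
```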
8 changes: 4 additions & 4 deletions cmd/capdctl/main.go
@@ -28,6 +28,8 @@ import (
"sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
)

+// TODO: Generate the RBAC stuff from somewhere instead of copy pasta
+
const (
// Important to keep this consistent.
controlPlaneSet = "controlplane"
@@ -53,7 +55,7 @@ func main() {

capd := flag.NewFlagSet("capd", flag.ExitOnError)
capdImage := capd.String("capd-image", "gcr.io/kubernetes1-226021/capd-manager:latest", "The capd manager image to run")
capiImage := capd.String("capi-image", "gcr.io/k8s-cluster-api/cluster-api-controller:0.1.1", "The capi manager image to run")
capiImage := capd.String("capi-image", "gcr.io/k8s-cluster-api/cluster-api-controller:0.1.3", "The capi manager image to run")

controlPlane := flag.NewFlagSet("control-plane", flag.ExitOnError)
controlPlaneOpts := new(machineOptions)
@@ -122,7 +124,6 @@ subcommands are:
cluster - Write a capd cluster object to stdout
example: capdctl cluster -cluster-name my-cluster -namespace my-namespace | kubectl apply -f -
`
}

@@ -153,14 +154,13 @@ func machineYAML(opts *machineOptions) string {
Namespace: *opts.namespace,
Labels: map[string]string{
"cluster.k8s.io/cluster-name": *opts.clusterName,
"set": *opts.set,
"set": *opts.set,
},
},
Spec: v1alpha1.MachineSpec{
ProviderSpec: v1alpha1.ProviderSpec{},
},
}
-// TODO: 🤔
if *opts.set == controlPlaneSet {
machine.Spec.Versions.ControlPlane = *opts.version
}
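To make the machineYAML change concrete, here is a sketch of the control-plane Machine object capdctl assembles, marshaled to YAML for a `kubectl apply -f -` pipeline. The label keys and the set-dependent version branch mirror the diff; the name, namespace, and version string are invented, and `sigs.k8s.io/yaml` stands in for whatever serializer the real tool uses.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
	"sigs.k8s.io/yaml"
)

func main() {
	machine := v1alpha1.Machine{
		TypeMeta: metav1.TypeMeta{Kind: "Machine", APIVersion: "cluster.k8s.io/v1alpha1"},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-control-plane",
			Namespace: "my-namespace",
			Labels: map[string]string{
				"cluster.k8s.io/cluster-name": "my-cluster",
				"set":                         "controlplane",
			},
		},
		Spec: v1alpha1.MachineSpec{
			ProviderSpec: v1alpha1.ProviderSpec{},
		},
	}
	// Mirrors the branch at the end of machineYAML: control-plane machines get
	// Versions.ControlPlane; workers would get Versions.Kubelet instead.
	machine.Spec.Versions.ControlPlane = "v1.14.2"

	out, err := yaml.Marshal(machine)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
```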
6 changes: 3 additions & 3 deletions cmd/kind-test/main.go
@@ -48,11 +48,11 @@ func main() {
if err != nil {
panic(fmt.Sprintf("%+v", err))
}
-if _, err := actions.CreateControlPlane(clusterName, ip, version); err != nil {
+if _, err := actions.CreateControlPlane(clusterName, inputs[1], ip, version); err != nil {
panic(fmt.Sprintf("%+v", err))
}
case "add-worker":
-if _, err := actions.AddWorker(clusterName, version); err != nil {
+if _, err := actions.AddWorker(clusterName, inputs[1], version); err != nil {
panic(fmt.Sprintf("%+v", err))
}
case "delete-node":
@@ -65,7 +65,7 @@ func main() {
panic(fmt.Sprintf("%+v", err))
}
case "add-control-plane":
-if _, err := actions.AddControlPlane(clusterName, version); err != nil {
+if _, err := actions.AddControlPlane(clusterName, inputs[1], version); err != nil {
panic(fmt.Sprintf("%+v", err))
}
case "set-cluster-name":
76 changes: 69 additions & 7 deletions kind/actions/cluster_actions.go
@@ -243,10 +243,10 @@ func KubeadmJoin(clusterName string, node *nodes.Node) error {
return nil
}

-func SetNodeRef(clusterName, nodeName string) error {
+func SetNodeProviderRef(clusterName, nodeName string) error {
allNodes, err := nodes.List(fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName))
if err != nil {
-return nil
+return err
}

node, err := nodes.BootstrapControlPlaneNode(allNodes)
@@ -274,28 +274,90 @@ func SetNodeRef(clusterName, nodeName string) error {
return nil
}

-func RemoveNode(clusterName, nodeName string) error {
+func GetNodeRefUID(clusterName, nodeName string) (string, error) {
+// k get nodes my-cluster-worker -o custom-columns=UID:.metadata.uid --no-headers
allNodes, err := nodes.List(fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName))
if err != nil {
-return nil
+return "", err
}

node, err := nodes.BootstrapControlPlaneNode(allNodes)
if err != nil {
-return err
+return "", err
}

-patch := fmt.Sprintf(`{"spec": {"providerID": "docker://%s"}}`, nodeName)
-fmt.Println("trying to apply:", patch)
cmd := node.Command(
"kubectl",
"--kubeconfig", "/etc/kubernetes/admin.conf",
"delete",
"get",
"node", nodeName,
"--output=custom-columns=UID:.metadata.uid",
"--no-headers",
)
lines, err := exec.CombinedOutputLines(cmd)
if err != nil {
for _, line := range lines {
fmt.Println(line)
}
return errors.Wrap(err, "failed to remove node from cluster")
return "", errors.Wrap(err, "failed get node ref UID")
}
+return strings.TrimSpace(lines[0]), nil
}
+
+// DeleteClusterNode will remove the kubernetes node object from the cluster, so it no longer shows up in `kubectl get nodes`.
+func DeleteClusterNode(clusterName, nodeName string) error {
+// get all control plane nodes
+allControlPlanes, err := nodes.List(
+fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName),
+fmt.Sprintf("label=%s=%s", constants.NodeRoleKey, constants.ControlPlaneNodeRoleValue),
+)
+if err != nil {
+return err
+}
+var node nodes.Node
+// pick one that doesn't match the node name we are trying to delete
+for _, n := range allControlPlanes {
+if n.Name() != nodeName {
+node = n
+break
+}
+}
+cmd := node.Command(
+"kubectl",
+"--kubeconfig", "/etc/kubernetes/admin.conf",
+"delete", "node", nodeName,
+)
+lines, err := exec.CombinedOutputLines(cmd)
+if err != nil {
+for _, line := range lines {
+fmt.Println(line)
+}
+return errors.Wrap(err, "failed to delete cluster node")
+}
+return nil
+}
+
+// KubeadmReset will run `kubeadm reset` on the control plane node being removed.
+func KubeadmReset(clusterName, nodeName string) error {
+nodeList, err := nodes.List(
+fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName),
+fmt.Sprintf("label=%s=%s", constants.NodeRoleKey, constants.ControlPlaneNodeRoleValue),
+fmt.Sprintf("name=^%s$", nodeName),
+)
+if err != nil {
+return err
+}
+if len(nodeList) < 1 {
+return errors.Errorf("could not find node %q", nodeName)
+}
+node := nodeList[0]
+
+cmd := node.Command("kubeadm", "reset", "--force")
+lines, err := exec.CombinedOutputLines(cmd)
+if err != nil {
+for _, line := range lines {
+fmt.Println(line)
+}
+return errors.Wrap(err, "failed to reset node")
+}
+
+return nil
+}
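machine.go's Delete above now delegates to actions.DeleteControlPlane and actions.DeleteWorker, neither of which is visible in this diff (they sit below the fold). Purely as a guess at how they might compose the helpers that are shown, kubeadm reset on the departing node followed by removal of its Node object via a surviving control plane, here is a sketch assumed to live in the same kind/actions package. It is illustrative only, not code from the commit.

```go
// deleteWorkerSketch is hypothetical: it shows one plausible ordering of the
// helpers added above, not the commit's actual DeleteWorker.
func deleteWorkerSketch(clusterName, nodeName string) error {
	// Wipe kubeadm state on the node that is leaving the cluster.
	if err := KubeadmReset(clusterName, nodeName); err != nil {
		return err
	}
	// Then delete its Node object so it no longer appears in `kubectl get nodes`.
	return DeleteClusterNode(clusterName, nodeName)
}
```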