diff --git a/cmd/capd-manager/main.go b/cmd/capd-manager/main.go
index 36e668e..4189bf2 100644
--- a/cmd/capd-manager/main.go
+++ b/cmd/capd-manager/main.go
@@ -17,10 +17,11 @@ limitations under the License.
 package main
 
 import (
-	"fmt"
+	"flag"
 	"time"
 
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/klog"
 	"k8s.io/klog/klogr"
 	"sigs.k8s.io/cluster-api-provider-docker/actuators"
 	"sigs.k8s.io/cluster-api-provider-docker/logger"
@@ -35,6 +36,9 @@ import (
 )
 
 func main() {
+	klog.InitFlags(flag.CommandLine)
+	flag.Parse()
+
 	cfg, err := config.GetConfig()
 	if err != nil {
 		panic(err)
@@ -86,7 +90,8 @@ func main() {
 	if err := capicluster.AddWithActuator(mgr, &clusterActuator); err != nil {
 		panic(err)
 	}
-	fmt.Println("starting the controller...!")
+
+	klogr.New().Info("Starting the controller")
 
 	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
 		panic(err)
diff --git a/cmd/capdctl/main.go b/cmd/capdctl/main.go
index 51db1e8..6a8b282 100644
--- a/cmd/capdctl/main.go
+++ b/cmd/capdctl/main.go
@@ -17,26 +17,18 @@ limitations under the License.
 package main
 
 import (
-	"archive/tar"
-	"bytes"
-	"compress/gzip"
+	"context"
 	"encoding/json"
 	"flag"
 	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
 	"os"
-	"strings"
-	"time"
-
-	"github.com/pkg/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"sigs.k8s.io/cluster-api-provider-docker/kind/actions"
-	"sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
-	"sigs.k8s.io/kind/pkg/cluster/nodes"
-	"sigs.k8s.io/kind/pkg/container/cri"
-	"sigs.k8s.io/kind/pkg/exec"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/klog"
+	"sigs.k8s.io/cluster-api-provider-docker/kind/controlplane"
+	"sigs.k8s.io/cluster-api-provider-docker/objects"
+	crclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 // TODO: Generate the RBAC stuff from somewhere instead of copy pasta
@@ -58,12 +50,12 @@ func (mo *machineOptions) initFlags(fs *flag.FlagSet) {
 	mo.version = fs.String("version", "v1.14.2", "The Kubernetes version to run")
 }
 
-type machineDeyploymentOptions struct {
+type machineDeploymentOptions struct {
 	name, namespace, clusterName, kubeletVersion *string
 	replicas                                     *int
 }
 
-func (mo *machineDeyploymentOptions) initFlags(fs *flag.FlagSet) {
+func (mo *machineDeploymentOptions) initFlags(fs *flag.FlagSet) {
 	mo.name = fs.String("name", "my-machine-deployment", "The name of the machine deployment")
 	mo.namespace = fs.String("namespace", "my-namespace", "The namespace of the machine deployment")
 	mo.clusterName = fs.String("cluster-name", "my-cluster", "The name of the cluster the machine deployment creates machines for")
@@ -93,9 +85,12 @@ func main() {
 	clusterNamespace := cluster.String("namespace", "my-namespace", "The namespace the cluster belongs to")
 
 	machineDeployment := flag.NewFlagSet("machine-deployment", flag.ExitOnError)
-	machineDeploymentOpts := new(machineDeyploymentOptions)
+	machineDeploymentOpts := new(machineDeploymentOptions)
 	machineDeploymentOpts.initFlags(machineDeployment)
 
+	kflags := flag.NewFlagSet("klog", flag.ExitOnError)
+	klog.InitFlags(kflags)
+
 	if len(os.Args) < 2 {
 		fmt.Println("At least one subcommand is requied.")
 		fmt.Println(usage())
@@ -106,6 +101,9 @@ func main() {
 	case "setup":
 		setup.Parse(os.Args[2:])
 		makeManagementCluster(*managementClusterName, *version, *capdImage, *capiImage)
+	case "apply":
+		kflags.Parse(os.Args[2:])
+		applyControlPlane(*managementClusterName, *version, *capiImage, *capdImage)
 	case "control-plane":
 		controlPlane.Parse(os.Args[2:])
 		fmt.Fprintf(os.Stdout, machineYAML(controlPlaneOpts))
@@ -124,6 +122,8 @@ func main() {
 		fmt.Println(usage())
 		os.Exit(1)
 	}
+
+	klog.Flush()
 }
 
 func usage() string {
@@ -154,90 +154,24 @@ subcommands are:
 `
 }
 
-func clusterYAML(name, namespace string) string {
-	return fmt.Sprintf(`apiVersion: "cluster.k8s.io/v1alpha1"
-kind: Cluster
-metadata:
-  name: %s
-  namespace: %s
-spec:
-  clusterNetwork:
-    services:
-      cidrBlocks: ["10.96.0.0/12"]
-    pods:
-      cidrBlocks: ["192.168.0.0/16"]
-    serviceDomain: "cluster.local"
-  providerSpec: {}`, name, namespace)
+func clusterYAML(clusterName, namespace string) string {
+	cluster := objects.GetCluster(clusterName, namespace)
+	return marshal(&cluster)
 }
 
-func machineDeploymentYAML(opts *machineDeyploymentOptions) string {
-	replicas := int32(*opts.replicas)
-	labels := map[string]string{
-		"cluster.k8s.io/cluster-name": *opts.clusterName,
-		"set": "node",
-	}
-	deployment := v1alpha1.MachineDeployment{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "MachineDeployment",
-			APIVersion: "cluster.k8s.io/v1alpha1",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      *opts.name,
-			Namespace: *opts.namespace,
-			Labels:    labels,
-		},
-		Spec: v1alpha1.MachineDeploymentSpec{
-			Replicas: &replicas,
-			Selector: metav1.LabelSelector{
-				MatchLabels: labels,
-			},
-			Template: v1alpha1.MachineTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: labels,
-				},
-				Spec: v1alpha1.MachineSpec{
-					ProviderSpec: v1alpha1.ProviderSpec{},
-					Versions: v1alpha1.MachineVersionInfo{
-						Kubelet: *opts.kubeletVersion,
-					},
-				},
-			},
-		},
-	}
+func machineYAML(opts *machineOptions) string {
+	machine := objects.GetMachine(*opts.name, *opts.namespace, *opts.clusterName, *opts.set, *opts.version)
+	return marshal(&machine)
+}
+
+func machineDeploymentYAML(opts *machineDeploymentOptions) string {
+	machineDeploy := objects.GetMachineDeployment(*opts.name, *opts.namespace, *opts.clusterName, *opts.kubeletVersion, int32(*opts.replicas))
+	return marshal(&machineDeploy)
 
-	b, err := json.Marshal(deployment)
-	// TODO don't panic on the error
-	if err != nil {
-		panic(err)
-	}
-	return string(b)
 }
 
-func machineYAML(opts *machineOptions) string {
-	machine := v1alpha1.Machine{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "Machine",
-			APIVersion: "cluster.k8s.io/v1alpha1",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      *opts.name,
-			Namespace: *opts.namespace,
-			Labels: map[string]string{
-				"cluster.k8s.io/cluster-name": *opts.clusterName,
-				"set": *opts.set,
-			},
-		},
-		Spec: v1alpha1.MachineSpec{
-			ProviderSpec: v1alpha1.ProviderSpec{},
-		},
-	}
-	if *opts.set == controlPlaneSet {
-		machine.Spec.Versions.ControlPlane = *opts.version
-	}
-	if *opts.set == "worker" {
-		machine.Spec.Versions.Kubelet = *opts.version
-	}
-	b, err := json.Marshal(machine)
+func marshal(obj runtime.Object) string {
+	b, err := json.Marshal(obj)
 	// TODO don't panic on the error
 	if err != nil {
 		panic(err)
@@ -251,254 +185,42 @@ func makeManagementCluster(clusterName, capiVersion, capdImage, capiImageOverride string) {
 	if capiImageOverride != "" {
 		capiImage = capiImageOverride
 	}
-	elb, err := actions.SetUpLoadBalancer(clusterName)
-	if err != nil {
-		panic(err)
-	}
-	lbipv4, _, err := elb.IP()
-	if err != nil {
-		panic(err)
-	}
-	cpMounts := []cri.Mount{
-		{
-			ContainerPath: "/var/run/docker.sock",
-			HostPath:      "/var/run/docker.sock",
-		},
-	}
-	cp, err := actions.CreateControlPlane(clusterName, fmt.Sprintf("%s-control-plane", clusterName), lbipv4, "v1.14.2", cpMounts)
-	if err != nil {
-		panic(err)
-	}
-	if !nodes.WaitForReady(cp, time.Now().Add(5*time.Minute)) {
-		panic(errors.New("control plane was not ready in 5 minutes"))
-	}
-	f, err := ioutil.TempFile("", "crds")
-	if err != nil {
+
+	if err := controlplane.CreateKindCluster(capiImage, clusterName); err != nil {
 		panic(err)
 	}
-	defer os.Remove(f.Name())
+
+	applyControlPlane(clusterName, capiVersion, capiImage, capdImage)
+}
+
+func applyControlPlane(clusterName, capiVersion, capiImage, capdImage string) {
 	fmt.Println("Downloading the latest CRDs for CAPI version", capiVersion)
-	crds, err := getCRDs(capiVersion, capiImage)
+	objs, err := objects.GetManagementCluster(capiVersion, capiImage, capdImage)
 	if err != nil {
 		panic(err)
 	}
-	fmt.Fprintln(f, crds)
-	fmt.Fprintln(f, "---")
-	fmt.Fprintln(f, capdRBAC)
-	fmt.Fprintln(f, "---")
-	fmt.Fprintln(f, getCAPDPlane(capdImage))
-	fmt.Println("Applying the control plane", f.Name())
-	cmd := exec.Command("kubectl", "apply", "-f", f.Name())
-	cmd.SetEnv(fmt.Sprintf("KUBECONFIG=%s/.kube/kind-config-%s", os.Getenv("HOME"), clusterName))
-	cmd.SetStdout(os.Stdout)
-	cmd.SetStderr(os.Stderr)
-	if err := cmd.Run(); err != nil {
-		out, _ := ioutil.ReadFile(f.Name())
-		fmt.Println(out)
-		panic(err)
-	}
-}
 
-func getCAPDPlane(capdImage string) string {
-	return fmt.Sprintf(capiPlane, capdImage)
-}
-
-var capiPlane = `
-apiVersion: v1
-kind: Namespace
-metadata:
-  labels:
-    controller-tools.k8s.io: "1.0"
-  name: docker-provider-system
----
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  labels:
-    control-plane: controller-manager
-  name: docker-provider-controller-manager
-  namespace: docker-provider-system
-spec:
-  selector:
-    matchLabels:
-      control-plane: controller-manager
-  serviceName: docker-provider-controller-manager-service
-  template:
-    metadata:
-      labels:
-        control-plane: controller-manager
-    spec:
-      containers:
-      - name: capd-manager
-        image: %s
-        command:
-        - capd-manager
-        volumeMounts:
-        - mountPath: /var/run/docker.sock
-          name: dockersock
-        - mountPath: /var/lib/docker
-          name: dockerlib
-        securityContext:
-          privileged: true
-      volumes:
-      - name: dockersock
-        hostPath:
-          path: /var/run/docker.sock
-          type: Socket
-      - name: dockerlib
-        hostPath:
-          path: /var/lib/docker
-      tolerations:
-      - effect: NoSchedule
-        key: node-role.kubernetes.io/master
-      - key: CriticalAddonsOnly
-        operator: Exists
-      - effect: NoExecute
-        key: node.alpha.kubernetes.io/notReady
-        operator: Exists
-      - effect: NoExecute
-        key: node.alpha.kubernetes.io/unreachable
-        operator: Exists
-`
+	fmt.Println("Applying the control plane")
 
-// getCRDs should actually use kustomize to correctly build the manager yaml.
-// HACK: this is a hacked function
-func getCRDs(version, capiImage string) (string, error) {
-	crds := []string{"crds", "rbac", "manager"}
-	releaseCode := fmt.Sprintf("https://github.com/kubernetes-sigs/cluster-api/archive/%s.tar.gz", version)
-
-	resp, err := http.Get(releaseCode)
+	cfg, err := controlplane.GetKubeconfig(clusterName)
 	if err != nil {
-		return "", errors.WithStack(err)
+		panic(err)
 	}
-	gz, err := gzip.NewReader(resp.Body)
+
+	client, err := crclient.New(cfg, crclient.Options{})
 	if err != nil {
-		return "", errors.WithStack(err)
+		panic(err)
 	}
-	tgz := tar.NewReader(gz)
-	var buf bytes.Buffer
-
-	for {
-		header, err := tgz.Next()
-
-		if err == io.EOF {
-			break
-		}
-
+	for _, obj := range objs {
+		accessor, err := meta.Accessor(obj)
 		if err != nil {
-			return "", errors.WithStack(err)
+			panic(err)
 		}
+		fmt.Printf("creating %q %q\n", obj.GetObjectKind().GroupVersionKind().String(), accessor.GetName())
 
-		switch header.Typeflag {
-		case tar.TypeDir:
-			continue
-		case tar.TypeReg:
-			for _, crd := range crds {
-				// Skip the kustomization files for now. Would like to use kustomize in future
-				if strings.HasSuffix(header.Name, "kustomization.yaml") {
-					continue
-				}
-
-				// This is a poor person's kustomize
-				if strings.HasSuffix(header.Name, "manager.yaml") {
-					var managerBuf bytes.Buffer
-					io.Copy(&managerBuf, tgz)
-					lines := strings.Split(managerBuf.String(), "\n")
-					for _, line := range lines {
-						if strings.Contains(line, "image:") {
-							buf.WriteString(strings.Replace(line, "image: controller:latest", fmt.Sprintf("image: %s", capiImage), 1))
-							buf.WriteString("\n")
-							continue
-						}
-						buf.WriteString(line)
-						buf.WriteString("\n")
-					}
-				}
-
-				// These files don't need kustomize at all.
-				if strings.Contains(header.Name, fmt.Sprintf("config/%s/", crd)) {
-					io.Copy(&buf, tgz)
-					fmt.Fprintln(&buf, "---")
-				}
-			}
+		if err := client.Create(context.Background(), obj); err != nil {
+			panic(err)
 		}
 	}
-	return buf.String(), nil
 }
-
-var capdRBAC = `apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: docker-provider-manager-role
-rules:
-- apiGroups:
-  - cluster.k8s.io
-  resources:
-  - clusters
-  - clusters/status
-  verbs:
-  - get
-  - list
-  - watch
-  - create
-  - update
-  - patch
-  - delete
-- apiGroups:
-  - cluster.k8s.io
-  resources:
-  - machines
-  - machines/status
-  - machinedeployments
-  - machinedeployments/status
-  - machinesets
-  - machinesets/status
-  - machineclasses
-  verbs:
-  - get
-  - list
-  - watch
-  - create
-  - update
-  - patch
-  - delete
-- apiGroups:
-  - cluster.k8s.io
-  resources:
-  - clusters
-  - clusters/status
-  verbs:
-  - get
-  - list
-  - watch
-- apiGroups:
-  - ""
-  resources:
-  - nodes
-  - events
-  - secrets
-  verbs:
-  - get
-  - list
-  - watch
-  - create
-  - update
-  - patch
-  - delete
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  creationTimestamp: null
-  name: docker-provider-manager-rolebinding
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: docker-provider-manager-role
-subjects:
-- kind: ServiceAccount
-  name: default
-  namespace: docker-provider-system
-`
diff --git a/go.mod b/go.mod
index 4c600f8..cdd7200 100644
--- a/go.mod
+++ b/go.mod
@@ -18,12 +18,15 @@ require (
 	go.uber.org/atomic v1.4.0 // indirect
 	go.uber.org/multierr v1.1.0 // indirect
 	go.uber.org/zap v1.10.0 // indirect
+	golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // indirect
+	golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect
 	golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect
-	golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb // indirect
+	golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542 // indirect
+	golang.org/x/text v0.3.2 // indirect
 	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
-	k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b
+	k8s.io/api v0.0.0-20190703205437-39734b2a72fe
 	k8s.io/apiextensions-apiserver v0.0.0-20181213153335-0fe22c71c476 // indirect
-	k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d
+	k8s.io/apimachinery v0.0.0-20190703205208-4cfb76a8bf76
 	k8s.io/client-go v11.0.0+incompatible
 	k8s.io/cluster-bootstrap v0.0.0-20181213155137-5f9271efc2e7 // indirect
 	k8s.io/klog v0.3.0
@@ -37,6 +40,7 @@ require (
 replace (
 	k8s.io/api => k8s.io/api v0.0.0-20181213150558-05914d821849
 	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20181127025237-2b1284ed4c93
+	k8s.io/cli-runtime => k8s.io/cli-runtime v0.0.0-20190704050804-bd8686edbd81
 	k8s.io/client-go => k8s.io/client-go v10.0.0+incompatible
 	k8s.io/kubernetes => k8s.io/kubernetes v1.13.1
 )
diff --git a/go.sum b/go.sum
index c0030f2..cd08ffe 100644
--- a/go.sum
+++ b/go.sum
@@ -139,13 +139,18 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190514140710-3ec191127204 h1:4yG6GqBtw9C+UrLp6s2wtSniayy/Vd/3F7ffLE427XI=
 golang.org/x/net v0.0.0-20190514140710-3ec191127204/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -155,14 +160,18 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k=
-golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542 h1:6ZQFf1D2YYDDI7eSwW8adlkkavTB9sw5I24FVtEvNUQ=
+golang.org/x/sys v0.0.0-20190710143415-6ec70d6a5542/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
diff --git a/kind/controlplane/create.go b/kind/controlplane/create.go
new file mode 100644
index 0000000..1f07390
--- /dev/null
+++ b/kind/controlplane/create.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controlplane
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/pkg/errors"
+	"sigs.k8s.io/cluster-api-provider-docker/kind/actions"
+	"sigs.k8s.io/kind/pkg/cluster/nodes"
+	"sigs.k8s.io/kind/pkg/container/cri"
+)
+
+// CreateKindCluster sets up a KIND cluster and turns it into a CAPD control plane
+func CreateKindCluster(image, clusterName string) error {
+	lb, err := actions.SetUpLoadBalancer(clusterName)
+	if err != nil {
+		return errors.Wrap(err, "failed to create load balancer")
+	}
+
+	lbipv4, _, err := lb.IP()
+	if err != nil {
+		return errors.Wrap(err, "failed to get load balancer IP")
+	}
+
+	cpMounts := []cri.Mount{
+		{
+			ContainerPath: "/var/run/docker.sock",
+			HostPath:      "/var/run/docker.sock",
+		},
+		{
+			ContainerPath: "/var/lib/docker",
+			HostPath:      "/var/lib/docker",
+		},
+	}
+
+	cp, err := actions.CreateControlPlane(clusterName, fmt.Sprintf("%s-control-plane", clusterName), lbipv4, "v1.14.2", cpMounts)
+	if err != nil {
+		return errors.Wrap(err, "couldn't create control plane")
+	}
+
+	if !nodes.WaitForReady(cp, time.Now().Add(5*time.Minute)) {
+		return errors.New("control plane was not ready in 5 minutes")
+	}
+
+	return nil
+}
diff --git a/kind/controlplane/kubeconfig.go b/kind/controlplane/kubeconfig.go
new file mode 100644
index 0000000..a828b71
--- /dev/null
+++ b/kind/controlplane/kubeconfig.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controlplane
+
+import (
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+	"sigs.k8s.io/kind/pkg/cluster"
+)
+
+// TODO: Add version number?
+const userAgent = "capdctl"
+
+// GetKubeconfig retrieves and parses the kind cluster config
+func GetKubeconfig(clusterName string) (*rest.Config, error) {
+	ctx := cluster.NewContext(clusterName)
+	path := ctx.KubeConfigPath()
+
+	cfg, err := clientcmd.BuildConfigFromFlags("", path)
+	if err != nil {
+		return nil, err
+	}
+
+	return rest.AddUserAgent(cfg, userAgent), nil
+}
diff --git a/objects/all.go b/objects/all.go
new file mode 100644
index 0000000..e7f328c
--- /dev/null
+++ b/objects/all.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package objects
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// GetManagementCluster returns all the objects needed to create a working CAPD management cluster
+func GetManagementCluster(version, capiImage, capdImage string) ([]runtime.Object, error) {
+	capiObjects, err := GetCAPI(version, capiImage)
+	if err != nil {
+		return []runtime.Object{}, err
+	}
+
+	namespaceObj := GetNamespace()
+	statefulSet := GetStatefulSet(capdImage)
+	clusterRole := GetClusterRole()
+	clusterRoleBinding := GetClusterRoleBinding()
+
+	return append(capiObjects,
+		&namespaceObj,
+		&statefulSet,
+		&clusterRole,
+		&clusterRoleBinding,
+	), nil
+}
diff --git a/objects/capi.go b/objects/capi.go
new file mode 100644
index 0000000..05169c6
--- /dev/null
+++ b/objects/capi.go
@@ -0,0 +1,152 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package objects
+
+import (
+	"archive/tar"
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/kubernetes/pkg/kubectl/scheme"
+)
+
+// getCAPIYAML should actually use kustomize to correctly build the manager yaml.
+// HACK: this is a hacked function
+func getCAPIYAML(version, capiImage string) (io.Reader, error) {
+	crds := []string{"crds", "rbac", "manager"}
+	releaseCode := fmt.Sprintf("https://github.com/kubernetes-sigs/cluster-api/archive/%s.tar.gz", version)
+
+	resp, err := http.Get(releaseCode)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	gz, err := gzip.NewReader(resp.Body)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	tgz := tar.NewReader(gz)
+	var buf bytes.Buffer
+
+	for {
+		header, err := tgz.Next()
+
+		if err == io.EOF {
+			break
+		}
+
+		if err != nil {
+			return nil, errors.WithStack(err)
+		}
+
+		switch header.Typeflag {
+		case tar.TypeDir:
+			continue
+		case tar.TypeReg:
+			for _, crd := range crds {
+				// Skip the kustomization files for now. Would like to use kustomize in future
+				if strings.HasSuffix(header.Name, "kustomization.yaml") {
+					continue
+				}
+
+				// This is a poor person's kustomize
+				if strings.HasSuffix(header.Name, "manager.yaml") {
+					var managerBuf bytes.Buffer
+					io.Copy(&managerBuf, tgz)
+					lines := strings.Split(managerBuf.String(), "\n")
+					for _, line := range lines {
+						if strings.Contains(line, "image:") {
+							buf.WriteString(strings.Replace(line, "image: controller:latest", fmt.Sprintf("image: %s", capiImage), 1))
+							buf.WriteString("\n")
+							continue
+						}
+						buf.WriteString(line)
+						buf.WriteString("\n")
+					}
+				}
+
+				// These files don't need kustomize at all.
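+				// The crds/ and rbac/ manifests are copied through
+				// unchanged, with a "---" separator appended after each
+				// file so the result is one multi-document YAML stream.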
+				if strings.Contains(header.Name, fmt.Sprintf("config/%s/", crd)) {
+					io.Copy(&buf, tgz)
+					fmt.Fprintln(&buf, "---")
+				}
+			}
+		}
+	}
+	return &buf, nil
+}
+
+func decodeCAPIObjects(yaml io.Reader) ([]runtime.Object, error) {
+	decoder := scheme.Codecs.UniversalDeserializer()
+	objects := []runtime.Object{}
+	readbuf := bufio.NewReader(yaml)
+	writebuf := &bytes.Buffer{}
+
+	for {
+		line, err := readbuf.ReadBytes('\n')
+		// End of an object, parse it
+		if err == io.EOF || bytes.Equal(line, []byte("---\n")) {
+
+			// Use unstructured because scheme may not know about CRDs
+			if writebuf.Len() > 1 {
+				obj, _, err := decoder.Decode(writebuf.Bytes(), nil, &unstructured.Unstructured{})
+				if err == nil {
+					objects = append(objects, obj)
+				} else {
+					return []runtime.Object{}, errors.Wrap(err, "couldn't decode CAPI object")
+				}
+			}
+
+			// If this was EOF rather than a "---" separator, stop reading
+			if err == io.EOF {
+				break
+			}
+
+			// No matter what happened, start over
+			writebuf.Reset()
+		} else if err != nil {
+			return []runtime.Object{}, errors.Wrap(err, "couldn't read YAML")
+		} else {
+			// Just an ordinary line
+			writebuf.Write(line)
+		}
+	}
+
+	return objects, nil
+}
+
+// GetCAPI retrieves the objects needed to create a CAPI control plane from Github and parses them into runtime.Objects
+func GetCAPI(version, capiImage string) ([]runtime.Object, error) {
+	reader, err := getCAPIYAML(version, capiImage)
+	if err != nil {
+		return []runtime.Object{}, err
+	}
+
+	return decodeCAPIObjects(reader)
+}
diff --git a/objects/cluster.go b/objects/cluster.go
new file mode 100644
index 0000000..61fc7bb
--- /dev/null
+++ b/objects/cluster.go
@@ -0,0 +1,116 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package objects
+
+import (
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	capi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
+)
+
+const controlPlaneSet = "controlplane"
+
+// GetMachineDeployment returns a worker node machine deployment object
+func GetMachineDeployment(name, namespace, clusterName, kubeletVersion string, replicas int32) capi.MachineDeployment {
+	labels := map[string]string{
+		"cluster.k8s.io/cluster-name": clusterName,
+		"set":                         "node",
+	}
+	return capi.MachineDeployment{
+		// TypeMeta must be filled in by hand: json.Marshal does not set
+		// apiVersion/kind, and kubectl rejects objects without them.
+		TypeMeta: meta.TypeMeta{
+			Kind:       "MachineDeployment",
+			APIVersion: capi.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: meta.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			Labels:    labels,
+		},
+		Spec: capi.MachineDeploymentSpec{
+			Replicas: &replicas,
+			Selector: meta.LabelSelector{
+				MatchLabels: labels,
+			},
+			Template: capi.MachineTemplateSpec{
+				ObjectMeta: meta.ObjectMeta{
+					Labels: labels,
+				},
+				Spec: capi.MachineSpec{
+					ProviderSpec: capi.ProviderSpec{},
+					Versions: capi.MachineVersionInfo{
+						Kubelet: kubeletVersion,
+					},
+				},
+			},
+		},
+	}
+}
+
+// GetCluster returns a cluster object with the given name and namespace
+func GetCluster(clusterName, namespace string) capi.Cluster {
+	return capi.Cluster{
+		TypeMeta: meta.TypeMeta{
+			Kind:       "Cluster",
+			APIVersion: capi.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: meta.ObjectMeta{
+			Name:      clusterName,
+			Namespace: namespace,
+		},
+		Spec: capi.ClusterSpec{
+			ClusterNetwork: capi.ClusterNetworkingConfig{
+				Services: capi.NetworkRanges{
+					CIDRBlocks: []string{"10.96.0.0/12"},
+				},
+				Pods: capi.NetworkRanges{
+					CIDRBlocks: []string{"192.168.0.0/16"},
+				},
+				ServiceDomain: "cluster.local",
+			},
+		},
+	}
+}
+
+// GetMachine returns a machine with the given parameters
+func GetMachine(name, namespace, clusterName, set, version string) capi.Machine {
+	machine := capi.Machine{
+		TypeMeta: meta.TypeMeta{
+			Kind:       "Machine",
+			APIVersion: capi.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: meta.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			Labels: map[string]string{
+				"cluster.k8s.io/cluster-name": clusterName,
+				"set":                         set,
+			},
+		},
+		Spec: capi.MachineSpec{
+			ProviderSpec: capi.ProviderSpec{},
+		},
+	}
+	if set == controlPlaneSet {
+		machine.Spec.Versions.ControlPlane = version
+	}
+	if set == "worker" {
+		machine.Spec.Versions.Kubelet = version
+	}
+
+	return machine
+}
diff --git a/objects/control_plane.go b/objects/control_plane.go
new file mode 100644
index 0000000..12cd532
--- /dev/null
+++ b/objects/control_plane.go
@@ -0,0 +1,140 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package objects
+
+import (
+	apps "k8s.io/api/apps/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
+)
+
+const namespace = "docker-provider-system"
+
+// GetNamespace returns a "docker-provider-system" namespace object
+func GetNamespace() core.Namespace {
+	return core.Namespace{
+		ObjectMeta: meta.ObjectMeta{
+			Labels: map[string]string{"controller-tools.k8s.io": "1.0"},
+			Name:   namespace,
+		},
+	}
+}
+
+var (
+	controlPlaneLabel = map[string]string{"control-plane": "controller-manager"}
+	hostPathSocket    = core.HostPathSocket
+	hostPathDirectory = core.HostPathDirectory
+	privileged        = true
+)
+
+const (
+	dockerSockVolumeName = "dockersock"
+	dockerSockPath       = "/var/run/docker.sock"
+	dockerLibVolumeName  = "dockerlib"
+	dockerLibPath        = "/var/lib/docker"
+)
+
+// GetStatefulSet returns a statefulset for running CAPD with the given image name
+func GetStatefulSet(image string) apps.StatefulSet {
+	return apps.StatefulSet{
+		ObjectMeta: meta.ObjectMeta{
+			Labels:    controlPlaneLabel,
+			Name:      "docker-provider-controller-manager",
+			Namespace: namespace,
+		},
+		Spec: apps.StatefulSetSpec{
+			Selector: &meta.LabelSelector{
+				MatchLabels: controlPlaneLabel,
+			},
+			ServiceName: "docker-provider-controller-manager-service",
+			Template: core.PodTemplateSpec{
+				ObjectMeta: meta.ObjectMeta{
+					Labels: controlPlaneLabel,
+				},
+				Spec: core.PodSpec{
+					Containers: []core.Container{
+						{
+							Name:  "capd-manager",
+							Image: image,
+							Command: []string{
+								"capd-manager",
+								"-v=3",
+								"-logtostderr=true",
+							},
+							VolumeMounts: []core.VolumeMount{
+								{
+									MountPath: dockerSockPath,
+									Name:      dockerSockVolumeName,
+								},
+								{
+									MountPath: dockerLibPath,
+									Name:      dockerLibVolumeName,
+								},
+							},
+							// The manager drives the host Docker daemon, so keep
+							// the privileged mode the old YAML manifest used.
+							SecurityContext: &core.SecurityContext{
+								Privileged: &privileged,
+							},
+						},
+					},
+					Volumes: []core.Volume{
+						{
+							Name: dockerSockVolumeName,
+							VolumeSource: core.VolumeSource{
+								HostPath: &core.HostPathVolumeSource{
+									Path: dockerSockPath,
+									Type: &hostPathSocket,
+								},
+							},
+						},
+						{
+							Name: dockerLibVolumeName,
+							VolumeSource: core.VolumeSource{
+								HostPath: &core.HostPathVolumeSource{
+									Path: dockerLibPath,
+									Type: &hostPathDirectory,
+								},
+							},
+						},
+					},
+					Tolerations: []core.Toleration{
+						{
+							Key:    constants.LabelNodeRoleMaster,
+							Effect: core.TaintEffectNoSchedule,
+						},
+						{
+							Key:      "CriticalAddonsOnly",
+							Operator: core.TolerationOpExists,
+						},
+						{
+							Key:      "node.alpha.kubernetes.io/notReady",
+							Operator: core.TolerationOpExists,
+							Effect:   core.TaintEffectNoExecute,
+						},
+						{
+							Key:      "node.alpha.kubernetes.io/unreachable",
+							Operator: core.TolerationOpExists,
+							Effect:   core.TaintEffectNoExecute,
+						},
+					},
+				},
+			},
+		},
+	}
+}
diff --git a/objects/rbac.go b/objects/rbac.go
new file mode 100644
index 0000000..a2946b2
--- /dev/null
+++ b/objects/rbac.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package objects
+
+import (
+	core "k8s.io/api/core/v1"
+	rbac "k8s.io/api/rbac/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	capi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
+)
+
+const clusterRoleName = "docker-provider-manager-role"
+
+// GetClusterRole returns the cluster role the CAPD manager needs to function properly
+func GetClusterRole() rbac.ClusterRole {
+	return rbac.ClusterRole{
+		ObjectMeta: meta.ObjectMeta{
+			Name: clusterRoleName,
+		},
+		Rules: []rbac.PolicyRule{
+			{
+				APIGroups: []string{
+					capi.SchemeGroupVersion.Group,
+				},
+				Resources: []string{
+					"clusters",
+					"clusters/status",
+				},
+				Verbs: []string{
+					"get",
+					"list",
+					"watch",
+					"create",
+					"update",
+					"patch",
+					"delete",
+				},
+			},
+			{
+				APIGroups: []string{
+					capi.SchemeGroupVersion.Group,
+				},
+				Resources: []string{
+					"machines",
+					"machines/status",
+					"machinedeployments",
+					"machinedeployments/status",
+					"machinesets",
+					"machinesets/status",
+					"machineclasses",
+				},
+				Verbs: []string{
+					"get",
+					"list",
+					"watch",
+					"create",
+					"update",
+					"patch",
+					"delete",
+				},
+			},
+			{
+				APIGroups: []string{
+					core.GroupName,
+				},
+				Resources: []string{
+					"nodes",
+					"events",
+					"secrets",
+				},
+				Verbs: []string{
+					"get",
+					"list",
+					"watch",
+					"create",
+					"update",
+					"patch",
+					"delete",
+				},
+			},
+		},
+	}
+}
+
+// GetClusterRoleBinding returns the binding for the role created by GetClusterRole
+func GetClusterRoleBinding() rbac.ClusterRoleBinding {
+	return rbac.ClusterRoleBinding{
+		ObjectMeta: meta.ObjectMeta{
+			Name: "docker-provider-manager-rolebinding",
+		},
+		RoleRef: rbac.RoleRef{
+			Kind:     "ClusterRole",
+			Name:     clusterRoleName,
+			APIGroup: rbac.GroupName,
+		},
+		Subjects: []rbac.Subject{{
+			Kind:      rbac.ServiceAccountKind,
+			Name:      "default",
+			Namespace: namespace,
+		}},
+	}
+}
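+
+// Note: the binding grants the role to the "default" ServiceAccount in the
+// docker-provider-system namespace, which is the account the capd-manager
+// StatefulSet in control_plane.go runs under, since its pod spec sets no
+// serviceAccountName of its own.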