From 3cb2772ad9b175c8e3850aa6711368053b742400 Mon Sep 17 00:00:00 2001 From: jessicaochen Date: Sun, 27 May 2018 17:14:34 -0700 Subject: [PATCH] Initial version of clusterctl create --- clusterctl/README.md | 7 +- .../clusterdeployer/clusterapiserver.go | 334 ++++++++++++++++++ clusterctl/clusterdeployer/clusterclient.go | 211 +++++++++++ .../clusterdeployer/clusterclient_test.go | 4 + .../clusterdeployer/clusterclientfactory.go | 12 + clusterctl/clusterdeployer/clusterdeployer.go | 285 ++++++++++++++- .../clusterdeployer/clusterdeployer_test.go | 304 ++++++++++++++-- .../clusterdeployer/minikube/minikube.go | 4 +- clusterctl/cmd/create_cluster.go | 44 ++- clusterctl/cmd/create_cluster_test.go | 55 ++- 10 files changed, 1211 insertions(+), 49 deletions(-) create mode 100644 clusterctl/clusterdeployer/clusterapiserver.go create mode 100644 clusterctl/clusterdeployer/clusterclient.go create mode 100644 clusterctl/clusterdeployer/clusterclient_test.go create mode 100644 clusterctl/clusterdeployer/clusterclientfactory.go diff --git a/clusterctl/README.md b/clusterctl/README.md index f65fe5b8e96e..7546ecbabb91 100644 --- a/clusterctl/README.md +++ b/clusterctl/README.md @@ -22,13 +22,10 @@ $ go build ### Creating a cluster - -**NOT YET SUPPORTED!** - Use [provider-specific deployer](../README.md) to create clusters till cluster creation is supported. - -1. Create a `cluster.yaml` and `machines.yaml` files configured for your cluster. See the provider specific templates and generation tools at `$GOPATH/src/sigs.k8s.io/cluster-api/clusterctl/examples/`. +1. Create `cluster.yaml`, `machines.yaml`, and `provider-components.yaml` files configured for your cluster. See the provider-specific templates and generation tools at `$GOPATH/src/sigs.k8s.io/cluster-api/clusterctl/examples/`. 2. Create a cluster ``` -clusterctl create cluster -c cluster.yaml -m machines.yaml +clusterctl create cluster --provider [google/terraform] -c cluster.yaml -m machines.yaml -p provider-components.yaml ``` Additional advanced flags can be found via help ``` diff --git a/clusterctl/clusterdeployer/clusterapiserver.go b/clusterctl/clusterdeployer/clusterapiserver.go new file mode 100644 index 000000000000..430187ced963 --- /dev/null +++ b/clusterctl/clusterdeployer/clusterapiserver.go @@ -0,0 +1,334 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package clusterdeployer + +import ( + "bytes" + "encoding/base64" + "fmt" + "os" + "text/template" + + "github.com/golang/glog" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/cert/triple" +) + +var apiServerImage = "gcr.io/k8s-cluster-api/cluster-apiserver:0.0.3" + +func init() { + if img, ok := os.LookupEnv("CLUSTER_API_SERVER_IMAGE"); ok { + apiServerImage = img + } +} + +type caCertParams struct { + caBundle string + tlsCrt string + tlsKey string +} + +func getApiServerCerts() (*caCertParams, error) { + const name = "clusterapi" + const namespace = corev1.NamespaceDefault + + caKeyPair, err := triple.NewCA(fmt.Sprintf("%s-certificate-authority", name)) + if err != nil { + return nil, fmt.Errorf("failed to create root-ca: %v", err) + } + + apiServerKeyPair, err := triple.NewServerKeyPair( + caKeyPair, + fmt.Sprintf("%s.%s.svc", name, namespace), + name, + namespace, + "cluster.local", + []string{}, + []string{}) + if err != nil { + return nil, fmt.Errorf("failed to create apiserver key pair: %v", err) + } + + certParams := &caCertParams{ + caBundle: base64.StdEncoding.EncodeToString(cert.EncodeCertPEM(caKeyPair.Cert)), + tlsKey: base64.StdEncoding.EncodeToString(cert.EncodePrivateKeyPEM(apiServerKeyPair.Key)), + tlsCrt: base64.StdEncoding.EncodeToString(cert.EncodeCertPEM(apiServerKeyPair.Cert)), + } + + return certParams, nil +} + +func GetApiServerYaml() (string, error) { + tmpl, err := template.New("config").Parse(ClusterAPIAPIServerConfigTemplate) + if err != nil { + return "", err + } + + certParms, err := getApiServerCerts() + if err != nil { + glog.Errorf("Error: %v", err) + return "", err + } + + type params struct { + Token string + APIServerImage string + ControllerManagerImage string + MachineControllerImage string + CABundle string + TLSCrt string + TLSKey string + } + + var tmplBuf bytes.Buffer + err = tmpl.Execute(&tmplBuf, params{ + APIServerImage: apiServerImage, + CABundle: certParms.caBundle, + TLSCrt: certParms.tlsCrt, + TLSKey: certParms.tlsKey, + }) + if err != nil { + return "", err + } + + return string(tmplBuf.Bytes()), nil +} + +const ClusterAPIAPIServerConfigTemplate = ` +apiVersion: apiregistration.k8s.io/v1beta1 +kind: APIService +metadata: + name: v1alpha1.cluster.k8s.io + labels: + api: clusterapi + apiserver: "true" +spec: + version: v1alpha1 + group: cluster.k8s.io + groupPriorityMinimum: 2000 + priority: 200 + service: + name: clusterapi + namespace: default + versionPriority: 10 + caBundle: {{ .CABundle }} +--- +apiVersion: v1 +kind: Service +metadata: + name: clusterapi + namespace: default + labels: + api: clusterapi + apiserver: "true" +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 443 + selector: + api: clusterapi + apiserver: "true" +--- +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: clusterapi-apiserver + namespace: default + labels: + api: clusterapi + apiserver: "true" +spec: + replicas: 1 + template: + metadata: + labels: + api: clusterapi + apiserver: "true" + spec: + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + key: node.alpha.kubernetes.io/notReady + operator: Exists + - effect: NoExecute + key: node.alpha.kubernetes.io/unreachable + operator: Exists + containers: + - name: apiserver + image: {{ .APIServerImage }} + volumeMounts: + - name: cluster-apiserver-certs + mountPath: /apiserver.local.config/certificates + 
readOnly: true + - name: config + mountPath: /etc/kubernetes + - name: certs + mountPath: /etc/ssl/certs + command: + - "./apiserver" + args: + - "--etcd-servers=http://etcd-clusterapi-svc:2379" + - "--tls-cert-file=/apiserver.local.config/certificates/tls.crt" + - "--tls-private-key-file=/apiserver.local.config/certificates/tls.key" + - "--audit-log-path=-" + - "--audit-log-maxage=0" + - "--audit-log-maxbackup=0" + - "--authorization-kubeconfig=/etc/kubernetes/admin.conf" + - "--kubeconfig=/etc/kubernetes/admin.conf" + resources: + requests: + cpu: 100m + memory: 20Mi + limits: + cpu: 100m + memory: 30Mi + volumes: + - name: cluster-apiserver-certs + secret: + secretName: cluster-apiserver-certs + - name: config + hostPath: + path: /etc/kubernetes + - name: certs + hostPath: + path: /etc/ssl/certs +--- +apiVersion: rbac.authorization.k8s.io/ +kind: RoleBinding +metadata: + name: clusterapi + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: default + namespace: default +--- +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + name: etcd-clusterapi + namespace: default +spec: + serviceName: "etcd" + replicas: 1 + template: + metadata: + labels: + app: etcd + spec: + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + key: node.alpha.kubernetes.io/notReady + operator: Exists + - effect: NoExecute + key: node.alpha.kubernetes.io/unreachable + operator: Exists + volumes: + - hostPath: + path: /var/lib/etcd2 + type: DirectoryOrCreate + name: etcd-data-dir + terminationGracePeriodSeconds: 10 + containers: + - name: etcd + image: quay.io/coreos/etcd:latest + imagePullPolicy: Always + resources: + requests: + cpu: 100m + memory: 20Mi + limits: + cpu: 100m + memory: 30Mi + env: + - name: ETCD_DATA_DIR + value: /etcd-data-dir + command: + - /usr/local/bin/etcd + - --listen-client-urls + - http://0.0.0.0:2379 + - --advertise-client-urls + - http://localhost:2379 + ports: + - containerPort: 2379 + volumeMounts: + - name: etcd-data-dir + mountPath: /etcd-data-dir + readinessProbe: + httpGet: + port: 2379 + path: /health + failureThreshold: 1 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + livenessProbe: + httpGet: + port: 2379 + path: /health + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 +--- +apiVersion: v1 +kind: Service +metadata: + name: etcd-clusterapi-svc + namespace: default + labels: + app: etcd +spec: + ports: + - port: 2379 + name: etcd + targetPort: 2379 + selector: + app: etcd +--- +apiVersion: v1 +kind: Secret +type: kubernetes.io/tls +metadata: + name: cluster-apiserver-certs + namespace: default + labels: + api: clusterapi + apiserver: "true" +data: + tls.crt: {{ .TLSCrt }} + tls.key: {{ .TLSKey }} +` diff --git a/clusterctl/clusterdeployer/clusterclient.go b/clusterctl/clusterdeployer/clusterclient.go new file mode 100644 index 000000000000..56a48b7fca53 --- /dev/null +++ b/clusterctl/clusterdeployer/clusterclient.go @@ -0,0 +1,211 @@ +package clusterdeployer + +import ( + "io/ioutil" + "os" + "os/exec" + + "fmt" + "github.com/golang/glog" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + 
"sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" + "sigs.k8s.io/cluster-api/util" + "strings" + "time" +) + +type clusterClient struct { + clientSet clientset.Interface + kubeconfigFile string + closeFn func() error +} + +func NewClusterClient(kubeconfig string) (*clusterClient, error) { + f, err := ioutil.TempFile("", "") + if err != nil { + return nil, err + } + defer f.Close() + f.WriteString(kubeconfig) + c, err := NewClusterClientFromFile(f.Name()) + if err != nil { + return nil, err + } + c.closeFn = c.removeKubeconfigFile + return c, nil +} + +func (c *clusterClient) removeKubeconfigFile() error { + return os.Remove(c.kubeconfigFile) +} + +func NewClusterClientFromFile(kubeconfigFile string) (*clusterClient, error) { + c, err := util.NewClientSet(kubeconfigFile) + if err != nil { + return nil, err + } + + return &clusterClient{ + kubeconfigFile: kubeconfigFile, + clientSet: c, + }, nil +} + +// Frees resources associated with the cluster client +func (c *clusterClient) Close() error { + if c.closeFn != nil { + return c.closeFn() + } + return nil +} + +func (c *clusterClient) Apply(manifest string) error { + return c.waitForKubectlApply(manifest) +} + +func (c *clusterClient) GetClusterObjects() ([]*clusterv1.Cluster, error) { + clusters := []*clusterv1.Cluster{} + // TODO: Iterate over all namespaces where we could have Cluster API Objects + clusterlist, err := c.clientSet.ClusterV1alpha1().Clusters(apiv1.NamespaceDefault).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + + for _, cluster := range clusterlist.Items { + cluster, err := c.clientSet.ClusterV1alpha1().Clusters(apiv1.NamespaceDefault).Get(cluster.Name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + clusters = append(clusters, cluster) + } + return clusters, nil +} + +func (c *clusterClient) GetMachineObjects() ([]*clusterv1.Machine, error) { + // TODO: Iterate over all namespaces where we could have Cluster API Objects + machines := []*clusterv1.Machine{} + machineslist, err := c.clientSet.ClusterV1alpha1().Machines(apiv1.NamespaceDefault).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + + for _, machine := range machineslist.Items { + if err != nil { + return nil, err + } + machine, err := c.clientSet.ClusterV1alpha1().Machines(apiv1.NamespaceDefault).Get(machine.Name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + machines = append(machines, machine) + } + return machines, nil +} + +func (c *clusterClient) CreateClusterObject(cluster *clusterv1.Cluster) error { + // TODO: Support specific namespaces + _, err := c.clientSet.ClusterV1alpha1().Clusters(apiv1.NamespaceDefault).Create(cluster) + return err +} + +func (c *clusterClient) CreateMachineObjects(machines []*clusterv1.Machine) error { + // TODO: Support specific namespaces + for _, machine := range machines { + createdMachine, err := c.clientSet.ClusterV1alpha1().Machines(apiv1.NamespaceDefault).Create(machine) + if err != nil { + return err + } + err = waitForMachineReady(c.clientSet, createdMachine) + if err != nil { + return err + } + } + return nil +} + +func (c *clusterClient) UpdateClusterObjectEndpoint(masterIP string) error { + clusters, err := c.GetClusterObjects() + if err != nil { + return err + } + if len(clusters) != 1 { + // TODO: Do not assume default namespace nor single cluster + return fmt.Errorf("More than the one expected cluster found %v", clusters) + } + cluster := clusters[0] + cluster.Status.APIEndpoints = append(cluster.Status.APIEndpoints, + 
clusterv1.APIEndpoint{ + Host: masterIP, + Port: 443, + }) + _, err = c.clientSet.ClusterV1alpha1().Clusters(apiv1.NamespaceDefault).UpdateStatus(cluster) + return err +} + +func (c *clusterClient) WaitForClusterV1alpha1Ready() error { + return waitForClusterResourceReady(c.clientSet) +} + +func (c *clusterClient) kubectlApply(manifest string) error { + r := strings.NewReader(manifest) + cmd := exec.Command("kubectl", "apply", "--kubeconfig", c.kubeconfigFile, "-f", "-") + cmd.Stdin = r + + out, err := cmd.CombinedOutput() + if err == nil { + return nil + } else { + return fmt.Errorf("couldn't kubectl apply: %v, output: %s", err, string(out)) + } +} + +func (c *clusterClient) waitForKubectlApply(manifest string) error { + err := util.Poll(500*time.Millisecond, 120*time.Second, func() (bool, error) { + glog.V(2).Infof("Waiting for kubectl apply...") + err := c.kubectlApply(manifest) + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + // TODO: do a cleaner detection of when server not yet available. + return false, nil + } + return false, err + } + + return true, nil + }) + + return err +} + +func waitForClusterResourceReady(cs clientset.Interface) error { + err := util.Poll(500*time.Millisecond, 120*time.Second, func() (bool, error) { + glog.V(2).Info("Waiting for Cluster v1alpha resources to become available...") + _, err := cs.Discovery().ServerResourcesForGroupVersion("cluster.k8s.io/v1alpha1") + if err == nil { + return true, nil + } + return false, nil + }) + + return err +} + +func waitForMachineReady(cs clientset.Interface, machine *clusterv1.Machine) error { + err := util.Poll(500*time.Millisecond, 120*time.Second, func() (bool, error) { + glog.V(2).Infof("Waiting for Machine %v to become ready...", machine.Name) + m, err := cs.ClusterV1alpha1().Machines(apiv1.NamespaceDefault).Get(machine.Name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + + // TODO: update once machine controllers have a way to indicate a machine has been provisoned. + // Seeing a node cannot be purely relied upon because the provisioned master will not be registering with + // the stack that provisions it. + ready := m.Status.NodeRef != nil || len(m.Annotations) > 0 + return ready, nil + }) + + return err +} diff --git a/clusterctl/clusterdeployer/clusterclient_test.go b/clusterctl/clusterdeployer/clusterclient_test.go new file mode 100644 index 000000000000..98d8719198fa --- /dev/null +++ b/clusterctl/clusterdeployer/clusterclient_test.go @@ -0,0 +1,4 @@ +package clusterdeployer + +// TODO: Test clusterclient. To do this properly, etcd and kubectl need to be on the box running the test. +// Placeholder till the presubmit images have the needed binaries. 
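For reference, a minimal caller-side sketch of how the cluster client above is meant to be driven; it simply mirrors the calls that `ClusterDeployer.Create` makes. The function name, kubeconfig string, and manifest are illustrative placeholders, and the sketch assumes it lives in the `clusterdeployer` package so the unexported type is visible. It is not part of this patch.

```go
// Illustrative only: not part of this change.
func exampleApplyAndList(kubeconfig, manifest string) error {
	client, err := NewClusterClient(kubeconfig) // writes the kubeconfig to a temp file
	if err != nil {
		return err
	}
	defer client.Close() // removes the temp kubeconfig file again

	// Apply retries "kubectl apply" until the target API server accepts connections.
	if err := client.Apply(manifest); err != nil {
		return err
	}
	// Block until the cluster.k8s.io/v1alpha1 group is actually served.
	if err := client.WaitForClusterV1alpha1Ready(); err != nil {
		return err
	}
	machines, err := client.GetMachineObjects()
	if err != nil {
		return err
	}
	for _, m := range machines {
		glog.Infof("machine %q has node ref: %v", m.Name, m.Status.NodeRef != nil)
	}
	return nil
}
```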
diff --git a/clusterctl/clusterdeployer/clusterclientfactory.go b/clusterctl/clusterdeployer/clusterclientfactory.go new file mode 100644 index 000000000000..db5ff1775d99 --- /dev/null +++ b/clusterctl/clusterdeployer/clusterclientfactory.go @@ -0,0 +1,12 @@ +package clusterdeployer + +type clusterClientFactory struct { +} + +func NewClusterClientFactory() ClusterClientFactory { + return &clusterClientFactory{} +} + +func (f *clusterClientFactory) ClusterClient(kubeconfig string) (ClusterClient, error) { + return NewClusterClient(kubeconfig) +} diff --git a/clusterctl/clusterdeployer/clusterdeployer.go b/clusterctl/clusterdeployer/clusterdeployer.go index 0574036570e8..945cb8622091 100644 --- a/clusterctl/clusterdeployer/clusterdeployer.go +++ b/clusterctl/clusterdeployer/clusterdeployer.go @@ -3,10 +3,22 @@ package clusterdeployer import ( "fmt" - "sigs.k8s.io/cluster-api/errors" + "github.com/golang/glog" + "os" + clustercommon "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/util" + "time" ) +// Provider specific logic. Logic here should eventually be optional & additive. +type ProviderDeployer interface { + // TODO: This requirement can be removed once after: https://github.com/kubernetes-sigs/cluster-api/issues/158 + GetIP(machine *clusterv1.Machine) (string, error) + // TODO: This requirement can be removed after: https://github.com/kubernetes-sigs/cluster-api/issues/160 + GetKubeConfig(master *clusterv1.Machine) (string, error) +} + // Can provision a kubernetes cluster type ClusterProvisioner interface { Create() error @@ -14,25 +26,290 @@ type ClusterProvisioner interface { GetKubeconfig() (string, error) } +// Provides interaction with a cluster +type ClusterClient interface { + Apply(string) error + WaitForClusterV1alpha1Ready() error + GetClusterObjects() ([]*clusterv1.Cluster, error) + GetMachineObjects() ([]*clusterv1.Machine, error) + CreateClusterObject(*clusterv1.Cluster) error + CreateMachineObjects([]*clusterv1.Machine) error + UpdateClusterObjectEndpoint(string) error + Close() error +} + +// Can create cluster clients +type ClusterClientFactory interface { + ClusterClient(string) (ClusterClient, error) +} + type ClusterDeployer struct { externalProvisioner ClusterProvisioner + clientFactory ClusterClientFactory + provider ProviderDeployer + providerComponents string + kubeconfigOutput string cleanupExternalCluster bool } -func New(externalProvisioner ClusterProvisioner, cleanupExternalCluster bool) *ClusterDeployer { +func New( + externalProvisioner ClusterProvisioner, + clientFactory ClusterClientFactory, + provider ProviderDeployer, + providerComponents string, + kubeconfigOutput string, + cleanupExternalCluster bool) *ClusterDeployer { return &ClusterDeployer{ externalProvisioner: externalProvisioner, + clientFactory: clientFactory, + provider: provider, + providerComponents: providerComponents, + kubeconfigOutput: kubeconfigOutput, cleanupExternalCluster: cleanupExternalCluster, } } // Creates the a cluster from the provided cluster definition and machine list. 
-func (d *ClusterDeployer) Create(_ *clusterv1.Cluster, _ []*clusterv1.Machine) error { +func (d *ClusterDeployer) Create(cluster *clusterv1.Cluster, machines []*clusterv1.Machine) error { + master, nodes, err := splitMachineRoles(machines) + if err != nil { + return fmt.Errorf("unable to separate master machines from node machines: %v", err) + } + + glog.V(1).Info("Spinning up external cluster.") if err := d.externalProvisioner.Create(); err != nil { return fmt.Errorf("could not create external control plane: %v", err) } if d.cleanupExternalCluster { defer d.externalProvisioner.Delete() } - return errors.NotImplementedError // Not fully functional yet. + + glog.V(1).Info("Applying Cluster API stack to external cluster.") + externalKubeconfig, err := d.externalProvisioner.GetKubeconfig() + if err != nil { + return fmt.Errorf("unable to get external cluster kubeconfig: %v", err) + } + externalClient, err := d.clientFactory.ClusterClient(externalKubeconfig) + if err != nil { + return fmt.Errorf("unable to create external client: %v", err) + } + defer func() { + err := externalClient.Close() + if err != nil { + glog.Error("Could not close external client") + } + }() + + glog.V(2).Info("Applying Cluster API APIServer to external cluster.") + err = d.ApplyClusterAPIApiserver(externalClient) + if err != nil { + return fmt.Errorf("unable to apply cluster apiserver to external cluster: %v", err) + } + glog.V(2).Info("Applying Cluster API Provider Components to external cluster.") + err = d.ApplyClusterAPIControllers(externalClient) + if err != nil { + return fmt.Errorf("unable to apply cluster api controllers to external cluster: %v", err) + } + + glog.V(1).Info("Provisioning internal cluster and master via external cluster.") + glog.V(2).Infof("Creating cluster %v", cluster.Name) + err = externalClient.CreateClusterObject(cluster) + if err != nil { + return fmt.Errorf("unable to create cluster object: %v", err) + } + + glog.V(2).Infof("Creating master %v", master.Name) + err = externalClient.CreateMachineObjects([]*clusterv1.Machine{master}) + if err != nil { + return fmt.Errorf("unable to create master machine: %v", err) + } + + // Update cluster endpoint. Needed till this logic moves into cluster controller. + // TODO: https://github.com/kubernetes-sigs/cluster-api/issues/158 + // Fetch freshly created master.
+ updatedMachines, err := externalClient.GetMachineObjects() + if err != nil { + return fmt.Errorf("unable to fetch machines: %v", err) + } + master, _, err = splitMachineRoles(updatedMachines) + if err != nil { + return fmt.Errorf("unable to fetch master machine: %v", err) + } + masterIP, err := d.provider.GetIP(master) + if err != nil { + return fmt.Errorf("unable to get master IP: %v", err) + } + err = externalClient.UpdateClusterObjectEndpoint(masterIP) + if err != nil { + return fmt.Errorf("unable to update cluster endpoint: %v", err) + } + + glog.V(1).Info("Getting internal cluster kubeconfig.") + internalKubeconfig, err := waitForKubeconfigReady(d.provider, master) + if err != nil { + return fmt.Errorf("unable to get internal cluster kubeconfig: %v", err) + } + err = d.writeKubeconfig(internalKubeconfig) + if err != nil { + return err + } + defer glog.Infof("You can now access your cluster with kubectl --kubeconfig %v", d.kubeconfigOutput) + internalClient, err := d.clientFactory.ClusterClient(internalKubeconfig) + if err != nil { + return fmt.Errorf("unable to create internal cluster client: %v", err) + } + defer func() { + err := internalClient.Close() + if err != nil { + glog.Error("Could not close internal client") + } + }() + + glog.V(1).Info("Applying Cluster API stack to internal cluster.") + glog.V(2).Info("Applying Cluster API API server to internal cluster.") + err = d.ApplyClusterAPIApiserver(internalClient) + if err != nil { + return fmt.Errorf("unable to apply cluster api apiserver to internal cluster: %v", err) + } + + glog.V(2).Info("Pivoting Cluster API objects from external to internal cluster.") + err = pivot(externalClient, internalClient) + if err != nil { + return fmt.Errorf("unable to pivot cluster API objects: %v", err) + } + + glog.V(2).Info("Applying Cluster API provider components to internal cluster.") + err = d.ApplyClusterAPIControllers(internalClient) + if err != nil { + return fmt.Errorf("unable to apply cluster api provider components to internal cluster: %v", err) + } + + // Update cluster endpoint as the status does not always move. + // Needed till this logic moves into cluster controller.
+ // TODO: https://github.com/kubernetes-sigs/cluster-api/issues/158 + err = internalClient.UpdateClusterObjectEndpoint(masterIP) + if err != nil { + return fmt.Errorf("unable to update cluster endpoint: %v", err) + } + + glog.V(2).Info("Creating node machines in internal cluster.") + err = internalClient.CreateMachineObjects(nodes) + if err != nil { + return fmt.Errorf("unable to create node machines: %v", err) + } + + return nil +} + +func (d *ClusterDeployer) ApplyClusterAPIApiserver(client ClusterClient) error { + yaml, err := GetApiServerYaml() + if err != nil { + return fmt.Errorf("unable to generate apiserver yaml: %v", err) + } + + err = client.Apply(yaml) + if err != nil { + return fmt.Errorf("unable to apply apiserver yaml: %v", err) + } + return client.WaitForClusterV1alpha1Ready() +} + +func (d *ClusterDeployer) ApplyClusterAPIControllers(client ClusterClient) error { + return client.Apply(d.providerComponents) +} + +func (d *ClusterDeployer) writeKubeconfig(kubeconfig string) error { + os.Remove(d.kubeconfigOutput) + f, err := os.Create(d.kubeconfigOutput) + if err != nil { + return err + } + defer f.Close() + f.WriteString(kubeconfig) + return nil +} + +func waitForKubeconfigReady(provider ProviderDeployer, machine *clusterv1.Machine) (string, error) { + kubeconfig := "" + err := util.Poll(500*time.Millisecond, 120*time.Second, func() (bool, error) { + glog.V(2).Infof("Waiting for kubeconfig on %v to become ready...", machine.Name) + k, err := provider.GetKubeConfig(machine) + if err != nil { + glog.V(4).Infof("error getting kubeconfig: %v", err) + return false, nil + } + if k == "" { + return false, nil + } + kubeconfig = k + return true, nil + }) + + return kubeconfig, err +} + +func pivot(from, to ClusterClient) error { + if err := from.WaitForClusterV1alpha1Ready(); err != nil { + return fmt.Errorf("Cluster v1alpha1 resource not ready on source cluster.") + } + + if err := to.WaitForClusterV1alpha1Ready(); err != nil { + return fmt.Errorf("Cluster v1alpha1 resource not ready on target cluster.") + } + + // TODO: As we move objects, update any references (eg.
owner ref) in following object to new UID + clusters, err := from.GetClusterObjects() + if err != nil { + return err + } + + for _, cluster := range clusters { + cluster.SetResourceVersion("") + err = to.CreateClusterObject(cluster) + if err != nil { + return err + } + glog.Infof("Moved Cluster '%s'", cluster.GetName()) + } + + machines, err := from.GetMachineObjects() + if err != nil { + return err + } + + for _, machine := range machines { + machine.SetResourceVersion("") + err = to.CreateMachineObjects([]*clusterv1.Machine{machine}) + if err != nil { + return err + } + glog.Infof("Moved Machine '%s'", machine.GetName()) + } + return nil +} + +// Split the incoming machine set into the master and the non-masters +func splitMachineRoles(machines []*clusterv1.Machine) (*clusterv1.Machine, []*clusterv1.Machine, error) { + nodes := []*clusterv1.Machine{} + masters := []*clusterv1.Machine{} + for _, machine := range machines { + if containsMasterRole(machine.Spec.Roles) { + masters = append(masters, machine) + } else { + nodes = append(nodes, machine) + } + } + if len(masters) != 1 { + return nil, nil, fmt.Errorf("expected one master, got: %v", len(masters)) + } + return masters[0], nodes, nil +} + +func containsMasterRole(roles []clustercommon.MachineRole) bool { + for _, role := range roles { + if role == clustercommon.MasterRole { + return true + } + } + return false } diff --git a/clusterctl/clusterdeployer/clusterdeployer_test.go b/clusterctl/clusterdeployer/clusterdeployer_test.go index f8005d753097..5c094e055eb8 100644 --- a/clusterctl/clusterdeployer/clusterdeployer_test.go +++ b/clusterctl/clusterdeployer/clusterdeployer_test.go @@ -4,12 +4,18 @@ import ( "fmt" "sigs.k8s.io/cluster-api/clusterctl/clusterdeployer" "testing" + + "io/ioutil" + "os" + clustercommon "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" + clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) type testClusterProvisioner struct { err error clusterCreated bool clusterExists bool + kubeconfig string } func (p *testClusterProvisioner) Create() error { @@ -30,43 +36,275 @@ func (p *testClusterProvisioner) Delete() error { } func (p *testClusterProvisioner) GetKubeconfig() (string, error) { - return "", p.err + return p.kubeconfig, p.err +} + +type testClusterClient struct { + ApplyErr error + WaitForClusterV1alpha1ReadyErr error + GetClusterObjectsErr error + GetMachineObjectsErr error + CreateClusterObjectErr error + CreateMachineObjectsErr error + UpdateClusterObjectEndpointErr error + CloseErr error + + clusters []*clusterv1.Cluster + machines []*clusterv1.Machine +} + +func (c *testClusterClient) Apply(string) error { + return c.ApplyErr +} + +func (c *testClusterClient) WaitForClusterV1alpha1Ready() error { + return c.WaitForClusterV1alpha1ReadyErr +} + +func (c *testClusterClient) GetClusterObjects() ([]*clusterv1.Cluster, error) { + return c.clusters, c.GetClusterObjectsErr +} + +func (c *testClusterClient) GetMachineObjects() ([]*clusterv1.Machine, error) { + return c.machines, c.GetMachineObjectsErr +} + +func (c *testClusterClient) CreateClusterObject(cluster *clusterv1.Cluster) error { + if c.CreateClusterObjectErr != nil { + return c.CreateClusterObjectErr + } + c.clusters = append(c.clusters, cluster) + return nil +} +func (c *testClusterClient) CreateMachineObjects(machines []*clusterv1.Machine) error { + if c.CreateMachineObjectsErr != nil { + return c.CreateMachineObjectsErr + } + c.machines = append(c.machines, machines...) 
+ return nil +} +func (c *testClusterClient) UpdateClusterObjectEndpoint(string) error { + return c.UpdateClusterObjectEndpointErr +} +func (c *testClusterClient) Close() error { + return c.CloseErr +} + +type testClusterClientFactory struct { + ClusterClientErr error + clients map[string]*testClusterClient +} + +func newTestClusterClientFactory() *testClusterClientFactory { + return &testClusterClientFactory{ + clients: map[string]*testClusterClient{}, + } +} + +func (f *testClusterClientFactory) ClusterClient(kubeconfig string) (clusterdeployer.ClusterClient, error) { + if f.ClusterClientErr != nil { + return nil, f.ClusterClientErr + } + return f.clients[kubeconfig], nil +} + +type testProviderDeployer struct { + GetIPErr error + GetKubeConfigErr error + ip string + kubeconfig string +} + +func (d *testProviderDeployer) GetIP(machine *clusterv1.Machine) (string, error) { + return d.ip, d.GetIPErr +} +func (d *testProviderDeployer) GetKubeConfig(master *clusterv1.Machine) (string, error) { + return d.kubeconfig, d.GetKubeConfigErr } func TestCreate(t *testing.T) { + const externalKubeconfig = "external" + const internalKubeconfig = "internal" + var testcases = []struct { - name string - provisionExternalErr error - cleanupExternal bool - expectErr bool - expectExternalExists bool - expectExternalCreated bool + name string + provisionExternalErr error + factoryClusterClientErr error + externalClient *testClusterClient + internalClient *testClusterClient + cleanupExternal bool + expectErr bool + expectExternalExists bool + expectExternalCreated bool + expectedInternalClusters int + expectedInternalMachines int }{ { - name: "success", - cleanupExternal: true, - expectExternalExists: false, - expectExternalCreated: true, - expectErr: true, + name: "success", + internalClient: &testClusterClient{}, + externalClient: &testClusterClient{}, + cleanupExternal: true, + expectExternalExists: false, + expectExternalCreated: true, + expectedInternalClusters: 1, + expectedInternalMachines: 2, }, { - name: "success no cleaning external", - cleanupExternal: false, - expectExternalExists: true, - expectExternalCreated: true, - expectErr: true, + name: "success no cleaning external", + internalClient: &testClusterClient{}, + externalClient: &testClusterClient{}, + cleanupExternal: false, + expectExternalExists: true, + expectExternalCreated: true, + expectedInternalClusters: 1, + expectedInternalMachines: 2, }, { name: "fail provision external cluster", + internalClient: &testClusterClient{}, + externalClient: &testClusterClient{}, provisionExternalErr: fmt.Errorf("Test failure"), expectErr: true, }, + { + name: "fail create clients", + internalClient: &testClusterClient{}, + externalClient: &testClusterClient{}, + cleanupExternal: true, + expectExternalCreated: true, + factoryClusterClientErr: fmt.Errorf("Test failure"), + expectErr: true, + }, + { + name: "fail apply yaml to external cluster", + internalClient: &testClusterClient{}, + externalClient: &testClusterClient{ApplyErr: fmt.Errorf("Test failure")}, + cleanupExternal: true, + expectExternalCreated: true, + expectErr: true, + }, + { + name: "fail waiting for api ready on external cluster", + internalClient: &testClusterClient{}, + externalClient: &testClusterClient{WaitForClusterV1alpha1ReadyErr: fmt.Errorf("Test failure")}, + cleanupExternal: true, + expectExternalCreated: true, + expectErr: true, + }, + { + name: "fail getting external cluster objects", + internalClient: &testClusterClient{}, + externalClient: 
&testClusterClient{GetClusterObjectsErr: fmt.Errorf("Test failure")}, + cleanupExternal: true, + expectExternalCreated: true, + expectErr: true, + }, + { + name: "fail getting external machine objects", + internalClient: &testClusterClient{}, + externalClient: &testClusterClient{GetMachineObjectsErr: fmt.Errorf("Test failure")}, + cleanupExternal: true, + expectExternalCreated: true, + expectErr: true, + }, + { + name: "fail create cluster", + internalClient: &testClusterClient{}, + externalClient: &testClusterClient{CreateClusterObjectErr: fmt.Errorf("Test failure")}, + cleanupExternal: true, + expectExternalCreated: true, + expectErr: true, + }, + { + name: "fail create master", + internalClient: &testClusterClient{}, + externalClient: &testClusterClient{CreateMachineObjectsErr: fmt.Errorf("Test failure")}, + cleanupExternal: true, + expectExternalCreated: true, + expectErr: true, + }, + { + name: "fail update external cluster endpoint", + internalClient: &testClusterClient{}, + externalClient: &testClusterClient{UpdateClusterObjectEndpointErr: fmt.Errorf("Test failure")}, + cleanupExternal: true, + expectExternalCreated: true, + expectErr: true, + }, + { + name: "fail apply yaml to internal cluster", + internalClient: &testClusterClient{ApplyErr: fmt.Errorf("Test failure")}, + externalClient: &testClusterClient{}, + cleanupExternal: true, + expectExternalCreated: true, + expectErr: true, + }, + { + name: "fail wait for api ready on internal cluster", + internalClient: &testClusterClient{WaitForClusterV1alpha1ReadyErr: fmt.Errorf("Test failure")}, + externalClient: &testClusterClient{}, + cleanupExternal: true, + expectExternalCreated: true, + expectErr: true, + }, + { + name: "fail create internal cluster", + internalClient: &testClusterClient{CreateClusterObjectErr: fmt.Errorf("Test failure")}, + externalClient: &testClusterClient{}, + cleanupExternal: true, + expectExternalCreated: true, + expectErr: true, + }, + { + name: "fail create nodes", + internalClient: &testClusterClient{CreateMachineObjectsErr: fmt.Errorf("Test failure")}, + externalClient: &testClusterClient{}, + cleanupExternal: true, + expectExternalCreated: true, + expectedInternalClusters: 1, + expectErr: true, + }, + { + name: "fail update cluster endpoint internal", + internalClient: &testClusterClient{UpdateClusterObjectEndpointErr: fmt.Errorf("Test failure")}, + externalClient: &testClusterClient{}, + cleanupExternal: true, + expectExternalCreated: true, + expectedInternalClusters: 1, + expectedInternalMachines: 1, + expectErr: true, + }, } for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - p := &testClusterProvisioner{err: testcase.provisionExternalErr} - d := clusterdeployer.New(p, testcase.cleanupExternal) - err := d.Create(nil, nil) + kubeconfigOutFile, err := ioutil.TempFile("", "") + if err != nil { + t.Fatalf("could not provision temp file:%v", err) + } + kubeconfigOutFile.Close() + kubeconfigOut := kubeconfigOutFile.Name() + defer os.Remove(kubeconfigOut) + + // Create provisioners & clients and hook them up + p := &testClusterProvisioner{ + err: testcase.provisionExternalErr, + kubeconfig: externalKubeconfig, + } + pd := &testProviderDeployer{} + pd.kubeconfig = internalKubeconfig + f := newTestClusterClientFactory() + f.clients[externalKubeconfig] = testcase.externalClient + f.clients[internalKubeconfig] = testcase.internalClient + f.ClusterClientErr = testcase.factoryClusterClientErr + + // Create + inputCluster := &clusterv1.Cluster{} + inputCluster.Name = "test-cluster" + 
inputMachines := generateMachines() + d := clusterdeployer.New(p, f, pd, "", kubeconfigOut, testcase.cleanupExternal) + err = d.Create(inputCluster, inputMachines) + + // Validate if (testcase.expectErr && err == nil) || (!testcase.expectErr && err != nil) { t.Fatalf("Unexpected returned error. Got: %v, Want Err: %v", err, testcase.expectErr) } @@ -76,6 +314,32 @@ func TestCreate(t *testing.T) { if testcase.expectExternalCreated != p.clusterCreated { t.Errorf("Unexpected external cluster provisioning. Got: %v, Want: %v", p.clusterCreated, testcase.expectExternalCreated) } + if testcase.expectedInternalClusters != len(testcase.internalClient.clusters) { + t.Fatalf("Unexpected cluster count. Got: %v, Want: %v", len(testcase.internalClient.clusters), testcase.expectedInternalClusters) + } + if testcase.expectedInternalClusters > 0 && inputCluster.Name != testcase.internalClient.clusters[0].Name { + t.Errorf("Provisioned cluster has unexpected name. Got: %v, Want: %v", testcase.internalClient.clusters[0].Name, inputCluster.Name) + } + + if testcase.expectedInternalMachines != len(testcase.internalClient.machines) { + t.Fatalf("Unexpected machine count. Got: %v, Want: %v", len(testcase.internalClient.machines), testcase.expectedInternalMachines) + } + if testcase.expectedInternalMachines == len(inputMachines) { + for i := range inputMachines { + if inputMachines[i].Name != testcase.internalClient.machines[i].Name { + t.Fatalf("Unexpected machine name at %v. Got: %v, Want: %v", i, testcase.internalClient.machines[i].Name, inputMachines[i].Name) + } + } + } }) } } + +func generateMachines() []*clusterv1.Machine { + master := &clusterv1.Machine{} + master.Name = "test-master" + master.Spec.Roles = []clustercommon.MachineRole{clustercommon.MasterRole} + node := &clusterv1.Machine{} + node.Name = "test.Node" + return []*clusterv1.Machine{master, node} +} diff --git a/clusterctl/clusterdeployer/minikube/minikube.go b/clusterctl/clusterdeployer/minikube/minikube.go index b3bbc5458051..f29897a95095 100644 --- a/clusterctl/clusterdeployer/minikube/minikube.go +++ b/clusterctl/clusterdeployer/minikube/minikube.go @@ -26,11 +26,11 @@ func New(vmDriver string) *Minikube { var minikubeExec = func(env []string, args ...string) (string, error) { const executable = "minikube" - glog.V(5).Infof("Running: %v %v", executable, args) + glog.V(3).Infof("Running: %v %v", executable, args) cmd := exec.Command(executable, args...) cmd.Env = env cmdOut, err := cmd.CombinedOutput() - glog.V(4).Infof("Ran: %v %v Output: %v", executable, args, string(cmdOut)) + glog.V(2).Infof("Ran: %v %v Output: %v", executable, args, string(cmdOut)) return string(cmdOut), err } diff --git a/clusterctl/cmd/create_cluster.go b/clusterctl/cmd/create_cluster.go index 176b297d4d91..d104f39e20ef 100644 --- a/clusterctl/cmd/create_cluster.go +++ b/clusterctl/cmd/create_cluster.go @@ -17,21 +17,27 @@ limitations under the License.
package cmd import ( + "fmt" "github.com/ghodss/yaml" "github.com/golang/glog" "github.com/spf13/cobra" "io/ioutil" + "sigs.k8s.io/cluster-api/cloud/google" "sigs.k8s.io/cluster-api/clusterctl/clusterdeployer" "sigs.k8s.io/cluster-api/clusterctl/clusterdeployer/minikube" + "sigs.k8s.io/cluster-api/errors" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" "sigs.k8s.io/cluster-api/util" ) type CreateOptions struct { - Cluster string - Machine string + Cluster string + Machine string + ProviderComponents string CleanupExternalCluster bool - VmDriver string + VmDriver string + Provider string + KubeonfigOutput string } var co = &CreateOptions{} @@ -64,7 +70,21 @@ func RunCreate(co *CreateOptions) error { } mini := minikube.New(co.VmDriver) - d := clusterdeployer.New(mini, co.CleanupExternalCluster) + pd, err := getProvider(co.Provider) + if err != nil { + return err + } + pc, err := ioutil.ReadFile(co.ProviderComponents) + if err != nil { + return err + } + d := clusterdeployer.New( + mini, + clusterdeployer.NewClusterClientFactory(), + pd, + string(pc), + co.KubeonfigOutput, + co.CleanupExternalCluster) err = d.Create(c, m) return err } @@ -73,10 +93,14 @@ func init() { // Required flags createClusterCmd.Flags().StringVarP(&co.Cluster, "cluster", "c", "", "A yaml file containing cluster object definition") createClusterCmd.Flags().StringVarP(&co.Machine, "machines", "m", "", "A yaml file containing machine object definition(s)") + createClusterCmd.Flags().StringVarP(&co.ProviderComponents, "provider-components", "p", "", "A yaml file containing cluster api provider controllers and supporting objects") + // TODO: Remove as soon as code allows https://github.com/kubernetes-sigs/cluster-api/issues/157 + createClusterCmd.Flags().StringVarP(&co.Provider, "provider", "", "", "Which provider deployment logic to use (google/terraform)") // Optional flags createClusterCmd.Flags().BoolVarP(&co.CleanupExternalCluster, "cleanup-external-cluster", "", true, "Whether to cleanup the external cluster after bootstrap") createClusterCmd.Flags().StringVarP(&co.VmDriver, "vm-driver", "", "", "Which vm driver to use for minikube") + createClusterCmd.Flags().StringVarP(&co.KubeonfigOutput, "kubeconfig-out", "", "kubeconfig", "where to output the kubeconfig for the provisioned cluster.") } func parseClusterYaml(file string) (*clusterv1.Cluster, error) { @@ -112,3 +136,15 @@ func parseMachinesYaml(file string) ([]*clusterv1.Machine, error) { return util.MachineP(list.Items), nil } + +func getProvider(provider string) (clusterdeployer.ProviderDeployer, error) { + switch provider { + case "google": + return google.NewMachineActuator(google.MachineActuatorParams{}) + case "terraform": + // TODO: Actually hook up terraform + return nil, errors.NotImplementedError + default: + return nil, fmt.Errorf("Unrecognized provider %v", provider) + } +} diff --git a/clusterctl/cmd/create_cluster_test.go b/clusterctl/cmd/create_cluster_test.go index 967aa415551d..769bc8a0c554 100644 --- a/clusterctl/cmd/create_cluster_test.go +++ b/clusterctl/cmd/create_cluster_test.go @@ -29,19 +29,19 @@ func TestParseClusterYaml(t *testing.T) { } }) var testcases = []struct { - name string - contents string + name string + contents string expectedName string - expectErr bool + expectErr bool }{ { - name: "valid file", - contents: validCluster, + name: "valid file", + contents: validCluster, expectedName: "cluster1", }, { - name: "gibberish in file", - contents: `blah ` + validCluster + ` blah`, + name: "gibberish in file", + contents: 
`blah ` + validCluster + ` blah`, expectErr: true, }, } @@ -78,19 +78,19 @@ func TestParseMachineYaml(t *testing.T) { } }) var testcases = []struct { - name string - contents string - expectErr bool + name string + contents string + expectErr bool expectedMachineCount int }{ { - name: "valid file", - contents: validMachines, + name: "valid file", + contents: validMachines, expectedMachineCount: 1, }, { - name: "gibberish in file", - contents: `blah ` + validMachines + ` blah`, + name: "gibberish in file", + contents: `blah ` + validMachines + ` blah`, expectErr: true, }, } @@ -119,6 +119,33 @@ func TestParseMachineYaml(t *testing.T) { } } +func TestGetProvider(t *testing.T) { + var testcases = []struct { + provider string + expectErr bool + }{ + { + provider: "google", + }, + { + provider: "terraform", + expectErr: true, + }, + { + provider: "blah blah", + expectErr: true, + }, + } + for _, testcase := range testcases { + t.Run(testcase.provider, func(t *testing.T) { + _, err := getProvider(testcase.provider) + if (testcase.expectErr && err == nil) || (!testcase.expectErr && err != nil) { + t.Fatalf("Unexpected returned error. Got: %v, Want Err: %v", err, testcase.expectErr) + } + }) + } +} + func createTempFile(contents string) (string, error) { f, err := ioutil.TempFile("", "") if err != nil {
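To tie the command-side pieces together, here is a condensed, illustrative sketch of the flow that RunCreate wires up. It assumes it lives in the same `cmd` package so the unexported helpers are visible; the file paths, provider name, and kubeconfig output path are placeholders, not defaults introduced by this change.

```go
// Illustrative only: a condensed restatement of what RunCreate does end to end.
func exampleCreateFlow() error {
	c, err := parseClusterYaml("cluster.yaml") // placeholder path
	if err != nil {
		return err
	}
	m, err := parseMachinesYaml("machines.yaml") // placeholder path
	if err != nil {
		return err
	}
	pd, err := getProvider("google") // provider-specific GetIP/GetKubeConfig logic
	if err != nil {
		return err
	}
	pc, err := ioutil.ReadFile("provider-components.yaml") // controllers to apply
	if err != nil {
		return err
	}
	d := clusterdeployer.New(
		minikube.New(""), // external bootstrap cluster, default vm driver
		clusterdeployer.NewClusterClientFactory(),
		pd,
		string(pc),
		"kubeconfig", // where the internal cluster's kubeconfig is written
		true)         // tear down the external cluster afterwards
	return d.Create(c, m)
}
```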