diff --git a/cmd/aws-actuator/README.md b/cmd/aws-actuator/README.md
index 922bbb2785..23d6da416e 100644
--- a/cmd/aws-actuator/README.md
+++ b/cmd/aws-actuator/README.md
@@ -109,107 +109,102 @@ $ cat examples/worker-user-data.sh | base64
 After some time the kubernetes cluster with the control plane (master node) and the worker node gets provisioned and the worker joins the cluster.
-### All in one
-
-Alternatively, you can run the `aws-actuator bootstrap` that does all the above (up to step 2.):
-
-```sh
-./bin/aws-actuator bootstrap --manifests examples --environment-id UNIQUE_ID
-INFO[0000] Reading cluster manifest from examples/cluster.yaml
-INFO[0000] Reading master machine manifest from examples/master-machine.yaml
-INFO[0000] Reading master user data manifest from examples/master-userdata.yaml
-INFO[0000] Reading worker machine manifest from examples/worker-machine.yaml
-INFO[0000] Reading worker user data manifest from examples/worker-userdata.yaml
-INFO[0000] Creating master machine
-DEBU[0000] Describing AMI based on filters bootstrap=create-master-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-master
-DEBU[0007] Describing security groups based on filters bootstrap=create-master-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-master
-DEBU[0008] Describing subnets based on filters bootstrap=create-master-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-master
-WARN[0008] More than one subnet id returned, only first one will be used bootstrap=create-master-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-master
-INFO[0009] Master machine created with ipv4: 10.0.102.149, InstanceId: i-0cd65d6ce5640d343
-INFO[0009] Generating worker user data for master listening at 10.0.102.149
-INFO[0009] Creating worker machine
-INFO[0009] no stopped instances found for machine UNIQUE_ID-aws-actuator-testing-machine-worker bootstrap=create-worker-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-worker
-DEBU[0009] Describing AMI based on filters bootstrap=create-worker-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-worker
-DEBU[0014] Describing security groups based on filters bootstrap=create-worker-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-worker
-DEBU[0014] Describing subnets based on filters bootstrap=create-worker-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-worker
-WARN[0015] More than one subnet id returned, only first one will be used bootstrap=create-worker-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-worker
-INFO[0016] Worker machine created with InstanceId: i-0763fb7fafc607ecf
-```
-
 ## Bootstrapping cluster API stack
-Running the `aws-actuator bootstrap` with `--cluster-api-stack` will deploy the cluster API stack as well.
+The following command deploys a Kubernetes cluster with the cluster API stack
+running inside it. Worker nodes are deployed through a machineset. It is assumed both `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` are set.
+It takes some time (roughly 3 minutes) before the worker node joins the cluster.
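+
+For example, export the credentials in the shell the command is run from (the
+values below are only placeholders):
+
+```sh
+export AWS_ACCESS_KEY_ID=<access key id>
+export AWS_SECRET_ACCESS_KEY=<secret access key>
+```
+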
```sh -$ ./bin/aws-actuator bootstrap --manifests examples --environment-id UNIQUE_ID --cluster-api-stack +$ ./bin/aws-actuator bootstrap --manifests examples --environment-id UNIQUE_ID INFO[0000] Reading cluster manifest from examples/cluster.yaml INFO[0000] Reading master machine manifest from examples/master-machine.yaml INFO[0000] Reading master user data manifest from examples/master-userdata.yaml -INFO[0000] Reading worker machine manifest from examples/worker-machine.yaml -INFO[0000] Reading worker user data manifest from examples/worker-userdata.yaml INFO[0000] Creating master machine -DEBU[0000] Describing AMI based on filters bootstrap=create-master-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-master -DEBU[0007] Describing security groups based on filters bootstrap=create-master-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-master -DEBU[0007] Describing subnets based on filters bootstrap=create-master-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-master -WARN[0007] More than one subnet id returned, only first one will be used bootstrap=create-master-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-master -INFO[0008] Master machine created with ipv4: 10.0.101.159, InstanceId: i-04c41ad24e885a8c6 -INFO[0008] Generating worker user data for master listening at 10.0.101.159 -INFO[0008] Creating worker machine -INFO[0009] no stopped instances found for machine UNIQUE_ID-aws-actuator-testing-machine-worker bootstrap=create-worker-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-worker -DEBU[0009] Describing AMI based on filters bootstrap=create-worker-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-worker -DEBU[0012] Describing security groups based on filters bootstrap=create-worker-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-worker -DEBU[0013] Describing subnets based on filters bootstrap=create-worker-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-worker -WARN[0013] More than one subnet id returned, only first one will be used bootstrap=create-worker-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-worker -INFO[0014] Worker machine created with InstanceId: i-0d548c5592e4e78a7 -INFO[0019] Waiting for master machine PublicDNS -DEBU[0019] checking if machine exists bootstrap=create-worker-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-master -INFO[0019] PublicDnsName: ec2-34-239-226-191.compute-1.amazonaws.com - -INFO[0024] Pulling kubeconfig from ec2-34-239-226-191.compute-1.amazonaws.com:8443 -INFO[0093] Unable to pull kubeconfig: exit status 1, Warning: Permanently added 'ec2-34-239-226-191.compute-1.amazonaws.com,34.239.226.191' (ECDSA) to the list of known hosts. 
+DEBU[0000] Describing AMI based on filters bootstrap=create-master-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-master
+DEBU[0007] Describing security groups based on filters bootstrap=create-master-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-master
+DEBU[0007] Describing subnets based on filters bootstrap=create-master-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-master
+WARN[0008] More than one subnet id returned, only first one will be used bootstrap=create-master-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-master
+INFO[0009] Master machine created with ipv4: 10.0.102.217, InstanceId: i-0eea29823ae5d50e8
+INFO[0014] Waiting for master machine PublicDNS
+DEBU[0014] checking if machine exists bootstrap=create-master-machine machine=test/UNIQUE_ID-aws-actuator-testing-machine-master
+INFO[0014] PublicDnsName: ec2-34-207-227-3.compute-1.amazonaws.com
+
+INFO[0019] Pulling kubeconfig from ec2-34-207-227-3.compute-1.amazonaws.com:8443
+INFO[0150] Unable to pull kubeconfig: exit status 255, ssh: connect to host ec2-34-207-227-3.compute-1.amazonaws.com port 22: Connection timed out
+
+INFO[0154] Pulling kubeconfig from ec2-34-207-227-3.compute-1.amazonaws.com:8443
+INFO[0158] Unable to pull kubeconfig: exit status 1, Warning: Permanently added 'ec2-34-207-227-3.compute-1.amazonaws.com,34.207.227.3' (ECDSA) to the list of known hosts.
 cat: /etc/kubernetes/admin.conf: No such file or directory
-INFO[0094] Pulling kubeconfig from ec2-34-239-226-191.compute-1.amazonaws.com:8443
-INFO[0096] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
-
-INFO[0099] Pulling kubeconfig from ec2-34-239-226-191.compute-1.amazonaws.com:8443
-INFO[0101] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
-
-INFO[0104] Pulling kubeconfig from ec2-34-239-226-191.compute-1.amazonaws.com:8443
-INFO[0106] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
-
-INFO[0109] Pulling kubeconfig from ec2-34-239-226-191.compute-1.amazonaws.com:8443
-INFO[0111] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
-
-INFO[0114] Pulling kubeconfig from ec2-34-239-226-191.compute-1.amazonaws.com:8443
-INFO[0116] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
-
-INFO[0119] Pulling kubeconfig from ec2-34-239-226-191.compute-1.amazonaws.com:8443
-INFO[0121] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
-
-INFO[0124] Pulling kubeconfig from ec2-34-239-226-191.compute-1.amazonaws.com:8443
-INFO[0126] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
-
-INFO[0129] Pulling kubeconfig from ec2-34-239-226-191.compute-1.amazonaws.com:8443
-INFO[0131] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
-
-INFO[0134] Pulling kubeconfig from ec2-34-239-226-191.compute-1.amazonaws.com:8443
-INFO[0136] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
-
-INFO[0139] Pulling kubeconfig from ec2-34-239-226-191.compute-1.amazonaws.com:8443
-INFO[0146] Running kubectl config set-cluster kubernetes --server=https://ec2-34-239-226-191.compute-1.amazonaws.com:8443
-INFO[0151] Waiting for all nodes to come up
-INFO[0156] Waiting for all nodes to come up
-INFO[0161] Waiting for all nodes to come up
-INFO[0166] Waiting for all nodes to come up
-INFO[0171] Waiting for all nodes to come up
-INFO[0179] Is node "ip-10-0-101-159.ec2.internal" ready?: true
-
-INFO[0179] Deploying cluster-api stack
-INFO[0179] Deploying aws credentials
-INFO[0179] Creating "test" namespace...
-INFO[0179] Creating "test/aws-credentials-secret" secret...
-INFO[0185] Deploying cluster-api server
-INFO[0197] Deploying cluster-api controllers
+INFO[0159] Pulling kubeconfig from ec2-34-207-227-3.compute-1.amazonaws.com:8443
+INFO[0162] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
+
+INFO[0164] Pulling kubeconfig from ec2-34-207-227-3.compute-1.amazonaws.com:8443
+INFO[0167] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
+
+INFO[0169] Pulling kubeconfig from ec2-34-207-227-3.compute-1.amazonaws.com:8443
+INFO[0172] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
+
+INFO[0174] Pulling kubeconfig from ec2-34-207-227-3.compute-1.amazonaws.com:8443
+INFO[0177] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
+
+INFO[0179] Pulling kubeconfig from ec2-34-207-227-3.compute-1.amazonaws.com:8443
+INFO[0182] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
+
+INFO[0184] Pulling kubeconfig from ec2-34-207-227-3.compute-1.amazonaws.com:8443
+INFO[0187] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
+
+INFO[0189] Pulling kubeconfig from ec2-34-207-227-3.compute-1.amazonaws.com:8443
+INFO[0191] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
+
+INFO[0194] Pulling kubeconfig from ec2-34-207-227-3.compute-1.amazonaws.com:8443
+INFO[0197] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
+
+INFO[0199] Pulling kubeconfig from ec2-34-207-227-3.compute-1.amazonaws.com:8443
+INFO[0202] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
+
+INFO[0204] Pulling kubeconfig from ec2-34-207-227-3.compute-1.amazonaws.com:8443
+INFO[0207] Unable to pull kubeconfig: exit status 1, cat: /etc/kubernetes/admin.conf: No such file or directory
+
+INFO[0209] Pulling kubeconfig from ec2-34-207-227-3.compute-1.amazonaws.com:8443
+INFO[0212] Running kubectl --kubeconfig=kubeconfig config set-cluster kubernetes --server=https://ec2-34-207-227-3.compute-1.amazonaws.com:8443
+INFO[0217] Waiting for all nodes to come up
+INFO[0222] Waiting for all nodes to come up
+INFO[0227] Waiting for all nodes to come up
+INFO[0232] Waiting for all nodes to come up
+INFO[0237] Waiting for all nodes to come up
+INFO[0242] Waiting for all nodes to come up
+INFO[0248] Is node "ip-10-0-102-217.ec2.internal" ready?: true
+
+INFO[0248] Deploying cluster-api stack
+INFO[0248] Deploying aws credentials
+INFO[0248] Creating "test" namespace...
+INFO[0248] Creating "test/aws-credentials-secret" secret...
+INFO[0254] Deploying cluster-api server
+INFO[0271] Deploying cluster-api controllers
+INFO[0277] Deploying cluster resource
+INFO[0277] Creating "test/tb-asg-35" cluster...
+INFO[0277] Unable to deploy cluster manifest: unable to create cluster: an error on the server ("service unavailable") has prevented the request from succeeding (post clusters.cluster.k8s.io)
+INFO[0282] Deploying cluster resource
+INFO[0282] Creating "test/tb-asg-35" cluster...
+INFO[0282] Unable to deploy cluster manifest: unable to create cluster: an error on the server ("service unavailable") has prevented the request from succeeding (post clusters.cluster.k8s.io)
+INFO[0287] Deploying cluster resource
+INFO[0287] Creating "test/tb-asg-35" cluster...
+INFO[0287] Unable to deploy cluster manifest: unable to create cluster: an error on the server ("service unavailable") has prevented the request from succeeding (post clusters.cluster.k8s.io)
+INFO[0292] Deploying cluster resource
+INFO[0292] Creating "test/tb-asg-35" cluster...
+INFO[0292] Unable to deploy cluster manifest: unable to create cluster: an error on the server ("service unavailable") has prevented the request from succeeding (post clusters.cluster.k8s.io)
+INFO[0297] Deploying cluster resource
+INFO[0297] Creating "test/tb-asg-35" cluster...
+INFO[0297] Unable to deploy cluster manifest: unable to create cluster: an error on the server ("service unavailable") has prevented the request from succeeding (post clusters.cluster.k8s.io)
+INFO[0302] Deploying cluster resource
+INFO[0302] Creating "test/tb-asg-35" cluster...
+INFO[0302] Reading worker user data manifest from examples/worker-userdata.yaml
+INFO[0302] Generating worker machine set user data for master listening at 10.0.102.217
+INFO[0302] Creating "test/aws-actuator-node-user-data-secret" secret...
+INFO[0302] Reading worker machine manifest from examples/worker-machineset.yaml
+INFO[0307] Deploying worker machineset
+INFO[0307] Creating "test/UNIQUE_ID-default-worker-machineset" machineset...
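+
+# Once the bootstrap finishes, the cluster can be inspected with the kubeconfig
+# file the command writes to the current directory, for example (the resource
+# names below assume the "test" namespace and machineset created above):
+$ kubectl --kubeconfig=kubeconfig get machinesets -n test
+$ kubectl --kubeconfig=kubeconfig get machines -n test
+$ kubectl --kubeconfig=kubeconfig get nodes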
``` diff --git a/cmd/aws-actuator/main.go b/cmd/aws-actuator/main.go index fcce93213a..709f425090 100644 --- a/cmd/aws-actuator/main.go +++ b/cmd/aws-actuator/main.go @@ -46,6 +46,7 @@ import ( "k8s.io/client-go/kubernetes" kubernetesfake "k8s.io/client-go/kubernetes/fake" machineactuator "sigs.k8s.io/cluster-api-provider-aws/cloud/aws/actuators/machine" + "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" "text/template" @@ -172,6 +173,30 @@ func existsCommand() *cobra.Command { } } +func readMachineSetManifest(manifestParams *manifestParams, manifestLoc string) (*clusterv1.MachineSet, error) { + machineset := &clusterv1.MachineSet{} + manifestBytes, err := ioutil.ReadFile(manifestLoc) + if err != nil { + return nil, fmt.Errorf("Unable to read %v: %v", manifestLoc, err) + } + + t, err := template.New("machinesetuserdata").Parse(string(manifestBytes)) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = t.Execute(&buf, *manifestParams) + if err != nil { + return nil, err + } + + if err = yaml.Unmarshal(buf.Bytes(), &machineset); err != nil { + return nil, fmt.Errorf("Unable to unmarshal %v: %v", manifestLoc, err) + } + + return machineset, nil +} + func readMachineManifest(manifestParams *manifestParams, manifestLoc string) (*clusterv1.Machine, error) { machine := &clusterv1.Machine{} manifestBytes, err := ioutil.ReadFile(manifestLoc) @@ -278,6 +303,29 @@ func generateWorkerUserData(masterIP string, workerUserDataSecret *apiv1.Secret) // TestConfig stores clients for managing various resources type TestConfig struct { KubeClient *kubernetes.Clientset + CAPIClient *clientset.Clientset +} + +func createCluster(testConfig *TestConfig, cluster *clusterv1.Cluster) error { + log.Infof("Creating %q cluster...", strings.Join([]string{cluster.Namespace, cluster.Name}, "/")) + if _, err := testConfig.CAPIClient.ClusterV1alpha1().Clusters(cluster.Namespace).Get(cluster.Name, metav1.GetOptions{}); err != nil { + if _, err := testConfig.CAPIClient.ClusterV1alpha1().Clusters(cluster.Namespace).Create(cluster); err != nil { + return fmt.Errorf("unable to create cluster: %v", err) + } + } + + return nil +} + +func createMachineSet(testConfig *TestConfig, machineset *clusterv1.MachineSet) error { + log.Infof("Creating %q machineset...", strings.Join([]string{machineset.Namespace, machineset.Name}, "/")) + if _, err := testConfig.CAPIClient.ClusterV1alpha1().MachineSets(machineset.Namespace).Get(machineset.Name, metav1.GetOptions{}); err != nil { + if _, err := testConfig.CAPIClient.ClusterV1alpha1().MachineSets(machineset.Namespace).Create(machineset); err != nil { + return fmt.Errorf("unable to create machineset: %v", err) + } + } + + return nil } func createSecret(testConfig *TestConfig, secret *apiv1.Secret) error { @@ -320,13 +368,11 @@ func bootstrapCommand() *cobra.Command { machinePrefix := cmd.Flag("environment-id").Value.String() - if cmd.Flag("cluster-api-stack").Value.String() == "true" { - if os.Getenv("AWS_ACCESS_KEY_ID") == "" { - return fmt.Errorf("AWS_ACCESS_KEY_ID env needs to be set") - } - if os.Getenv("AWS_SECRET_ACCESS_KEY") == "" { - return fmt.Errorf("AWS_SECRET_ACCESS_KEY env needs to be set") - } + if os.Getenv("AWS_ACCESS_KEY_ID") == "" { + return fmt.Errorf("AWS_ACCESS_KEY_ID env needs to be set") + } + if os.Getenv("AWS_SECRET_ACCESS_KEY") == "" { + return fmt.Errorf("AWS_SECRET_ACCESS_KEY env needs to be set") } log.Infof("Reading cluster manifest from %v", path.Join(manifestsDir, "cluster.yaml")) @@ -352,21 +398,8 @@ func bootstrapCommand() 
*cobra.Command { return err } - log.Infof("Reading worker machine manifest from %v", path.Join(manifestsDir, "worker-machine.yaml")) - workerMachine, err := readMachineManifest( - &manifestParams{ - ClusterID: machinePrefix, - }, - path.Join(manifestsDir, "worker-machine.yaml"), - ) - if err != nil { - return err - } - - log.Infof("Reading worker user data manifest from %v", path.Join(manifestsDir, "worker-userdata.yaml")) - workerUserDataSecret, err := readSecretManifest(path.Join(manifestsDir, "worker-userdata.yaml")) - if err != nil { - return err + if machinePrefix != "" { + masterMachine.Name = machinePrefix + "-" + masterMachine.Name } var awsCredentialsSecret *apiv1.Secret @@ -378,11 +411,6 @@ func bootstrapCommand() *cobra.Command { } } - if machinePrefix != "" { - masterMachine.Name = machinePrefix + "-" + masterMachine.Name - workerMachine.Name = machinePrefix + "-" + workerMachine.Name - } - log.Infof("Creating master machine") actuator := createActuator(masterMachine, awsCredentialsSecret, masterUserDataSecret, log.WithField("bootstrap", "create-master-machine")) result, err := actuator.CreateMachine(cluster, masterMachine) @@ -392,154 +420,215 @@ func bootstrapCommand() *cobra.Command { log.Infof("Master machine created with ipv4: %v, InstanceId: %v", *result.PrivateIpAddress, *result.InstanceId) - log.Infof("Generating worker user data for master listening at %v", *result.PrivateIpAddress) - workerUserDataSecret, err = generateWorkerUserData(*result.PrivateIpAddress, workerUserDataSecret) - if err != nil { - return fmt.Errorf("unable to generate worker user data: %v", err) - } + masterMachinePublicDNS := "" + masterMachinePrivateIP := "" + err = wait.Poll(pollInterval, timeoutPoolAWSInterval, func() (bool, error) { + log.Info("Waiting for master machine PublicDNS") + result, err := actuator.Describe(cluster, masterMachine) + if err != nil { + log.Info(err) + return false, nil + } - log.Infof("Creating worker machine") - actuator = createActuator(workerMachine, awsCredentialsSecret, workerUserDataSecret, log.WithField("bootstrap", "create-worker-machine")) - result, err = actuator.CreateMachine(cluster, workerMachine) - if err != nil { - return err - } + log.Infof("PublicDnsName: %v\n", *result.PublicDnsName) + if *result.PublicDnsName == "" { + return false, nil + } - log.Infof("Worker machine created with InstanceId: %v", *result.InstanceId) + masterMachinePublicDNS = *result.PublicDnsName + masterMachinePrivateIP = *result.PrivateIpAddress + return true, nil + }) - if cmd.Flag("cluster-api-stack").Value.String() == "true" { - masterMachinePublicDNS := "" - err = wait.Poll(pollInterval, timeoutPoolAWSInterval, func() (bool, error) { - log.Info("Waiting for master machine PublicDNS") - result, err := actuator.Describe(cluster, masterMachine) - if err != nil { - log.Info(err) - return false, nil - } + err = wait.Poll(pollInterval, timeoutPoolAWSInterval, func() (bool, error) { + log.Infof("Pulling kubeconfig from %v:8443", masterMachinePublicDNS) + output, err := cmdRun("ssh", fmt.Sprintf("ec2-user@%v", masterMachinePublicDNS), "sudo cat /etc/kubernetes/admin.conf") + if err != nil { + log.Infof("Unable to pull kubeconfig: %v, %v", err, string(output)) + return false, nil + } - log.Infof("PublicDnsName: %v\n", *result.PublicDnsName) - if *result.PublicDnsName == "" { - return false, nil - } + f, err := os.Create("kubeconfig") + if err != nil { + return false, err + } - masterMachinePublicDNS = *result.PublicDnsName - return true, nil - }) + if _, err = f.Write(output); err != 
nil { + f.Close() + return false, err + } + f.Close() - err = wait.Poll(pollInterval, timeoutPoolAWSInterval, func() (bool, error) { - log.Infof("Pulling kubeconfig from %v:8443", masterMachinePublicDNS) - output, err := cmdRun("ssh", fmt.Sprintf("ec2-user@%v", masterMachinePublicDNS), "sudo cat /etc/kubernetes/admin.conf") - if err != nil { - log.Infof("Unable to pull kubeconfig: %v, %v", err, string(output)) - return false, nil - } + return true, nil + }) - f, err := os.Create("kubeconfig") - if err != nil { - return false, err - } + log.Infof("Running kubectl --kubeconfig=kubeconfig config set-cluster kubernetes --server=https://%v:8443", masterMachinePublicDNS) + if _, err := cmdRun("kubectl", "--kubeconfig=kubeconfig", "config", "set-cluster", "kubernetes", fmt.Sprintf("--server=https://%v:8443", masterMachinePublicDNS)); err != nil { + return err + } - if _, err = f.Write(output); err != nil { - f.Close() - return false, err - } - f.Close() + // Wait until the cluster comes up + config, err := controller.GetConfig("kubeconfig") + if err != nil { + return fmt.Errorf("Unable to create config: %v", err) + } + + kubeClient, err := kubernetes.NewForConfig(config) + if err != nil { + glog.Fatalf("Could not create kubernetes client to talk to the apiserver: %v", err) + } - return true, nil - }) + capiclient, err := clientset.NewForConfig(config) + if err != nil { + glog.Fatalf("Could not create client for talking to the apiserver: %v", err) + } - log.Infof("Running kubectl --kubeconfig=kubeconfig config set-cluster kubernetes --server=https://%v:8443", masterMachinePublicDNS) - if _, err := cmdRun("kubectl", "--kubeconfig=kubeconfig", "config", "set-cluster", "kubernetes", fmt.Sprintf("--server=https://%v:8443", masterMachinePublicDNS)); err != nil { - return err - } + tc := &TestConfig{ + KubeClient: kubeClient, + CAPIClient: capiclient, + } - // Wait until the cluster comes up - config, err := controller.GetConfig("kubeconfig") - if err != nil { - return fmt.Errorf("Unable to create config: %v", err) - } - kubeClient, err := kubernetes.NewForConfig(config) + err = wait.Poll(pollInterval, timeoutPoolAWSInterval, func() (bool, error) { + log.Info("Waiting for all nodes to come up") + nodesList, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { - glog.Fatalf("Could not create kubernetes client to talk to the apiserver: %v", err) + return false, nil } - tc := &TestConfig{ - KubeClient: kubeClient, + nodesReady := true + for _, node := range nodesList.Items { + ready := false + for _, c := range node.Status.Conditions { + if c.Type != apiv1.NodeReady { + continue + } + ready = true + } + log.Infof("Is node %q ready?: %v\n", node.Name, ready) + if !ready { + nodesReady = false + } } - err = wait.Poll(pollInterval, timeoutPoolAWSInterval, func() (bool, error) { - log.Info("Waiting for all nodes to come up") - nodesList, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{}) - if err != nil { - return false, nil - } + return nodesReady, nil + }) - nodesReady := true - for _, node := range nodesList.Items { - ready := false - for _, c := range node.Status.Conditions { - if c.Type != apiv1.NodeReady { - continue - } - ready = true - } - log.Infof("Is node %q ready?: %v\n", node.Name, ready) - if !ready { - nodesReady = false - } - } + log.Info("Deploying cluster-api stack") + log.Info("Deploying aws credentials") - return nodesReady, nil - }) + if err := createNamespace(tc, "test"); err != nil { + return err + } - log.Info("Deploying cluster-api stack") - 
log.Info("Deploying aws credentials") + awsCredentials := &apiv1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "aws-credentials-secret", + Namespace: "test", + }, + Data: map[string][]byte{ + "awsAccessKeyId": []byte(os.Getenv("AWS_ACCESS_KEY_ID")), + "awsSecretAccessKey": []byte(os.Getenv("AWS_SECRET_ACCESS_KEY")), + }, + } - if err := createNamespace(tc, "test"); err != nil { - return err + if err := createSecret(tc, awsCredentials); err != nil { + return err + } + + err = wait.Poll(pollInterval, timeoutPoolAWSInterval, func() (bool, error) { + log.Info("Deploying cluster-api server") + if output, err := cmdRun("kubectl", "--kubeconfig=kubeconfig", "apply", fmt.Sprintf("-f=%v", path.Join(manifestsDir, "cluster-api-server.yaml")), "--validate=false"); err != nil { + log.Infof("Unable to apply %v manifest: %v\n%v", path.Join(manifestsDir, "cluster-api-server.yaml"), err, string(output)) + return false, nil } - awsCredentials := &apiv1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "aws-credentials-secret", - Namespace: "test", - }, - Data: map[string][]byte{ - "awsAccessKeyId": []byte(os.Getenv("AWS_ACCESS_KEY_ID")), - "awsSecretAccessKey": []byte(os.Getenv("AWS_SECRET_ACCESS_KEY")), - }, + return true, nil + }) + + err = wait.Poll(pollInterval, timeoutPoolAWSInterval, func() (bool, error) { + log.Info("Deploying cluster-api controllers") + if output, err := cmdRun("kubectl", "--kubeconfig=kubeconfig", "apply", fmt.Sprintf("-f=%v", path.Join(manifestsDir, "provider-components.yml"))); err != nil { + log.Infof("Unable to apply %v manifest: %v\n%v", path.Join(manifestsDir, "provider-components.yml"), err, string(output)) + return false, nil } + return true, nil + }) - if err := createSecret(tc, awsCredentials); err != nil { - return err + testCluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tb-asg-35", + Namespace: "test", + }, + Spec: clusterv1.ClusterSpec{ + ClusterNetwork: clusterv1.ClusterNetworkingConfig{ + Services: clusterv1.NetworkRanges{ + CIDRBlocks: []string{"10.0.0.1/24"}, + }, + Pods: clusterv1.NetworkRanges{ + CIDRBlocks: []string{"10.0.0.1/24"}, + }, + ServiceDomain: "example.com", + }, + }, + } + + err = wait.Poll(pollInterval, timeoutPoolAWSInterval, func() (bool, error) { + log.Infof("Deploying cluster resource") + + if err := createCluster(tc, testCluster); err != nil { + log.Infof("Unable to deploy cluster manifest: %v", err) + return false, nil } - err = wait.Poll(pollInterval, timeoutPoolAWSInterval, func() (bool, error) { - log.Info("Deploying cluster-api server") - if output, err := cmdRun("kubectl", "--kubeconfig=kubeconfig", "apply", fmt.Sprintf("-f=%v", path.Join(manifestsDir, "cluster-api-server.yaml")), "--validate=false"); err != nil { - log.Infof("Unable to apply %v manifest: %v\n%v", path.Join(manifestsDir, "cluster-api-server.yaml"), err, string(output)) - return false, nil - } + return true, nil + }) - return true, nil - }) + log.Infof("Reading worker user data manifest from %v", path.Join(manifestsDir, "worker-userdata.yaml")) + workerUserDataSecret, err := readSecretManifest(path.Join(manifestsDir, "worker-userdata.yaml")) + if err != nil { + return err + } - err = wait.Poll(pollInterval, timeoutPoolAWSInterval, func() (bool, error) { - log.Info("Deploying cluster-api controllers") - if output, err := cmdRun("kubectl", "--kubeconfig=kubeconfig", "apply", fmt.Sprintf("-f=%v", path.Join(manifestsDir, "provider-components.yml"))); err != nil { - log.Infof("Unable to apply %v manifest: %v\n%v", path.Join(manifestsDir, 
"provider-components.yml"), err, string(output)) - return false, nil - } - return true, nil - }) + log.Infof("Generating worker machine set user data for master listening at %v", masterMachinePrivateIP) + workerUserDataSecret, err = generateWorkerUserData(masterMachinePrivateIP, workerUserDataSecret) + if err != nil { + return fmt.Errorf("unable to generate worker user data: %v", err) } + if err := createSecret(tc, workerUserDataSecret); err != nil { + return err + } + + log.Infof("Reading worker machine manifest from %v", path.Join(manifestsDir, "worker-machineset.yaml")) + workerMachineSet, err := readMachineSetManifest( + &manifestParams{ + ClusterID: machinePrefix, + }, + path.Join(manifestsDir, "worker-machineset.yaml"), + ) + if err != nil { + return err + } + + if machinePrefix != "" { + workerMachineSet.Name = machinePrefix + "-" + workerMachineSet.Name + } + + err = wait.Poll(pollInterval, timeoutPoolAWSInterval, func() (bool, error) { + log.Info("Deploying worker machineset") + if err := createMachineSet(tc, workerMachineSet); err != nil { + log.Infof("unable to create machineset: %v", err) + return false, nil + } + + return true, nil + }) + return nil }, } cmd.PersistentFlags().StringP("manifests", "", "", "Directory with bootstrapping manifests") - cmd.PersistentFlags().BoolP("cluster-api-stack", "", false, "Deploy cluster API stack") return cmd } diff --git a/examples/worker-machineset.yaml b/examples/worker-machineset.yaml new file mode 100644 index 0000000000..f803b4fcc8 --- /dev/null +++ b/examples/worker-machineset.yaml @@ -0,0 +1,67 @@ +--- +apiVersion: cluster.k8s.io/v1alpha1 +kind: MachineSet +metadata: + name: default-worker-machineset + namespace: test + labels: + sigs.k8s.io/cluster-api-cluster: tb-asg-35 +spec: + replicas: 1 + selector: + matchLabels: + sigs.k8s.io/cluster-api-machineset: worker-machine + sigs.k8s.io/cluster-api-cluster: tb-asg-35 + template: + metadata: + labels: + sigs.k8s.io/cluster-api-machineset: worker-machine + sigs.k8s.io/cluster-api-cluster: tb-asg-35 + spec: + providerConfig: + value: + apiVersion: aws.cluster.k8s.io/v1alpha1 + kind: AWSMachineProviderConfig + ami: + filters: + - name: "tag:image_stage" + values: + - "base" + - name: "tag:operating_system" + values: + - "rhel" + - name: "tag:ready" + values: + - "yes" + credentialsSecret: + name: aws-credentials-secret + instanceType: m4.xlarge + placement: + region: us-east-1 + availabilityZone: us-east-1a + subnet: + filters: + - name: "tag:Name" + values: + - "{{ .ClusterID }}-worker-*" + iamInstanceProfile: + id: openshift_master_launch_instances + keyName: libra + tags: + - name: openshift-node-group-config + value: node-config-worker + - name: host-type + value: worker + - name: sub-host-type + value: default + securityGroups: + - filters: + - name: "tag:Name" + values: + - "{{ .ClusterID }}-*" + publicIp: true + userDataSecret: + name: aws-actuator-node-user-data-secret + versions: + kubelet: 1.10.1 + controlPlane: 1.10.1