diff --git a/cloud/terraform/cmd/terraform-machine-controller/main.go b/cloud/terraform/cmd/terraform-machine-controller/main.go index 79de452ae476..241a2f806136 100644 --- a/cloud/terraform/cmd/terraform-machine-controller/main.go +++ b/cloud/terraform/cmd/terraform-machine-controller/main.go @@ -31,7 +31,8 @@ import ( ) var ( - kubeadmToken = pflag.String("token", "", "Kubeadm token to use to join new machines") + kubeadmToken = pflag.String("token", "", "Kubeadm token to use to join new machines") + namedMachinesPath = pflag.String("namedmachines", "", "path to named machines yaml file") ) func init() { @@ -54,7 +55,7 @@ func main() { glog.Fatalf("Could not create client for talking to the apiserver: %v", err) } - actuator, err := terraform.NewMachineActuator(*kubeadmToken, client.ClusterV1alpha1().Machines(corev1.NamespaceDefault)) + actuator, err := terraform.NewMachineActuator(*kubeadmToken, client.ClusterV1alpha1().Machines(corev1.NamespaceDefault), *namedMachinesPath) if err != nil { glog.Fatalf("Could not create Terraform machine actuator: %v", err) } diff --git a/cloud/terraform/config/configtemplate.go b/cloud/terraform/config/configtemplate.go index 6b531f44e665..7f469e8c0908 100644 --- a/cloud/terraform/config/configtemplate.go +++ b/cloud/terraform/config/configtemplate.go @@ -139,11 +139,14 @@ spec: mountPath: /root/.terraform.d - name: sshkeys mountPath: /root/.ssh + - name: named-machines + mountPath: /etc/named-machines command: - "./terraform-machine-controller" args: - --kubeconfig=/etc/kubernetes/admin.conf - --token={{ .Token }} + - --namedmachines=/etc/named-machines/vsphere_named_machines.yaml resources: requests: cpu: 200m @@ -167,6 +170,9 @@ spec: - name: sshkeys hostPath: path: /home/ubuntu/.ssh + - name: named-machines + configMap: + name: named-machines --- apiVersion: apps/v1beta1 kind: StatefulSet diff --git a/cloud/terraform/machineactuator.go b/cloud/terraform/machineactuator.go index a63866c342ed..7e859656676e 100644 --- 
a/cloud/terraform/machineactuator.go +++ b/cloud/terraform/machineactuator.go @@ -32,9 +32,13 @@ import ( "github.com/golang/glog" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/cluster-api/cloud/terraform/namedmachines" terraformconfig "sigs.k8s.io/cluster-api/cloud/terraform/terraformproviderconfig" terraformconfigv1 "sigs.k8s.io/cluster-api/cloud/terraform/terraformproviderconfig/v1alpha1" apierrors "sigs.k8s.io/cluster-api/errors" @@ -44,36 +48,67 @@ import ( ) const ( - MasterIpAnnotationKey = "master-ip" + MasterIpAnnotationKey = "master-ip" TerraformConfigAnnotationKey = "tf-config" + + // Filename in which named machines are saved using a ConfigMap (in master). + NamedMachinesFilename = "vsphere_named_machines.yaml" ) type TerraformClient struct { - scheme *runtime.Scheme - codecFactory *serializer.CodecFactory - kubeadmToken string - machineClient client.MachineInterface + scheme *runtime.Scheme + codecFactory *serializer.CodecFactory + kubeadmToken string + machineClient client.MachineInterface + namedMachineWatch *namedmachines.ConfigWatch } -func NewMachineActuator(kubeadmToken string, machineClient client.MachineInterface) (*TerraformClient, error) { +func NewMachineActuator(kubeadmToken string, machineClient client.MachineInterface, namedMachinePath string) (*TerraformClient, error) { scheme, codecFactory, err := terraformconfigv1.NewSchemeAndCodecs() if err != nil { return nil, err } + var nmWatch *namedmachines.ConfigWatch + nmWatch, err = namedmachines.NewConfigWatch(namedMachinePath) + if err != nil { + glog.Errorf("error creating named machine config watch: %+v", err) + } return &TerraformClient{ - scheme: scheme, - codecFactory: codecFactory, - kubeadmToken: kubeadmToken, - machineClient: machineClient, + scheme: scheme, + codecFactory: codecFactory, + kubeadmToken: kubeadmToken, + 
machineClient: machineClient, + namedMachineWatch: nmWatch, }, nil } -func (tf *TerraformClient) CreateMachineController(cluster *clusterv1.Cluster, initialMachines []*clusterv1.Machine) error { +func (tf *TerraformClient) CreateMachineController(cluster *clusterv1.Cluster, initialMachines []*clusterv1.Machine, clientSet kubernetes.Clientset) error { if err := CreateExtApiServerRoleBinding(); err != nil { return err } + // Create the named machines ConfigMap. + // After pivot-based bootstrap is done, the named machine should be a ConfigMap and this logic will be removed. + namedMachines, err := tf.namedMachineWatch.NamedMachines() + if err != nil { + return err + } + yaml, err := namedMachines.GetYaml() + if err != nil { + return err + } + nmConfigMap := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "named-machines"}, + Data: map[string]string{ + NamedMachinesFilename: yaml, + }, + } + configMaps := clientSet.CoreV1().ConfigMaps(corev1.NamespaceDefault) + if _, err := configMaps.Create(&nmConfigMap); err != nil { + return err + } + if err := CreateApiServerAndController(tf.kubeadmToken); err != nil { return err } @@ -93,11 +128,11 @@ func getHomeDir() (string, error) { } // Fallback to user's profile. 
- usr, err := user.Current() - if err != nil { - return "", err - } - return usr.HomeDir, nil + usr, err := user.Current() + if err != nil { + return "", err + } + return usr.HomeDir, nil } func (tf *TerraformClient) Create(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { @@ -126,7 +161,15 @@ func (tf *TerraformClient) Create(cluster *clusterv1.Cluster, machine *clusterv1 // Save the config file and variables file to the machinePath tfConfigPath := path.Join(machinePath, "terraform.tf") tfVarsPath := path.Join(machinePath, "variables.tfvars") - if err := saveFile(config.TerraformConfig, tfConfigPath, 0644); err != nil { + namedMachines, err := tf.namedMachineWatch.NamedMachines() + if err != nil { + return err + } + matchedMachine, err := namedMachines.MatchMachine(config.TerraformMachine) + if err != nil { + return err + } + if err := saveFile(matchedMachine.MachineHcl, tfConfigPath, 0644); err != nil { return err } if err := saveFile(strings.Join(config.TerraformVariables, "\n"), tfVarsPath, 0644); err != nil { @@ -286,7 +329,7 @@ func runTerraformCmd(stdout bool, workingDir string, arg ...string) (bytes.Buffe logFileName := fmt.Sprintf("/tmp/cluster-api-%s.log", util.RandomToken()) f, _ := os.Create(logFileName) glog.Infof("Running terraform. 
Check for logs in %s", logFileName) - multiWriter := io.MultiWriter(&out, f) + multiWriter := io.MultiWriter(&out, f) cmd.Stdout = multiWriter } cmd.Stdin = os.Stdin @@ -325,7 +368,7 @@ func (tf *TerraformClient) GetIP(machine *clusterv1.Machine) (string, error) { if machine.ObjectMeta.Annotations != nil { if ip, ok := machine.ObjectMeta.Annotations[MasterIpAnnotationKey]; ok { glog.Infof("Retuning IP from metadata %s", ip) - return ip, nil + return ip, nil } } @@ -354,9 +397,9 @@ func (tf *TerraformClient) GetKubeConfig(master *clusterv1.Machine) (string, err "ssh", "-i", "~/.ssh/vsphere_tmp", fmt.Sprintf("ubuntu@%s", ip), "echo STARTFILE; cat /etc/kubernetes/admin.conf") - cmd.Stdout = &out - cmd.Stderr = os.Stderr - cmd.Run() + cmd.Stdout = &out + cmd.Stderr = os.Stderr + cmd.Run() result := strings.TrimSpace(out.String()) parts := strings.Split(result, "STARTFILE") if len(parts) != 2 { @@ -391,33 +434,33 @@ func (tf *TerraformClient) SetupRemoteMaster(master *clusterv1.Machine) error { "-r", path.Join(homedir, ".terraform.d"), fmt.Sprintf("ubuntu@%s:~/", ip)) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Run() + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Run() - // TODO: Bake this into the controller image instead of this hacky thing. - glog.Infof("Copying the terraform binary to master.") + // TODO: Bake this into the controller image instead of this hacky thing. + glog.Infof("Copying the terraform binary to master.") cmd = exec.Command( // TODO: this is taking my private key and username for now. "scp", "-i", "~/.ssh/vsphere_tmp", // TODO: this should be a flag? 
"-r", "/Users/karangoel/.gvm/pkgsets/go1.9.2/global/src/sigs.k8s.io/cluster-api/cloud/terraform/bin/", fmt.Sprintf("ubuntu@%s:~/.terraform.d/", ip)) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Run() + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Run() - glog.Infof("Setting up terraform on remote master.") + glog.Infof("Setting up terraform on remote master.") cmd = exec.Command( // TODO: this is taking my private key and username for now. "ssh", "-i", "~/.ssh/vsphere_tmp", fmt.Sprintf("ubuntu@%s", ip), fmt.Sprintf("source ~/.profile; cd ~/.terraform.d/kluster/machines/%s; ~/.terraform.d/terraform init; cp -r ~/.terraform.d/kluster/machines/%s/.terraform/plugins/* ~/.terraform.d/plugins/", machineName, machineName)) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Run() + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Run() - return nil + return nil } func (tf *TerraformClient) updateAnnotations(machine *clusterv1.Machine, masterEndpointIp string) error { @@ -534,12 +577,12 @@ func run(cmd string, args ...string) error { } func pathExists(path string) (bool, error) { - _, err := os.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return true, err -} \ No newline at end of file + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return true, err +} diff --git a/cloud/terraform/namedmachines/namedmachines.go b/cloud/terraform/namedmachines/namedmachines.go new file mode 100644 index 000000000000..16b141c8e546 --- /dev/null +++ b/cloud/terraform/namedmachines/namedmachines.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package namedmachines + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/ghodss/yaml" +) + +// ConfigWatch holds the path to the named machines yaml file. +// This is used during bootstrap when the apiserver does not yet exist. +type ConfigWatch struct { + path string +} + +// NamedMachine is a single named machine. +type NamedMachine struct { + MachineName string `json:"machineName"` + MachineHcl string `json:"machineHcl"` +} + +// NamedMachinesItems is a list of named machines. +type NamedMachinesItems struct { + Items []NamedMachine `json:"items"` +} + +// NamedMachines is all named machines defined in yaml. +type NamedMachines struct { + namedMachinesItems *NamedMachinesItems +} + +func NewConfigWatch(path string) (*ConfigWatch, error) { + if _, err := os.Stat(path); err != nil { + return nil, err + } + return &ConfigWatch{path: path}, nil +} + +// NamedMachines returns all named machines for ConfigWatch. +func (cw *ConfigWatch) NamedMachines() (*NamedMachines, error) { + file, err := os.Open(cw.path) + if err != nil { + return nil, err + } + return parseNamedMachinesYaml(file) +} + +func parseNamedMachinesYaml(reader io.Reader) (*NamedMachines, error) { + bytes, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + + items := &NamedMachinesItems{} + err = yaml.Unmarshal(bytes, items) + if err != nil { + return nil, err + } + + return &NamedMachines{items}, nil +} + +func (nm *NamedMachines) GetYaml() (string, error) { + bytes, err := yaml.Marshal(nm.namedMachinesItems) + if err != nil { + return "", err + } + return string(bytes), nil +} + +// MatchMachine returns a NamedMachine that matches the passed name. 
+func (nm *NamedMachines) MatchMachine(machineName string) (*NamedMachine, error) { + for _, namedMachine := range nm.namedMachinesItems.Items { + if namedMachine.MachineName == machineName { + return &namedMachine, nil + } + } + return nil, fmt.Errorf("could not find a machine with name %s", machineName) +} diff --git a/cloud/terraform/terraformproviderconfig/types.go b/cloud/terraform/terraformproviderconfig/types.go index 34939573a11a..36fc54b85199 100644 --- a/cloud/terraform/terraformproviderconfig/types.go +++ b/cloud/terraform/terraformproviderconfig/types.go @@ -24,9 +24,8 @@ import ( type TerraformProviderConfig struct { metav1.TypeMeta `json:",inline"` - // Contents of a terrafrom config file. - // This is the HCL config encoded as a string. - TerraformConfig string `json:"terraformConfig"` + // Name of the machine that's registered in the NamedMachines ConfigMap. + TerraformMachine string `json:"terraformMachine"` // List of contents of terraform variables used. // HCL variables encoded as string. TerraformVariables []string `json:"terraformVariables"` } diff --git a/cloud/terraform/terraformproviderconfig/v1alpha1/types.go b/cloud/terraform/terraformproviderconfig/v1alpha1/types.go index 824fb24ffdd0..acb3c283f348 100644 --- a/cloud/terraform/terraformproviderconfig/v1alpha1/types.go +++ b/cloud/terraform/terraformproviderconfig/v1alpha1/types.go @@ -24,8 +24,8 @@ import ( type TerraformProviderConfig struct { metav1.TypeMeta `json:",inline"` - // Contents of a terrafrom config file. - TerraformConfig string `json:"terraformConfig"` + // Name of the machine that's registered in the NamedMachines ConfigMap. + TerraformMachine string `json:"terraformMachine"` // List of contents of terraform variable files used. 
TerraformVariables []string `json:"terraformVariables"` } diff --git a/tf-deployer/cmd/add.go b/tf-deployer/cmd/add.go index 06372b7ba6ff..344a51df52b4 100644 --- a/tf-deployer/cmd/add.go +++ b/tf-deployer/cmd/add.go @@ -52,7 +52,7 @@ func RunAdd(ao *AddOptions) error { return err } - d := deploy.NewDeployer(kubeConfig) + d := deploy.NewDeployer(kubeConfig, "") return d.AddNodes(machines) } diff --git a/tf-deployer/cmd/create.go b/tf-deployer/cmd/create.go index 06271ebd13a3..1feed252d016 100644 --- a/tf-deployer/cmd/create.go +++ b/tf-deployer/cmd/create.go @@ -25,8 +25,9 @@ import ( ) type CreateOptions struct { - Cluster string - Machine string + Cluster string + Machine string + NamedMachinePath string } var co = &CreateOptions{} @@ -46,6 +47,11 @@ var createCmd = &cobra.Command{ cmd.Help() os.Exit(1) } + if co.NamedMachinePath == "" { + glog.Error("Please provide a yaml file for machine HCL configs.") + cmd.Help() + os.Exit(1) + } if err := RunCreate(co); err != nil { glog.Exit(err) } @@ -63,13 +69,14 @@ func RunCreate(co *CreateOptions) error { return err } - d := deploy.NewDeployer(kubeConfig) + d := deploy.NewDeployer(kubeConfig, co.NamedMachinePath) return d.CreateCluster(cluster, machines) } func init() { createCmd.Flags().StringVarP(&co.Cluster, "cluster", "c", "", "cluster yaml file") createCmd.Flags().StringVarP(&co.Machine, "machines", "m", "", "machine yaml file") + createCmd.Flags().StringVarP(&co.NamedMachinePath, "namedmachines", "n", "", "named machines yaml file") RootCmd.AddCommand(createCmd) } diff --git a/tf-deployer/cmd/delete.go b/tf-deployer/cmd/delete.go index 55229e049dff..cfeaaa90f633 100644 --- a/tf-deployer/cmd/delete.go +++ b/tf-deployer/cmd/delete.go @@ -34,7 +34,7 @@ var deleteCmd = &cobra.Command{ } func RunDelete() error { - d := deploy.NewDeployer(kubeConfig) + d := deploy.NewDeployer(kubeConfig, "") return d.DeleteCluster() } diff --git a/tf-deployer/deploy/deploy.go b/tf-deployer/deploy/deploy.go index 
e2c18d0cd532..6885d86b0c39 100644 --- a/tf-deployer/deploy/deploy.go +++ b/tf-deployer/deploy/deploy.go @@ -21,6 +21,7 @@ import ( "os" "github.com/golang/glog" + "k8s.io/client-go/kubernetes" "sigs.k8s.io/cluster-api/cloud/terraform" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" @@ -30,16 +31,17 @@ import ( ) type deployer struct { - token string - configPath string - machineDeployer machineDeployer - client v1alpha1.ClusterV1alpha1Interface - clientSet clientset.Interface + token string + configPath string + machineDeployer machineDeployer + client v1alpha1.ClusterV1alpha1Interface + clientSet clientset.Interface + kubernetesClientSet kubernetes.Clientset } // NewDeployer returns a cloud provider specific deployer and // sets kubeconfig path for the cluster to be deployed -func NewDeployer(configPath string) *deployer { +func NewDeployer(configPath, namedMachinesPath string) *deployer { token := util.RandomToken() if configPath == "" { configPath = os.Getenv("KUBECONFIG") @@ -53,7 +55,7 @@ func NewDeployer(configPath string) *deployer { glog.Exit(fmt.Sprintf("Failed to set Kubeconfig path err %v\n", err)) } } - ma, err := terraform.NewMachineActuator(token, nil) + ma, err := terraform.NewMachineActuator(token, nil, namedMachinesPath) if err != nil { glog.Exit(err) } diff --git a/tf-deployer/deploy/deploy_helper.go b/tf-deployer/deploy/deploy_helper.go index 05a777757301..59e17dac3126 100644 --- a/tf-deployer/deploy/deploy_helper.go +++ b/tf-deployer/deploy/deploy_helper.go @@ -82,40 +82,40 @@ func (d *deployer) createCluster(c *clusterv1.Cluster, machines []*clusterv1.Mac return fmt.Errorf("apiserver never came up: %v", err) } - if err := d.initApiClient(); err != nil { - return err - } - - if err := d.waitForServiceAccount(); err != nil { - return fmt.Errorf("service account %s/%s not found: %v", ServiceAccountNs, ServiceAccountName, err) - } - - glog.Info("Deploying the addon 
apiserver and controller manager...") - if err := d.machineDeployer.CreateMachineController(c, machines); err != nil { - return fmt.Errorf("can't create machine controller: %v", err) - } - - if err := d.waitForClusterResourceReady(); err != nil { - return err - } - - c, err = d.client.Clusters(apiv1.NamespaceDefault).Create(c) - if err != nil { - return err - } - - c.Status.APIEndpoints = append(c.Status.APIEndpoints, - clusterv1.APIEndpoint{ - Host: masterIP, - Port: 443, - }) - if _, err := d.client.Clusters(apiv1.NamespaceDefault).UpdateStatus(c); err != nil { - return err - } - - if err := d.createMachines(machines); err != nil { - return err - } + if err := d.initApiClient(); err != nil { + return err + } + + if err := d.waitForServiceAccount(); err != nil { + return fmt.Errorf("service account %s/%s not found: %v", ServiceAccountNs, ServiceAccountName, err) + } + + glog.Info("Deploying the addon apiserver and controller manager...") + if err := d.machineDeployer.CreateMachineController(c, machines, d.kubernetesClientSet); err != nil { + return fmt.Errorf("can't create machine controller: %v", err) + } + + if err := d.waitForClusterResourceReady(); err != nil { + return err + } + + c, err = d.client.Clusters(apiv1.NamespaceDefault).Create(c) + if err != nil { + return err + } + + c.Status.APIEndpoints = append(c.Status.APIEndpoints, + clusterv1.APIEndpoint{ + Host: masterIP, + Port: 443, + }) + if _, err := d.client.Clusters(apiv1.NamespaceDefault).UpdateStatus(c); err != nil { + return err + } + + if err := d.createMachines(machines); err != nil { + return err + } return nil } @@ -235,8 +235,13 @@ func (d *deployer) initApiClient() error { if err != nil { return err } + kubernetesClientSet, err := util.NewKubernetesClient(d.configPath) + if err != nil { + return err + } d.clientSet = c d.client = c.ClusterV1alpha1() + d.kubernetesClientSet = *kubernetesClientSet return nil } @@ -281,14 +286,9 @@ func (d *deployer) waitForApiserver(master string) error { // 
Make sure the default service account in kube-system namespace exists. func (d *deployer) waitForServiceAccount() error { - client, err := util.NewKubernetesClient(d.configPath) - if err != nil { - return err - } - waitErr := util.Retry(func() (bool, error) { glog.Info("Waiting for the service account to exist...") - _, err = client.CoreV1().ServiceAccounts(ServiceAccountNs).Get(ServiceAccountName, metav1.GetOptions{}) + _, err := d.kubernetesClientSet.CoreV1().ServiceAccounts(ServiceAccountNs).Get(ServiceAccountName, metav1.GetOptions{}) return (err == nil), nil }, 5) diff --git a/tf-deployer/deploy/machinedeployer.go b/tf-deployer/deploy/machinedeployer.go index fc447056b3d2..7ec914ecb5ad 100644 --- a/tf-deployer/deploy/machinedeployer.go +++ b/tf-deployer/deploy/machinedeployer.go @@ -1,15 +1,16 @@ package deploy import ( -clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -"sigs.k8s.io/cluster-api/pkg/controller/machine" + "k8s.io/client-go/kubernetes" + clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/pkg/controller/machine" ) // Provider-specific machine logic the deployer needs. type machineDeployer interface { machine.Actuator GetIP(machine *clusterv1.Machine) (string, error) - SetupRemoteMaster(machine *clusterv1.Machine) (error) + SetupRemoteMaster(machine *clusterv1.Machine) error GetKubeConfig(master *clusterv1.Machine) (string, error) // Create and start the machine controller. The list of initial @@ -17,6 +18,6 @@ type machineDeployer interface { // are provided in case the function wants to refer to them (and their // ProviderConfigs) to know how to configure the machine controller. // Not idempotent. 
- CreateMachineController(cluster *clusterv1.Cluster, initialMachines []*clusterv1.Machine) error + CreateMachineController(cluster *clusterv1.Cluster, initialMachines []*clusterv1.Machine, clientSet kubernetes.Clientset) error PostDelete(cluster *clusterv1.Cluster, machines []*clusterv1.Machine) error } diff --git a/tf-deployer/machines.yaml.template b/tf-deployer/machines.yaml.template index f492affba53a..8ba54f8cff3f 100644 --- a/tf-deployer/machines.yaml.template +++ b/tf-deployer/machines.yaml.template @@ -6,17 +6,16 @@ items: labels: set: master spec: - providerConfig: > - { - "apiVersion": "terraformproviderconfig/v1alpha1", - "kind": "TerraformProviderConfig", - "terraformConfig": "", - "terraformVariables": [ + providerConfig: + value: + apiVersion: "terraformproviderconfig/v1alpha1" + kind: "TerraformProviderConfig" + terraformMachine: "standard-master" + terraformVariables: [ "user = \"foo\"", "password = \"bar\"", "vsphere_server = \"192.169.1.1\"", ] - } versions: kubelet: 1.8.3 controlPlane: 1.8.3 @@ -32,17 +31,16 @@ items: labels: set: master spec: - providerConfig: > - { - "apiVersion": "terraformproviderconfig/v1alpha1", - "kind": "TerraformProviderConfig", - "terraformConfig": "", - "terraformVariables": [ + providerConfig: + value: + apiVersion: "terraformproviderconfig/v1alpha1" + kind: "TerraformProviderConfig" + terraformMachine: "standard-node" + terraformVariables: [ "user = \"foo\"", "password = \"bar\"", "vsphere_server = \"192.169.1.1\"", ] - } versions: kubelet: 1.8.3 controlPlane: 1.8.3 diff --git a/tf-deployer/vsphere_named_machines.yaml b/tf-deployer/vsphere_named_machines.yaml new file mode 100644 index 000000000000..7429b4572fc0 --- /dev/null +++ b/tf-deployer/vsphere_named_machines.yaml @@ -0,0 +1,279 @@ +items: +- machineName: standard-master + machineHcl: | + variable "user" {} + variable "password" {} + variable "vsphere_server" {} + + variable "datacenter" {} + variable "datastore" {} + variable "resource_pool" {} + variable 
"network" {} + variable "num_cpus" {} + variable "memory" {} + variable "vm_template" {} + variable "disk_label" {} + variable "disk_size" {} + + // The domain name to set up each virtual machine as. + variable "virtual_machine_domain" {} + + // The network address for the virtual machines, in the form of 10.0.0.0/24. + variable "virtual_machine_network_address" {} + + // The last octect that serves as the start of the IP addresses for the virtual + // machines. Given the default value here of 100, if the network address is + // 10.0.0.0/24, the 3 virtual machines will be assigned addresses 10.0.0.100, + // 10.0.0.101, and 10.0.0.102. + variable "virtual_machine_ip_address_start" {} + + // The default gateway for the network the virtual machines reside in. + variable "virtual_machine_gateway" {} + + // The DNS servers for the network the virtual machines reside in. + variable "virtual_machine_dns_servers" { + type = "list" + } + + variable "vm_name" { + type = "string" + } + + provider "vsphere" { + user = "${var.user}" + password = "${var.password}" + vsphere_server = "${var.vsphere_server}" + + # if you have a self-signed cert + allow_unverified_ssl = true + } + + data "vsphere_datacenter" "dc" { + name = "${var.datacenter}" + } + + data "vsphere_datastore" "datastore" { + name = "${var.datastore}" + datacenter_id = "${data.vsphere_datacenter.dc.id}" + } + + data "vsphere_resource_pool" "pool" { + name = "${var.resource_pool}" + datacenter_id = "${data.vsphere_datacenter.dc.id}" + } + + data "vsphere_network" "network" { + name = "${var.network}" + datacenter_id = "${data.vsphere_datacenter.dc.id}" + } + + data "vsphere_virtual_machine" "template" { + name = "${var.vm_template}" + datacenter_id = "${data.vsphere_datacenter.dc.id}" + } + + resource "vsphere_virtual_machine" "master" { + name = "${var.vm_name}" + resource_pool_id = "${data.vsphere_resource_pool.pool.id}" + datastore_id = "${data.vsphere_datastore.datastore.id}" + + num_cpus = "${var.num_cpus}" + 
memory = "${var.memory}" + guest_id = "${data.vsphere_virtual_machine.template.guest_id}" + enable_disk_uuid = "true" + + scsi_type = "${data.vsphere_virtual_machine.template.scsi_type}" + + network_interface { + network_id = "${data.vsphere_network.network.id}" + adapter_type = "${data.vsphere_virtual_machine.template.network_interface_types[0]}" + } + + disk { + label = "${var.disk_label}" + size = "${data.vsphere_virtual_machine.template.disks.0.size}" + eagerly_scrub = "${data.vsphere_virtual_machine.template.disks.0.eagerly_scrub}" + thin_provisioned = "${data.vsphere_virtual_machine.template.disks.0.thin_provisioned}" + } + + clone { + template_uuid = "${data.vsphere_virtual_machine.template.id}" + + customize { + linux_options { + host_name = "${var.vm_name}" + domain = "${var.virtual_machine_domain}" + } + + network_interface { + ipv4_address = "${cidrhost(var.virtual_machine_network_address, var.virtual_machine_ip_address_start + count.index)}" + ipv4_netmask = "${element(split("/", var.virtual_machine_network_address), 1)}" + } + + ipv4_gateway = "${var.virtual_machine_gateway}" + dns_suffix_list = ["${var.virtual_machine_domain}"] + dns_server_list = ["${var.virtual_machine_dns_servers}"] + } + } + + provisioner "file" { + source = "/tmp/machine-startup.sh" + destination = "/tmp/master.sh" + + connection { + type = "ssh" + private_key = "${file("~/.ssh/vsphere_tmp")}" + user = "ubuntu" + agent = true + } + } + + // Copy the private key over so the controller is able to ssh into the nodes. + provisioner "file" { + source = "~/.ssh/vsphere_tmp" + destination = "~/.ssh/id_rsa" + + connection { + type = "ssh" + private_key = "${file("~/.ssh/vsphere_tmp")}" + user = "ubuntu" + agent = true + } + } + + // NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE + // Use username/password here because the machine controller will be running + // on a different machine and the image does not have that machines private key. 
+ // So either I use user/pw or I inject my SSH private key in the controller. + provisioner "remote-exec" { + inline = [ + "echo Making startup script executable...", + "chmod +x /tmp/master.sh", + "echo Running startup script...", + "echo '${var.password}' | sudo -S /tmp/master.sh", + + // This is required for the controller to be able to read the conf file. + "echo '${var.password}' | sudo -S chown ubuntu:ubuntu /etc/kubernetes/admin.conf", + ] + + connection { + type = "ssh" + private_key = "${file("~/.ssh/vsphere_tmp")}" + user = "ubuntu" + agent = true + } + } + } +- machineName: standard-node + machineHcl: | + variable "user" {} + variable "password" {} + variable "vsphere_server" {} + variable "datacenter" {} + variable "datastore" {} + variable "resource_pool" {} + variable "network" {} + variable "num_cpus" {} + variable "memory" {} + variable "vm_template" {} + variable "disk_label" {} + variable "disk_size" {} + variable "virtual_machine_domain" {} + variable "virtual_machine_network_address" {} + variable "virtual_machine_ip_address_start" {} + variable "virtual_machine_gateway" {} + variable "virtual_machine_dns_servers" { + type = "list" + } + variable "vm_name" {} + provider "vsphere" { + user = "${var.user}" + password = "${var.password}" + vsphere_server = "${var.vsphere_server}" + # if you have a self-signed cert + allow_unverified_ssl = true + } + data "vsphere_datacenter" "dc" { + name = "${var.datacenter}" + } + data "vsphere_datastore" "datastore" { + name = "${var.datastore}" + datacenter_id = "${data.vsphere_datacenter.dc.id}" + } + data "vsphere_resource_pool" "pool" { + name = "${var.resource_pool}" + datacenter_id = "${data.vsphere_datacenter.dc.id}" + } + data "vsphere_network" "network" { + name = "${var.network}" + datacenter_id = "${data.vsphere_datacenter.dc.id}" + } + data "vsphere_virtual_machine" "template" { + name = "${var.vm_template}" + datacenter_id = "${data.vsphere_datacenter.dc.id}" + } + resource 
"vsphere_virtual_machine" "nodes" { + name = "${var.vm_name}" + resource_pool_id = "${data.vsphere_resource_pool.pool.id}" + datastore_id = "${data.vsphere_datastore.datastore.id}" + num_cpus = "${var.num_cpus}" + memory = "${var.memory}" + guest_id = "${data.vsphere_virtual_machine.template.guest_id}" + enable_disk_uuid = "true" + scsi_type = "${data.vsphere_virtual_machine.template.scsi_type}" + network_interface { + network_id = "${data.vsphere_network.network.id}" + adapter_type = "${data.vsphere_virtual_machine.template.network_interface_types[0]}" + } + disk { + label = "${var.disk_label}" + size = "${data.vsphere_virtual_machine.template.disks.0.size}" + eagerly_scrub = "${data.vsphere_virtual_machine.template.disks.0.eagerly_scrub}" + thin_provisioned = "${data.vsphere_virtual_machine.template.disks.0.thin_provisioned}" + } + clone { + template_uuid = "${data.vsphere_virtual_machine.template.id}" + customize { + linux_options { + host_name = "${var.vm_name}" + domain = "${var.virtual_machine_domain}" + } + network_interface { + ipv4_address = "${cidrhost(var.virtual_machine_network_address, var.virtual_machine_ip_address_start + count.index + 1)}" + ipv4_netmask = "${element(split("/", var.virtual_machine_network_address), 1)}" + } + ipv4_gateway = "${var.virtual_machine_gateway}" + dns_suffix_list = ["${var.virtual_machine_domain}"] + dns_server_list = ["${var.virtual_machine_dns_servers}"] + } + } + + // Need to make sure that the private key exists at ~/.ssh/id_rsa + provisioner "file" { + source = "/tmp/machine-startup.sh" + destination = "/tmp/node.sh" + connection { + type = "ssh" + private_key = "${file("~/.ssh/id_rsa")}" + user = "ubuntu" + } + } + // NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE + // Use username/password here because the machine controller will be running + // on a different machine and the image does not have that machines private key. 
+ // So either I use user/pw or I inject my SSH private key in the controller. + provisioner "remote-exec" { + inline = [ + "echo Making startup script executable...", + "ls -al /tmp/", + "chmod +x /tmp/node.sh", + "echo Running startup script...", + "echo '${var.password}' | sudo -S /tmp/node.sh", + ] + connection { + type = "ssh" + private_key = "${file("~/.ssh/id_rsa")}" + user = "ubuntu" + } + } + } \ No newline at end of file