diff --git a/cloud/google/cmd/gce-machine-controller/Makefile b/cloud/google/cmd/gce-machine-controller/Makefile index b16b088faffa..57c4b87bdcf7 100644 --- a/cloud/google/cmd/gce-machine-controller/Makefile +++ b/cloud/google/cmd/gce-machine-controller/Makefile @@ -18,7 +18,7 @@ GCR_BUCKET = k8s-cluster-api PREFIX = gcr.io/$(GCR_BUCKET) DEV_PREFIX ?= gcr.io/$(shell gcloud config get-value project) NAME = gce-machine-controller -TAG = 0.0.9 +TAG = 0.0.10 image: docker build -t "$(PREFIX)/$(NAME):$(TAG)" -f ./Dockerfile ../../../.. diff --git a/cloud/google/gceproviderconfig/register.go b/cloud/google/gceproviderconfig/register.go index 52bb3ed1f43a..161871cc2698 100644 --- a/cloud/google/gceproviderconfig/register.go +++ b/cloud/google/gceproviderconfig/register.go @@ -45,7 +45,10 @@ func Resource(resource string) schema.GroupResource { func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &GCEProviderConfig{}, + &GCEMachineProviderConfig{}, + ) + scheme.AddKnownTypes(SchemeGroupVersion, + &GCEClusterProviderConfig{}, ) return nil } diff --git a/cloud/google/gceproviderconfig/types.go b/cloud/google/gceproviderconfig/types.go index 41b44502132e..e9de3bb48a31 100644 --- a/cloud/google/gceproviderconfig/types.go +++ b/cloud/google/gceproviderconfig/types.go @@ -21,10 +21,9 @@ import ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type GCEProviderConfig struct { +type GCEMachineProviderConfig struct { metav1.TypeMeta `json:",inline"` - Project string `json:"project"` Zone string `json:"zone"` MachineType string `json:"machineType"` @@ -41,3 +40,10 @@ type DiskInitializeParams struct { DiskSizeGb int64 `json:"diskSizeGb"` DiskType string `json:"diskType"` } + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type GCEClusterProviderConfig struct { + metav1.TypeMeta `json:",inline"` + + Project string `json:"project"` +} diff --git 
a/cloud/google/gceproviderconfig/v1alpha1/register.go b/cloud/google/gceproviderconfig/v1alpha1/register.go index 334ed9b3407a..a588328f896f 100644 --- a/cloud/google/gceproviderconfig/v1alpha1/register.go +++ b/cloud/google/gceproviderconfig/v1alpha1/register.go @@ -1,12 +1,9 @@ /* Copyright 2017 The Kubernetes Authors. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -47,7 +44,10 @@ func init() { func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &GCEProviderConfig{}, + &GCEMachineProviderConfig{}, + ) + scheme.AddKnownTypes(SchemeGroupVersion, + &GCEClusterProviderConfig{}, ) return nil } @@ -80,27 +80,22 @@ func NewCodec() (*GCEProviderConfigCodec, error) { return &codec, nil } -func (codec *GCEProviderConfigCodec) DecodeFromProviderConfig(providerConfig clusterv1.ProviderConfig) (*GCEProviderConfig, error) { - obj, gvk, err := codec.decoder.Decode(providerConfig.Value.Raw, nil, nil) +func (codec *GCEProviderConfigCodec) DecodeFromProviderConfig(providerConfig clusterv1.ProviderConfig, out runtime.Object) (error) { + _, _, err := codec.decoder.Decode(providerConfig.Value.Raw, nil, out) if err != nil { - return nil, fmt.Errorf("decoding failure: %v", err) + return fmt.Errorf("decoding failure: %v", err) } - config, ok := obj.(*GCEProviderConfig) - if !ok { - return nil, fmt.Errorf("failure to cast to gce; type: %v", gvk) - } - return config, nil + return nil } -func (codec *GCEProviderConfigCodec) EncodeToProviderConfig(gceProviderConfig *GCEProviderConfig) (*clusterv1.ProviderConfig, error) { +func (codec *GCEProviderConfigCodec) 
EncodeToProviderConfig(in runtime.Object) (*clusterv1.ProviderConfig, error) { var buf bytes.Buffer - if err := codec.encoder.Encode(gceProviderConfig, &buf); err != nil { + if err := codec.encoder.Encode(in, &buf); err != nil { return nil, fmt.Errorf("encoding failed: %v", err) } - providerConfig := clusterv1.ProviderConfig{ + return &clusterv1.ProviderConfig{ Value: &runtime.RawExtension{Raw: buf.Bytes()}, - } - return &providerConfig, nil + }, nil } func newEncoder(codecFactory *serializer.CodecFactory) (runtime.Encoder, error) { @@ -110,4 +105,4 @@ func newEncoder(codecFactory *serializer.CodecFactory) (runtime.Encoder, error) } encoder := codecFactory.EncoderForVersion(serializerInfos[0].Serializer, SchemeGroupVersion) return encoder, nil -} +} \ No newline at end of file diff --git a/cloud/google/gceproviderconfig/v1alpha1/types.go b/cloud/google/gceproviderconfig/v1alpha1/types.go index 9b89be7b6d1a..f85be9c1057c 100644 --- a/cloud/google/gceproviderconfig/v1alpha1/types.go +++ b/cloud/google/gceproviderconfig/v1alpha1/types.go @@ -21,10 +21,9 @@ import ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type GCEProviderConfig struct { +type GCEMachineProviderConfig struct { metav1.TypeMeta `json:",inline"` - Project string `json:"project"` Zone string `json:"zone"` MachineType string `json:"machineType"` @@ -41,3 +40,11 @@ type DiskInitializeParams struct { DiskSizeGb int64 `json:"diskSizeGb"` DiskType string `json:"diskType"` } + + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type GCEClusterProviderConfig struct { + metav1.TypeMeta `json:",inline"` + + Project string `json:"project"` +} \ No newline at end of file diff --git a/cloud/google/gceproviderconfig/v1alpha1/zz_generated.deepcopy.go b/cloud/google/gceproviderconfig/v1alpha1/zz_generated.deepcopy.go index 78e70d2ac4f1..3d7d54d08f55 100644 --- a/cloud/google/gceproviderconfig/v1alpha1/zz_generated.deepcopy.go +++ 
b/cloud/google/gceproviderconfig/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by deepcopy-gen. Do not edit it manually! +// Code generated by deepcopy-gen. DO NOT EDIT. package v1alpha1 @@ -25,27 +25,51 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCEProviderConfig) DeepCopyInto(out *GCEProviderConfig) { +func (in *GCEClusterProviderConfig) DeepCopyInto(out *GCEClusterProviderConfig) { *out = *in out.TypeMeta = in.TypeMeta return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEProviderConfig. -func (in *GCEProviderConfig) DeepCopy() *GCEProviderConfig { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEClusterProviderConfig. +func (in *GCEClusterProviderConfig) DeepCopy() *GCEClusterProviderConfig { if in == nil { return nil } - out := new(GCEProviderConfig) + out := new(GCEClusterProviderConfig) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *GCEProviderConfig) DeepCopyObject() runtime.Object { +func (in *GCEClusterProviderConfig) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c - } else { + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GCEMachineProviderConfig) DeepCopyInto(out *GCEMachineProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEMachineProviderConfig. +func (in *GCEMachineProviderConfig) DeepCopy() *GCEMachineProviderConfig { + if in == nil { return nil } + out := new(GCEMachineProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GCEMachineProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil } diff --git a/cloud/google/gceproviderconfig/zz_generated.deepcopy.go b/cloud/google/gceproviderconfig/zz_generated.deepcopy.go index 26319e3d5af9..f655f8857018 100644 --- a/cloud/google/gceproviderconfig/zz_generated.deepcopy.go +++ b/cloud/google/gceproviderconfig/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2018 The Kubernetes Authors. +Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This file was autogenerated by deepcopy-gen. Do not edit it manually! +// Code generated by deepcopy-gen. DO NOT EDIT. package gceproviderconfig @@ -25,27 +25,51 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCEProviderConfig) DeepCopyInto(out *GCEProviderConfig) { +func (in *GCEClusterProviderConfig) DeepCopyInto(out *GCEClusterProviderConfig) { *out = *in out.TypeMeta = in.TypeMeta return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEProviderConfig. 
-func (in *GCEProviderConfig) DeepCopy() *GCEProviderConfig { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEClusterProviderConfig. +func (in *GCEClusterProviderConfig) DeepCopy() *GCEClusterProviderConfig { if in == nil { return nil } - out := new(GCEProviderConfig) + out := new(GCEClusterProviderConfig) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *GCEProviderConfig) DeepCopyObject() runtime.Object { +func (in *GCEClusterProviderConfig) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c - } else { + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCEMachineProviderConfig) DeepCopyInto(out *GCEMachineProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEMachineProviderConfig. +func (in *GCEMachineProviderConfig) DeepCopy() *GCEMachineProviderConfig { + if in == nil { return nil } + out := new(GCEMachineProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GCEMachineProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil } diff --git a/cloud/google/machineactuator.go b/cloud/google/machineactuator.go index 04a0b7e1d4fd..4f8018ed6d1c 100644 --- a/cloud/google/machineactuator.go +++ b/cloud/google/machineactuator.go @@ -1,12 +1,9 @@ /* Copyright 2018 The Kubernetes Authors. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -156,7 +153,7 @@ func (gce *GCEClient) CreateMachineController(cluster *clusterv1.Cluster, initia } // Setup SSH access to master VM - if err := gce.setupSSHAccess(util.GetMaster(initialMachines)); err != nil { + if err := gce.setupSSHAccess(cluster, util.GetMaster(initialMachines)); err != nil { return err } @@ -202,18 +199,23 @@ func (gce *GCEClient) Create(cluster *clusterv1.Cluster, machine *clusterv1.Mach if gce.machineSetupConfigGetter == nil { return errors.New("a valid machineSetupConfigGetter is required") } - config, err := gce.providerconfig(machine.Spec.ProviderConfig) + machineConfig, err := gce.machineproviderconfig(machine.Spec.ProviderConfig) + if err != nil { + return gce.handleMachineError(machine, apierrors.InvalidMachineConfiguration( + "Cannot unmarshal machine's providerConfig field: %v", err)) + } + clusterConfig, err := gce.clusterproviderconfig(cluster.Spec.ProviderConfig) if err != nil { return gce.handleMachineError(machine, apierrors.InvalidMachineConfiguration( - "Cannot unmarshal providerConfig field: %v", err)) + "Cannot unmarshal cluster's providerConfig field: %v", err)) } - if verr := gce.validateMachine(machine, config); verr != nil { + if verr := gce.validateMachine(machine, machineConfig); verr != nil { return gce.handleMachineError(machine, verr) } configParams := &machinesetup.ConfigParams{ - OS: config.OS, + OS: machineConfig.OS, Roles: machine.Spec.Roles, Versions: machine.Spec.Versions, } @@ -226,19 +228,19 @@ func (gce *GCEClient) Create(cluster *clusterv1.Cluster, machine *clusterv1.Mach return err } imagePath := gce.getImagePath(image) - metadata, err := gce.getMetadata(cluster, machine, config, configParams) + metadata, err := 
gce.getMetadata(cluster, machine, clusterConfig, configParams) if err != nil { return err } - instance, err := gce.instanceIfExists(machine) + instance, err := gce.instanceIfExists(cluster, machine) if err != nil { return err } name := machine.ObjectMeta.Name - project := config.Project - zone := config.Zone + project := clusterConfig.Project + zone := machineConfig.Zone if instance == nil { labels := map[string]string{} @@ -248,7 +250,7 @@ func (gce *GCEClient) Create(cluster *clusterv1.Cluster, machine *clusterv1.Mach op, err := gce.computeService.InstancesInsert(project, zone, &compute.Instance{ Name: name, - MachineType: fmt.Sprintf("zones/%s/machineTypes/%s", zone, config.MachineType), + MachineType: fmt.Sprintf("zones/%s/machineTypes/%s", zone, machineConfig.MachineType), CanIpForward: true, NetworkInterfaces: []*compute.NetworkInterface{ { @@ -261,7 +263,7 @@ func (gce *GCEClient) Create(cluster *clusterv1.Cluster, machine *clusterv1.Mach }, }, }, - Disks: newDisks(config, zone, imagePath, int64(30)), + Disks: newDisks(machineConfig, zone, imagePath, int64(30)), Metadata: metadata, Tags: &compute.Tags{ Items: []string{ @@ -280,7 +282,7 @@ func (gce *GCEClient) Create(cluster *clusterv1.Cluster, machine *clusterv1.Mach }) if err == nil { - err = gce.waitForOperation(config, op) + err = gce.waitForOperation(clusterConfig, op) } if err != nil { @@ -291,7 +293,7 @@ func (gce *GCEClient) Create(cluster *clusterv1.Cluster, machine *clusterv1.Mach // If we have a machineClient, then annotate the machine so that we // remember exactly what VM we created for it. 
if gce.machineClient != nil { - return gce.updateAnnotations(machine) + return gce.updateAnnotations(cluster, machine) } } else { glog.Infof("Skipped creating a VM that already exists.\n") @@ -300,8 +302,8 @@ func (gce *GCEClient) Create(cluster *clusterv1.Cluster, machine *clusterv1.Mach return nil } -func (gce *GCEClient) Delete(machine *clusterv1.Machine) error { - instance, err := gce.instanceIfExists(machine) +func (gce *GCEClient) Delete(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { + instance, err := gce.instanceIfExists(cluster, machine) if err != nil { return err } @@ -311,13 +313,19 @@ func (gce *GCEClient) Delete(machine *clusterv1.Machine) error { return nil } - config, err := gce.providerconfig(machine.Spec.ProviderConfig) + machineConfig, err := gce.machineproviderconfig(machine.Spec.ProviderConfig) if err != nil { return gce.handleMachineError(machine, - apierrors.InvalidMachineConfiguration("Cannot unmarshal providerConfig field: %v", err)) + apierrors.InvalidMachineConfiguration("Cannot unmarshal machine's providerConfig field: %v", err)) } - if verr := gce.validateMachine(machine, config); verr != nil { + clusterConfig, err := gce.clusterproviderconfig(cluster.Spec.ProviderConfig) + if err != nil { + return gce.handleMachineError(machine, + apierrors.InvalidMachineConfiguration("Cannot unmarshal cluster's providerConfig field: %v", err)) + } + + if verr := gce.validateMachine(machine, machineConfig); verr != nil { return gce.handleMachineError(machine, verr) } @@ -331,14 +339,14 @@ func (gce *GCEClient) Delete(machine *clusterv1.Machine) error { // If the annotations are missing, fall back on providerConfig if project == "" || zone == "" || name == "" { - project = config.Project - zone = config.Zone + project = clusterConfig.Project + zone = machineConfig.Zone name = machine.ObjectMeta.Name } op, err := gce.computeService.InstancesDelete(project, zone, name) if err == nil { - err = gce.waitForOperation(config, op) + err = 
gce.waitForOperation(clusterConfig, op) } if err != nil { return gce.handleMachineError(machine, apierrors.DeleteMachine( @@ -365,15 +373,13 @@ func (gce *GCEClient) PostCreate(cluster *clusterv1.Cluster, machines []*cluster return fmt.Errorf("error creating service account for ingress controller: %v", err) } - if len(machines) > 0 { - config, err := gce.providerconfig(machines[0].Spec.ProviderConfig) - if err != nil { - return fmt.Errorf("error creating ingress controller: %v", err) - } - err = CreateIngressController(config.Project, cluster.Name) - if err != nil { - return fmt.Errorf("error creating ingress controller: %v", err) - } + clusterConfig, err := gce.clusterproviderconfig(cluster.Spec.ProviderConfig) + if err != nil { + return fmt.Errorf("Cannot unmarshal cluster's providerConfig field: %v", err) + } + err = CreateIngressController(clusterConfig.Project, cluster.Name) + if err != nil { + return fmt.Errorf("error creating ingress controller: %v", err) } return nil @@ -397,10 +403,10 @@ func (gce *GCEClient) PostDelete(cluster *clusterv1.Cluster, machines []*cluster func (gce *GCEClient) Update(cluster *clusterv1.Cluster, goalMachine *clusterv1.Machine) error { // Before updating, do some basic validation of the object first. - config, err := gce.providerconfig(goalMachine.Spec.ProviderConfig) + config, err := gce.machineproviderconfig(goalMachine.Spec.ProviderConfig) if err != nil { return gce.handleMachineError(goalMachine, - apierrors.InvalidMachineConfiguration("Cannot unmarshal providerConfig field: %v", err)) + apierrors.InvalidMachineConfiguration("Cannot unmarshal machine's providerConfig field: %v", err)) } if verr := gce.validateMachine(goalMachine, config); verr != nil { return gce.handleMachineError(goalMachine, verr) @@ -413,13 +419,13 @@ func (gce *GCEClient) Update(cluster *clusterv1.Cluster, goalMachine *clusterv1. 
currentMachine := (*clusterv1.Machine)(status) if currentMachine == nil { - instance, err := gce.instanceIfExists(goalMachine) + instance, err := gce.instanceIfExists(cluster, goalMachine) if err != nil { return err } if instance != nil && instance.Labels[BootstrapLabelKey] != "" { glog.Infof("Populating current state for boostrap machine %v", goalMachine.ObjectMeta.Name) - return gce.updateAnnotations(goalMachine) + return gce.updateAnnotations(cluster, goalMachine) } else { return fmt.Errorf("Cannot retrieve current state to update machine %v", goalMachine.ObjectMeta.Name) } @@ -432,13 +438,13 @@ func (gce *GCEClient) Update(cluster *clusterv1.Cluster, goalMachine *clusterv1. if util.IsMaster(currentMachine) { glog.Infof("Doing an in-place upgrade for master.\n") // TODO: should we support custom CAs here? - err = gce.updateMasterInplace(currentMachine, goalMachine) + err = gce.updateMasterInplace(cluster, currentMachine, goalMachine) if err != nil { glog.Errorf("master inplace update failed: %v", err) } } else { glog.Infof("re-creating machine %s for update.", currentMachine.ObjectMeta.Name) - err = gce.Delete(currentMachine) + err = gce.Delete(cluster, currentMachine) if err != nil { glog.Errorf("delete machine %s for update failed: %v", currentMachine.ObjectMeta.Name, err) } else { @@ -455,21 +461,26 @@ func (gce *GCEClient) Update(cluster *clusterv1.Cluster, goalMachine *clusterv1. 
return err } -func (gce *GCEClient) Exists(machine *clusterv1.Machine) (bool, error) { - i, err := gce.instanceIfExists(machine) +func (gce *GCEClient) Exists(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) { + i, err := gce.instanceIfExists(cluster, machine) if err != nil { return false, err } return (i != nil), err } -func (gce *GCEClient) GetIP(machine *clusterv1.Machine) (string, error) { - config, err := gce.providerconfig(machine.Spec.ProviderConfig) +func (gce *GCEClient) GetIP(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) { + machineConfig, err := gce.machineproviderconfig(machine.Spec.ProviderConfig) + if err != nil { + return "", err + } + + clusterConfig, err := gce.clusterproviderconfig(cluster.Spec.ProviderConfig) if err != nil { return "", err } - instance, err := gce.computeService.InstancesGet(config.Project, config.Zone, machine.ObjectMeta.Name) + instance, err := gce.computeService.InstancesGet(clusterConfig.Project, machineConfig.Zone, machine.ObjectMeta.Name) if err != nil { return "", err } @@ -486,28 +497,38 @@ func (gce *GCEClient) GetIP(machine *clusterv1.Machine) (string, error) { return publicIP, nil } -func (gce *GCEClient) GetKubeConfig(master *clusterv1.Machine) (string, error) { - config, err := gce.providerconfig(master.Spec.ProviderConfig) +func (gce *GCEClient) GetKubeConfig(cluster *clusterv1.Cluster, master *clusterv1.Machine) (string, error) { + machineConfig, err := gce.machineproviderconfig(master.Spec.ProviderConfig) + if err != nil { + return "", err + } + + clusterConfig, err := gce.clusterproviderconfig(cluster.Spec.ProviderConfig) if err != nil { return "", err } command := "sudo cat /etc/kubernetes/admin.conf" result := strings.TrimSpace(util.ExecCommand( - "gcloud", "compute", "ssh", "--project", config.Project, - "--zone", config.Zone, master.ObjectMeta.Name, "--command", command, "--", "-q")) + "gcloud", "compute", "ssh", "--project", clusterConfig.Project, + "--zone", 
machineConfig.Zone, master.ObjectMeta.Name, "--command", command, "--", "-q")) return result, nil } -func (gce *GCEClient) updateAnnotations(machine *clusterv1.Machine) error { - config, err := gce.providerconfig(machine.Spec.ProviderConfig) +func (gce *GCEClient) updateAnnotations(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { + machineConfig, err := gce.machineproviderconfig(machine.Spec.ProviderConfig) name := machine.ObjectMeta.Name - project := config.Project - zone := config.Zone + if err != nil { + return gce.handleMachineError(machine, + apierrors.InvalidMachineConfiguration("Cannot unmarshal machine's providerConfig field: %v", err)) + } + zone := machineConfig.Zone + clusterConfig, err := gce.clusterproviderconfig(cluster.Spec.ProviderConfig) if err != nil { return gce.handleMachineError(machine, - apierrors.InvalidMachineConfiguration("Cannot unmarshal providerConfig field: %v", err)) + apierrors.InvalidMachineConfiguration("Cannot unmarshal cluster's providerConfig field: %v", err)) } + project := clusterConfig.Project if machine.ObjectMeta.Annotations == nil { @@ -535,7 +556,7 @@ func (gce *GCEClient) requiresUpdate(a *clusterv1.Machine, b *clusterv1.Machine) } // Gets the instance represented by the given machine -func (gce *GCEClient) instanceIfExists(machine *clusterv1.Machine) (*compute.Instance, error) { +func (gce *GCEClient) instanceIfExists(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (*compute.Instance, error) { identifyingMachine := machine // Try to use the last saved status locating the machine @@ -550,12 +571,17 @@ func (gce *GCEClient) instanceIfExists(machine *clusterv1.Machine) (*compute.Ins } // Get the VM via specified location and name - config, err := gce.providerconfig(identifyingMachine.Spec.ProviderConfig) + machineConfig, err := gce.machineproviderconfig(identifyingMachine.Spec.ProviderConfig) + if err != nil { + return nil, err + } + + clusterConfig, err := 
gce.clusterproviderconfig(cluster.Spec.ProviderConfig) if err != nil { return nil, err } - instance, err := gce.computeService.InstancesGet(config.Project, config.Zone, identifyingMachine.ObjectMeta.Name) + instance, err := gce.computeService.InstancesGet(clusterConfig.Project, machineConfig.Zone, identifyingMachine.ObjectMeta.Name) if err != nil { // TODO: Use formal way to check for error code 404 if strings.Contains(err.Error(), "Error 404") { @@ -567,11 +593,25 @@ func (gce *GCEClient) instanceIfExists(machine *clusterv1.Machine) (*compute.Ins return instance, nil } -func (gce *GCEClient) providerconfig(providerConfig clusterv1.ProviderConfig) (*gceconfigv1.GCEProviderConfig, error) { - return gce.gceProviderConfigCodec.DecodeFromProviderConfig(providerConfig) +func (gce *GCEClient) machineproviderconfig(providerConfig clusterv1.ProviderConfig) (*gceconfigv1.GCEMachineProviderConfig, error) { + var config gceconfigv1.GCEMachineProviderConfig + err := gce.gceProviderConfigCodec.DecodeFromProviderConfig(providerConfig, &config) + if err != nil { + return nil, err + } + return &config, nil +} + +func (gce *GCEClient) clusterproviderconfig(providerConfig clusterv1.ProviderConfig) (*gceconfigv1.GCEClusterProviderConfig, error) { + var config gceconfigv1.GCEClusterProviderConfig + err := gce.gceProviderConfigCodec.DecodeFromProviderConfig(providerConfig, &config) + if err != nil { + return nil, err + } + return &config, nil } -func (gce *GCEClient) waitForOperation(c *gceconfigv1.GCEProviderConfig, op *compute.Operation) error { +func (gce *GCEClient) waitForOperation(c *gceconfigv1.GCEClusterProviderConfig, op *compute.Operation) error { glog.Infof("Wait for %v %q...", op.OperationType, op.Name) defer glog.Infof("Finish wait for %v %q...", op.OperationType, op.Name) @@ -595,7 +635,7 @@ func (gce *GCEClient) waitForOperation(c *gceconfigv1.GCEProviderConfig, op *com } // getOp returns an updated operation. 
-func (gce *GCEClient) getOp(c *gceconfigv1.GCEProviderConfig, op *compute.Operation) (*compute.Operation, error) { +func (gce *GCEClient) getOp(c *gceconfigv1.GCEClusterProviderConfig, op *compute.Operation) (*compute.Operation, error) { return gce.computeService.ZoneOperationsGet(c.Project, path.Base(op.Zone), op.Name) } @@ -612,13 +652,13 @@ func (gce *GCEClient) checkOp(op *compute.Operation, err error) error { return errors.New(errs.String()) } -func (gce *GCEClient) updateMasterInplace(oldMachine *clusterv1.Machine, newMachine *clusterv1.Machine) error { +func (gce *GCEClient) updateMasterInplace(cluster *clusterv1.Cluster, oldMachine *clusterv1.Machine, newMachine *clusterv1.Machine) error { if oldMachine.Spec.Versions.ControlPlane != newMachine.Spec.Versions.ControlPlane { // First pull off the latest kubeadm. cmd := "export KUBEADM_VERSION=$(curl -sSL https://dl.k8s.io/release/stable.txt); " + "curl -sSL https://dl.k8s.io/release/${KUBEADM_VERSION}/bin/linux/amd64/kubeadm | sudo tee /usr/bin/kubeadm > /dev/null; " + "sudo chmod a+rx /usr/bin/kubeadm" - _, err := gce.remoteSshCommand(newMachine, cmd) + _, err := gce.remoteSshCommand(cluster, newMachine, cmd) if err != nil { glog.Infof("remotesshcomand error: %v", err) return err @@ -627,7 +667,7 @@ func (gce *GCEClient) updateMasterInplace(oldMachine *clusterv1.Machine, newMach // TODO: We might want to upgrade kubeadm if the target control plane version is newer. // Upgrade control plan. 
cmd = fmt.Sprintf("sudo kubeadm upgrade apply %s -y", "v"+newMachine.Spec.Versions.ControlPlane) - _, err = gce.remoteSshCommand(newMachine, cmd) + _, err = gce.remoteSshCommand(cluster, newMachine, cmd) if err != nil { glog.Infof("remotesshcomand error: %v", err) return err @@ -638,16 +678,16 @@ func (gce *GCEClient) updateMasterInplace(oldMachine *clusterv1.Machine, newMach if oldMachine.Spec.Versions.Kubelet != newMachine.Spec.Versions.Kubelet { cmd := fmt.Sprintf("sudo kubectl drain %s --kubeconfig /etc/kubernetes/admin.conf --ignore-daemonsets", newMachine.Name) // The errors are intentionally ignored as master has static pods. - gce.remoteSshCommand(newMachine, cmd) + gce.remoteSshCommand(cluster, newMachine, cmd) // Upgrade kubelet to desired version. cmd = fmt.Sprintf("sudo apt-get install kubelet=%s", newMachine.Spec.Versions.Kubelet+"-00") - _, err := gce.remoteSshCommand(newMachine, cmd) + _, err := gce.remoteSshCommand(cluster, newMachine, cmd) if err != nil { glog.Infof("remotesshcomand error: %v", err) return err } cmd = fmt.Sprintf("sudo kubectl uncordon %s --kubeconfig /etc/kubernetes/admin.conf", newMachine.Name) - _, err = gce.remoteSshCommand(newMachine, cmd) + _, err = gce.remoteSshCommand(cluster, newMachine, cmd) if err != nil { glog.Infof("remotesshcomand error: %v", err) return err @@ -657,7 +697,7 @@ func (gce *GCEClient) updateMasterInplace(oldMachine *clusterv1.Machine, newMach return nil } -func (gce *GCEClient) validateMachine(machine *clusterv1.Machine, config *gceconfigv1.GCEProviderConfig) *apierrors.MachineError { +func (gce *GCEClient) validateMachine(machine *clusterv1.Machine, config *gceconfigv1.GCEMachineProviderConfig) *apierrors.MachineError { if machine.Spec.Versions.Kubelet == "" { return apierrors.InvalidMachineConfiguration("spec.versions.kubelet can't be empty") } @@ -709,7 +749,7 @@ func (gce *GCEClient) getImagePath(img string) (imagePath string) { return defaultImg } -func newDisks(config 
*gceconfigv1.GCEProviderConfig, zone string, imagePath string, minDiskSizeGb int64) []*compute.AttachedDisk { +func newDisks(config *gceconfigv1.GCEMachineProviderConfig, zone string, imagePath string, minDiskSizeGb int64) []*compute.AttachedDisk { var disks []*compute.AttachedDisk for idx, disk := range config.Disks { diskSizeGb := disk.InitializeParams.DiskSizeGb @@ -758,7 +798,7 @@ func getOrNewComputeService(params MachineActuatorParams) (GCEClientComputeServi return computeService, nil } -func (gce *GCEClient) getMetadata(cluster *clusterv1.Cluster, machine *clusterv1.Machine, config *gceconfigv1.GCEProviderConfig, configParams *machinesetup.ConfigParams) (*compute.Metadata, error) { +func (gce *GCEClient) getMetadata(cluster *clusterv1.Cluster, machine *clusterv1.Machine, clusterConfig *gceconfigv1.GCEClusterProviderConfig, configParams *machinesetup.ConfigParams) (*compute.Metadata, error) { var metadataMap map[string]string if machine.Spec.Versions.Kubelet == "" { return nil, errors.New("invalid master configuration: missing Machine.Spec.Versions.Kubelet") @@ -777,7 +817,7 @@ func (gce *GCEClient) getMetadata(cluster *clusterv1.Cluster, machine *clusterv1 "invalid master configuration: missing Machine.Spec.Versions.ControlPlane")) } var err error - metadataMap, err = masterMetadata(gce.kubeadmToken, cluster, machine, config.Project, &machineSetupMetadata) + metadataMap, err = masterMetadata(gce.kubeadmToken, cluster, machine, clusterConfig.Project, &machineSetupMetadata) if err != nil { return nil, err } @@ -791,7 +831,7 @@ func (gce *GCEClient) getMetadata(cluster *clusterv1.Cluster, machine *clusterv1 return nil, errors.New("invalid cluster state: cannot create a Kubernetes node without an API endpoint") } var err error - metadataMap, err = nodeMetadata(gce.kubeadmToken, cluster, machine, config.Project, &machineSetupMetadata) + metadataMap, err = nodeMetadata(gce.kubeadmToken, cluster, machine, clusterConfig.Project, &machineSetupMetadata) if err != nil 
{ return nil, err } @@ -817,4 +857,4 @@ func CreateExtApiServerRoleBinding() error { return run("kubectl", "create", "rolebinding", "-n", "kube-system", "machine-controller", "--role=extension-apiserver-authentication-reader", "--serviceaccount=default:default") -} +} \ No newline at end of file diff --git a/cloud/google/machineactuator_test.go b/cloud/google/machineactuator_test.go index 99312b0f8590..b510c84e5dab 100644 --- a/cloud/google/machineactuator_test.go +++ b/cloud/google/machineactuator_test.go @@ -93,7 +93,7 @@ func (m *GCEClientMachineSetupConfigMock) GetMetadata(params *machinesetup.Confi } func TestNoDisks(t *testing.T) { - config := newGCEProviderConfigFixture() + config := newGCEMachineProviderConfigFixture() config.Disks = make([]gceconfigv1.Disk, 0) receivedInstance, computeServiceMock := newInsertInstanceCapturingMock() createCluster(t, config, computeServiceMock, nil) @@ -101,7 +101,7 @@ func TestNoDisks(t *testing.T) { } func TestMinimumSizeShouldBeEnforced(t *testing.T) { - config := newGCEProviderConfigFixture() + config := newGCEMachineProviderConfigFixture() config.Disks = []gceconfigv1.Disk{ { InitializeParams: gceconfigv1.DiskInitializeParams{ @@ -117,7 +117,7 @@ func TestMinimumSizeShouldBeEnforced(t *testing.T) { } func TestOneDisk(t *testing.T) { - config := newGCEProviderConfigFixture() + config := newGCEMachineProviderConfigFixture() config.Disks = []gceconfigv1.Disk{ { InitializeParams: gceconfigv1.DiskInitializeParams{ @@ -133,7 +133,7 @@ func TestOneDisk(t *testing.T) { } func TestTwoDisks(t *testing.T) { - config := newGCEProviderConfigFixture() + config := newGCEMachineProviderConfigFixture() config.Disks = []gceconfigv1.Disk{ { InitializeParams: gceconfigv1.DiskInitializeParams{ @@ -182,7 +182,7 @@ func checkDiskValues(t *testing.T, disk *compute.AttachedDisk, boot bool, sizeGb } func TestCreateWithCAShouldPopulateMetadata(t *testing.T) { - config := newGCEProviderConfigFixture() + config := 
newGCEMachineProviderConfigFixture() receivedInstance, computeServiceMock := newInsertInstanceCapturingMock() ca, err := cert.Load("testdata/ca") if err != nil { @@ -217,8 +217,8 @@ func getMetadataItem(t *testing.T, metadata *compute.Metadata, itemKey string) * return nil } -func createCluster(t *testing.T, config gceconfigv1.GCEProviderConfig, computeServiceMock *GCEClientComputeServiceMock, ca *cert.CertificateAuthority) { - cluster := newDefaultClusterFixture() +func createCluster(t *testing.T, config gceconfigv1.GCEMachineProviderConfig, computeServiceMock *GCEClientComputeServiceMock, ca *cert.CertificateAuthority) { + cluster := newDefaultClusterFixture(t) machine := newMachine(t, config) configWatch := newMachineSetupConfigWatcher() params := google.MachineActuatorParams{ @@ -278,7 +278,7 @@ func (cw *TestMachineSetupConfigWatcher) GetMachineSetupConfig() (machinesetup.M return cw.machineSetupConfigMock, nil } -func newMachine(t *testing.T, gceProviderConfig gceconfigv1.GCEProviderConfig) *v1alpha1.Machine { +func newMachine(t *testing.T, gceProviderConfig gceconfigv1.GCEMachineProviderConfig) *v1alpha1.Machine { gceProviderConfigCodec, err := gceconfigv1.NewCodec() if err != nil { t.Fatalf("unable to create GCE provider config codec: %v", err) @@ -287,6 +287,7 @@ func newMachine(t *testing.T, gceProviderConfig gceconfigv1.GCEProviderConfig) * if err != nil { t.Fatalf("unable to encode provider config: %v", err) } + return &v1alpha1.Machine{ Spec: v1alpha1.MachineSpec{ ProviderConfig: *providerConfig, @@ -305,20 +306,39 @@ func newMachine(t *testing.T, gceProviderConfig gceconfigv1.GCEProviderConfig) * } } -func newGCEProviderConfigFixture() gceconfigv1.GCEProviderConfig { - return gceconfigv1.GCEProviderConfig{ +func newGCEMachineProviderConfigFixture() gceconfigv1.GCEMachineProviderConfig { + return gceconfigv1.GCEMachineProviderConfig{ TypeMeta: v1.TypeMeta{ APIVersion: "gceproviderconfig/v1alpha1", - Kind: "GCEProviderConfig", + Kind: 
"GCEMachineProviderConfig", }, - Project: "project-name-2000", Zone: "us-west5-f", OS: "os-name", Disks: make([]gceconfigv1.Disk, 0), } } -func newDefaultClusterFixture() *v1alpha1.Cluster { +func newGCEClusterProviderConfigFixture() gceconfigv1.GCEClusterProviderConfig { + return gceconfigv1.GCEClusterProviderConfig{ + TypeMeta: v1.TypeMeta{ + APIVersion: "gceproviderconfig/v1alpha1", + Kind: "GCEClusterProviderConfig", + }, + Project: "project-name-2000", + } +} + +func newDefaultClusterFixture(t *testing.T) *v1alpha1.Cluster { + gceProviderConfigCodec, err := gceconfigv1.NewCodec() + if err != nil { + t.Fatalf("unable to create GCE provider config codec: %v", err) + } + gceProviderConfig := newGCEClusterProviderConfigFixture() + providerConfig, err := gceProviderConfigCodec.EncodeToProviderConfig(&gceProviderConfig) + if err != nil { + t.Fatalf("unable to encode provider config: %v", err) + } + return &v1alpha1.Cluster{ TypeMeta: v1.TypeMeta{ Kind: "Cluster", @@ -339,6 +359,7 @@ func newDefaultClusterFixture() *v1alpha1.Cluster { }, }, }, + ProviderConfig: *providerConfig, }, } -} +} \ No newline at end of file diff --git a/cloud/google/pods.go b/cloud/google/pods.go index 2444ab6f7b69..4ba32c6c8a29 100644 --- a/cloud/google/pods.go +++ b/cloud/google/pods.go @@ -34,7 +34,7 @@ import ( var apiServerImage = "gcr.io/k8s-cluster-api/cluster-apiserver:0.0.3" var controllerManagerImage = "gcr.io/k8s-cluster-api/controller-manager:0.0.3" -var machineControllerImage = "gcr.io/k8s-cluster-api/gce-machine-controller:0.0.9" +var machineControllerImage = "gcr.io/k8s-cluster-api/gce-machine-controller:0.0.10" func init() { if img, ok := os.LookupEnv("MACHINE_CONTROLLER_IMAGE"); ok { diff --git a/cloud/google/serviceaccount.go b/cloud/google/serviceaccount.go index d155088a5dd6..057edaf808cc 100644 --- a/cloud/google/serviceaccount.go +++ b/cloud/google/serviceaccount.go @@ -132,31 +132,24 @@ func (gce *GCEClient) createServiceAccount(serviceAccountPrefix string, roles [] 
return "", "", fmt.Errorf("machine count is zero, cannot create service a/c") } - // TODO: use real go bindings - // Figure out what projects the service account needs permission to. - projects, err := gce.getProjects(initialMachines) + config, err := gce.clusterproviderconfig(cluster.Spec.ProviderConfig) if err != nil { return "", "", err } - // The service account needs to be created in a single project, so just - // use the first one, but grant permission to all projects in the list. - project := projects[0] accountId := serviceAccountPrefix + "-" + util.RandomString(5) - err = run("gcloud", "--project", project, "iam", "service-accounts", "create", "--display-name="+serviceAccountPrefix+" service account", accountId) + err = run("gcloud", "--project", config.Project, "iam", "service-accounts", "create", "--display-name="+serviceAccountPrefix+" service account", accountId) if err != nil { return "", "", fmt.Errorf("couldn't create service account: %v", err) } - email := fmt.Sprintf("%s@%s.iam.gserviceaccount.com", accountId, project) + email := fmt.Sprintf("%s@%s.iam.gserviceaccount.com", accountId, config.Project) - for _, project := range projects { - for _, role := range roles { - err = run("gcloud", "projects", "add-iam-policy-binding", project, "--member=serviceAccount:"+email, "--role=roles/"+role) - if err != nil { - return "", "", fmt.Errorf("couldn't grant permissions to service account: %v", err) - } + for _, role := range roles { + err = run("gcloud", "projects", "add-iam-policy-binding", config.Project, "--member=serviceAccount:"+email, "--role=roles/"+role) + if err != nil { + return "", "", fmt.Errorf("couldn't grant permissions to service account: %v", err) } } @@ -165,7 +158,7 @@ func (gce *GCEClient) createServiceAccount(serviceAccountPrefix string, roles [] } cluster.ObjectMeta.Annotations[ClusterAnnotationPrefix+serviceAccountPrefix] = email - return accountId, project, nil + return accountId, config.Project, nil } func (gce *GCEClient) 
DeleteMasterNodeServiceAccount(cluster *clusterv1.Cluster, machines []*clusterv1.Machine) error { @@ -190,11 +183,12 @@ func (gce *GCEClient) deleteServiceAccount(serviceAccountPrefix string, roles [] return nil } - projects, err := gce.getProjects(machines) + config, err := gce.clusterproviderconfig(cluster.Spec.ProviderConfig) if err != nil { - return err + glog.Info("cannot parse cluster providerConfig field") + return nil } - project := projects[0] + var email string if cluster.ObjectMeta.Annotations != nil { email = cluster.ObjectMeta.Annotations[ClusterAnnotationPrefix+serviceAccountPrefix] @@ -206,34 +200,20 @@ func (gce *GCEClient) deleteServiceAccount(serviceAccountPrefix string, roles [] } for _, role := range roles { - err = run("gcloud", "projects", "remove-iam-policy-binding", project, "--member=serviceAccount:"+email, "--role=roles/"+role) + err = run("gcloud", "projects", "remove-iam-policy-binding", config.Project, "--member=serviceAccount:"+email, "--role=roles/"+role) } if err != nil { return fmt.Errorf("couldn't remove permissions to service account: %v", err) } - err = run("gcloud", "--project", project, "iam", "service-accounts", "delete", email) + err = run("gcloud", "--project", config.Project, "iam", "service-accounts", "delete", email) if err != nil { return fmt.Errorf("couldn't delete service account: %v", err) } return nil } -func (gce *GCEClient) getProjects(machines []*clusterv1.Machine) ([]string, error) { - // Figure out what projects the service account needs permission to. - var projects []string - for _, machine := range machines { - config, err := gce.providerconfig(machine.Spec.ProviderConfig) - if err != nil { - return nil, err - } - - projects = append(projects, config.Project) - } - return projects, nil -} - func run(cmd string, args ...string) error { c := exec.Command(cmd, args...) 
if out, err := c.CombinedOutput(); err != nil { diff --git a/cloud/google/ssh.go b/cloud/google/ssh.go index 9be02b7a48d9..24b3cbd06662 100644 --- a/cloud/google/ssh.go +++ b/cloud/google/ssh.go @@ -64,21 +64,26 @@ func cleanupSshKeyPairs() { } // It creates secret to store private key. -func (gce *GCEClient) setupSSHAccess(m *clusterv1.Machine) error { +func (gce *GCEClient) setupSSHAccess(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { // Create public/private key pairs err := createSshKeyPairs() if err != nil { return err } - config, err := gce.providerconfig(m.Spec.ProviderConfig) + machineConfig, err := gce.machineproviderconfig(machine.Spec.ProviderConfig) if err != nil { return err } - err = run("gcloud", "compute", "instances", "add-metadata", m.Name, + clusterConfig, err := gce.clusterproviderconfig(cluster.Spec.ProviderConfig) + if err != nil { + return err + } + + err = run("gcloud", "compute", "instances", "add-metadata", machine.Name, "--metadata-from-file", "ssh-keys="+SshKeyFile+".pub.gcloud", - "--project", config.Project, "--zone", config.Zone) + "--project", clusterConfig.Project, "--zone", machineConfig.Zone) if err != nil { return err } @@ -94,10 +99,10 @@ func (gce *GCEClient) setupSSHAccess(m *clusterv1.Machine) error { return err } -func (gce *GCEClient) remoteSshCommand(m *clusterv1.Machine, cmd string) (string, error) { - glog.Infof("Remote SSH execution '%s' on %s", cmd, m.ObjectMeta.Name) +func (gce *GCEClient) remoteSshCommand(cluster *clusterv1.Cluster, machine *clusterv1.Machine, cmd string) (string, error) { + glog.Infof("Remote SSH execution '%s' on %s", cmd, machine.ObjectMeta.Name) - publicIP, err := gce.GetIP(m) + publicIP, err := gce.GetIP(cluster, machine) if err != nil { return "", err } diff --git a/cloud/terraform/machineactuator.go b/cloud/terraform/machineactuator.go index e380b1221e7c..f66471f0d3bb 100644 --- a/cloud/terraform/machineactuator.go +++ b/cloud/terraform/machineactuator.go @@ -1,12 +1,9 @@ /* 
Copyright 2017 The Kubernetes Authors. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -343,7 +340,7 @@ func runTerraformCmd(stdout bool, workingDir string, arg ...string) (bytes.Buffe return out, nil } -func (tf *TerraformClient) Delete(machine *clusterv1.Machine) error { +func (tf *TerraformClient) Delete(_ *clusterv1.Cluster, machine *clusterv1.Machine) error { // Check if the instance exists, return if it doesn't instance, err := tf.instanceIfExists(machine) if err != nil { @@ -498,7 +495,7 @@ func (tf *TerraformClient) remoteSshCommand(m *clusterv1.Machine, cmd, privateKe return strings.TrimSpace(parts[1]), nil } -func (tf *TerraformClient) Exists(machine *clusterv1.Machine) (bool, error) { +func (tf *TerraformClient) Exists(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) { i, err := tf.instanceIfExists(machine) if err != nil { return false, err @@ -721,4 +718,4 @@ func pathExists(path string) (bool, error) { return false, nil } return true, err -} +} \ No newline at end of file diff --git a/gcp-deployer/.gitignore b/gcp-deployer/.gitignore index b6281a4f9d24..69546c0cd929 100644 --- a/gcp-deployer/.gitignore +++ b/gcp-deployer/.gitignore @@ -1,2 +1,3 @@ gcp-deployer machines.yaml +cluster.yaml diff --git a/gcp-deployer/cluster.yaml b/gcp-deployer/cluster.yaml.template similarity index 81% rename from gcp-deployer/cluster.yaml rename to gcp-deployer/cluster.yaml.template index ccf9bdf2fd9e..128884a53f09 100644 --- a/gcp-deployer/cluster.yaml +++ b/gcp-deployer/cluster.yaml.template @@ -12,4 +12,5 @@ spec: providerConfig: value: apiVersion: 
"gceproviderconfig/v1alpha1" - kind: "GCEProviderConfig" \ No newline at end of file + kind: "GCEClusterProviderConfig" + project: "$GCLOUD_PROJECT" diff --git a/gcp-deployer/deploy/deploy.go b/gcp-deployer/deploy/deploy.go index 8c18d85f8196..7bc83e1ab348 100644 --- a/gcp-deployer/deploy/deploy.go +++ b/gcp-deployer/deploy/deploy.go @@ -82,7 +82,7 @@ func (d *deployer) CreateCluster(c *clusterv1.Cluster, machines []*clusterv1.Mac vmCreated := false if err := d.createCluster(c, machines, &vmCreated); err != nil { if vmCreated { - d.deleteMasterVM(machines) + d.deleteMasterVM(c, machines) } d.machineDeployer.PostDelete(c, machines) return err @@ -120,7 +120,7 @@ func (d *deployer) DeleteCluster() error { return err } - if err := d.deleteMasterVM(machines); err != nil { + if err := d.deleteMasterVM(cluster, machines); err != nil { glog.Errorf("Error deleting master vm", err) } @@ -132,14 +132,14 @@ func (d *deployer) DeleteCluster() error { return nil } -func (d *deployer) deleteMasterVM(machines []*clusterv1.Machine) error { +func (d *deployer) deleteMasterVM(cluster *clusterv1.Cluster, machines []*clusterv1.Machine) error { master := util.GetMaster(machines) if master == nil { return fmt.Errorf("error deleting master vm, no master found") } glog.Infof("Deleting master vm %s", master.Name) - if err := d.machineDeployer.Delete(master); err != nil { + if err := d.machineDeployer.Delete(cluster, master); err != nil { return err } return nil diff --git a/gcp-deployer/deploy/deploy_helper.go b/gcp-deployer/deploy/deploy_helper.go index e5be2a2fad23..cd71f781cd48 100644 --- a/gcp-deployer/deploy/deploy_helper.go +++ b/gcp-deployer/deploy/deploy_helper.go @@ -72,12 +72,12 @@ func (d *deployer) createCluster(c *clusterv1.Cluster, machines []*clusterv1.Mac *vmCreated = true glog.Infof("Created master %s", master.GetName()) - masterIP, err := d.getMasterIP(master) + masterIP, err := d.getMasterIP(c, master) if err != nil { return fmt.Errorf("unable to get master IP: %v", err) 
} - if err := d.copyKubeConfig(master); err != nil { + if err := d.copyKubeConfig(c, master); err != nil { return fmt.Errorf("unable to write kubeconfig: %v", err) } @@ -203,9 +203,9 @@ func (d *deployer) getCluster() (*clusterv1.Cluster, error) { return &clusters.Items[0], nil } -func (d *deployer) getMasterIP(master *clusterv1.Machine) (string, error) { +func (d *deployer) getMasterIP(cluster *clusterv1.Cluster, master *clusterv1.Machine) (string, error) { for i := 0; i < MasterIPAttempts; i++ { - ip, err := d.machineDeployer.GetIP(master) + ip, err := d.machineDeployer.GetIP(cluster, master) if err != nil || ip == "" { glog.Info("Hanging for master IP...") time.Sleep(time.Duration(SleepSecondsPerAttempt) * time.Second) @@ -216,10 +216,10 @@ func (d *deployer) getMasterIP(master *clusterv1.Machine) (string, error) { return "", fmt.Errorf("unable to find Master IP after defined wait") } -func (d *deployer) copyKubeConfig(master *clusterv1.Machine) error { +func (d *deployer) copyKubeConfig(cluster *clusterv1.Cluster, master *clusterv1.Machine) error { writeErr := util.Retry(func() (bool, error) { glog.Infof("Waiting for Kubernetes to come up...") - config, err := d.machineDeployer.GetKubeConfig(master) + config, err := d.machineDeployer.GetKubeConfig(cluster, master) if err != nil { glog.Errorf("Error while retriving kubeconfig %s", err) return false, err diff --git a/gcp-deployer/deploy/machinedeployer.go b/gcp-deployer/deploy/machinedeployer.go index 2eaee0f10771..c7d714775036 100644 --- a/gcp-deployer/deploy/machinedeployer.go +++ b/gcp-deployer/deploy/machinedeployer.go @@ -9,8 +9,8 @@ import ( // Provider-specific machine logic the deployer needs. 
type machineDeployer interface { machine.Actuator - GetIP(machine *clusterv1.Machine) (string, error) - GetKubeConfig(master *clusterv1.Machine) (string, error) + GetIP(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) + GetKubeConfig(cluster *clusterv1.Cluster, master *clusterv1.Machine) (string, error) // Provision infrastructure that the cluster needs before it // can be created diff --git a/gcp-deployer/generate-yaml.sh b/gcp-deployer/generate-yaml.sh index 494d0d1f1421..920a785247ed 100755 --- a/gcp-deployer/generate-yaml.sh +++ b/gcp-deployer/generate-yaml.sh @@ -5,8 +5,10 @@ GCLOUD_PROJECT=$(gcloud config get-value project) ZONE=$(gcloud config get-value compute/zone) ZONE="${ZONE:-us-central1-f}" -TEMPLATE_FILE=machines.yaml.template -GENERATED_FILE=machines.yaml +MACHINE_TEMPLATE_FILE=machines.yaml.template +MACHINE_GENERATED_FILE=machines.yaml +CLUSTER_TEMPLATE_FILE=cluster.yaml.template +CLUSTER_GENERATED_FILE=cluster.yaml OVERWRITE=0 SCRIPT=$(basename $0) @@ -36,9 +38,16 @@ while test $# -gt 0; do esac done -if [ $OVERWRITE -ne 1 ] && [ -f $GENERATED_FILE ]; then - echo File $GENERATED_FILE already exists. Delete it manually before running this script. +if [ $OVERWRITE -ne 1 ] && [ -f $MACHINE_GENERATED_FILE ]; then + echo File $MACHINE_GENERATED_FILE already exists. Delete it manually before running this script. exit 1 fi -sed -e "s/\$GCLOUD_PROJECT/$GCLOUD_PROJECT/" $TEMPLATE_FILE | sed -e "s/\$ZONE/$ZONE/" > $GENERATED_FILE +if [ $OVERWRITE -ne 1 ] && [ -f $CLUSTER_GENERATED_FILE ]; then + echo File $CLUSTER_GENERATED_FILE already exists. Delete it manually before running this script. 
+ exit 1 +fi + +sed -e "s/\$ZONE/$ZONE/" $MACHINE_TEMPLATE_FILE > $MACHINE_GENERATED_FILE + +sed -e "s/\$GCLOUD_PROJECT/$GCLOUD_PROJECT/" $CLUSTER_TEMPLATE_FILE > $CLUSTER_GENERATED_FILE diff --git a/gcp-deployer/machines.yaml.template b/gcp-deployer/machines.yaml.template index b43d467681cb..9c579e1a9f43 100644 --- a/gcp-deployer/machines.yaml.template +++ b/gcp-deployer/machines.yaml.template @@ -9,8 +9,7 @@ items: providerConfig: value: apiVersion: "gceproviderconfig/v1alpha1" - kind: "GCEProviderConfig" - project: "$GCLOUD_PROJECT" + kind: "GCEMachineProviderConfig" zone: "$ZONE" machineType: "n1-standard-2" os: "ubuntu-1604-lts" @@ -36,8 +35,7 @@ items: providerConfig: value: apiVersion: "gceproviderconfig/v1alpha1" - kind: "GCEProviderConfig" - project: "$GCLOUD_PROJECT" + kind: "GCEMachineProviderConfig" zone: "$ZONE" machineType: "n1-standard-1" os: "ubuntu-1604-lts" diff --git a/pkg/controller/machine/actuator.go b/pkg/controller/machine/actuator.go index 6d380ead161e..8451ce4c2bbf 100644 --- a/pkg/controller/machine/actuator.go +++ b/pkg/controller/machine/actuator.go @@ -26,9 +26,9 @@ type Actuator interface { // Create the machine. Create(*clusterv1.Cluster, *clusterv1.Machine) error // Delete the machine. - Delete(*clusterv1.Machine) error + Delete(*clusterv1.Cluster, *clusterv1.Machine) error // Update the machine to the provided definition. - Update(c *clusterv1.Cluster, machine *clusterv1.Machine) error + Update(*clusterv1.Cluster, *clusterv1.Machine) error // Checks if the machine currently exists. 
- Exists(*clusterv1.Machine) (bool, error) + Exists(*clusterv1.Cluster, *clusterv1.Machine) (bool, error) } diff --git a/pkg/controller/machine/controller.go b/pkg/controller/machine/controller.go index 426c93d5704a..2fa1eaf4b61c 100644 --- a/pkg/controller/machine/controller.go +++ b/pkg/controller/machine/controller.go @@ -110,7 +110,12 @@ func (c *MachineControllerImpl) Reconcile(machine *clusterv1.Machine) error { return nil } - exist, err := c.actuator.Exists(machine) + cluster, err := c.getCluster(machine) + if err != nil { + return err + } + + exist, err := c.actuator.Exists(cluster, machine) if err != nil { glog.Errorf("Error checking existance of machine instance for machine object %v; %v", name, err) return err @@ -149,7 +154,12 @@ func (c *MachineControllerImpl) update(new_machine *clusterv1.Machine) error { } func (c *MachineControllerImpl) delete(machine *clusterv1.Machine) error { - return c.actuator.Delete(machine) + cluster, err := c.getCluster(machine) + if err != nil { + return err + } + + return c.actuator.Delete(cluster, machine) } func (c *MachineControllerImpl) getCluster(machine *clusterv1.Machine) (*clusterv1.Cluster, error) { diff --git a/pkg/controller/machine/testactuator.go b/pkg/controller/machine/testactuator.go index 359383af3e1f..523e3fb56173 100644 --- a/pkg/controller/machine/testactuator.go +++ b/pkg/controller/machine/testactuator.go @@ -1,12 +1,9 @@ /* Copyright 2018 The Kubernetes Authors. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -49,7 +46,7 @@ func (a *TestActuator) Create(*v1alpha1.Cluster, *v1alpha1.Machine) error { return nil } -func (a *TestActuator) Delete(*v1alpha1.Machine) error { +func (a *TestActuator) Delete(*v1alpha1.Cluster, *v1alpha1.Machine) error { defer func() { if a.BlockOnDelete { <-a.unblock @@ -75,7 +72,7 @@ func (a *TestActuator) Update(c *v1alpha1.Cluster, machine *v1alpha1.Machine) er return nil } -func (a *TestActuator) Exists(*v1alpha1.Machine) (bool, error) { +func (a *TestActuator) Exists(*v1alpha1.Cluster, *v1alpha1.Machine) (bool, error) { defer func() { if a.BlockOnExists { <-a.unblock @@ -96,4 +93,4 @@ func NewTestActuator() *TestActuator { func (a *TestActuator) Unblock() { close(a.unblock) -} +} \ No newline at end of file diff --git a/sample/machineset.yaml b/sample/machineset.yaml index 8f8db44ec7dc..6165d1cfa3ae 100644 --- a/sample/machineset.yaml +++ b/sample/machineset.yaml @@ -15,8 +15,7 @@ spec: providerConfig: value: apiVersion: "gceproviderconfig/v1alpha1" - kind: "GCEProviderConfig" - project: "$GCLOUD_PROJECT" + kind: "GCEMachineProviderConfig" zone: "us-central1-f" machineType: "n1-standard-1" os: "ubuntu-1604-lts" diff --git a/tf-deployer/deploy/deploy.go b/tf-deployer/deploy/deploy.go index 6885d86b0c39..08ec48f5027a 100644 --- a/tf-deployer/deploy/deploy.go +++ b/tf-deployer/deploy/deploy.go @@ -70,7 +70,7 @@ func (d *deployer) CreateCluster(c *clusterv1.Cluster, machines []*clusterv1.Mac vmCreated := false if err := d.createCluster(c, machines, &vmCreated); err != nil { if vmCreated { - d.deleteMasterVM(machines) + d.deleteMasterVM(c, machines) } d.machineDeployer.PostDelete(c, machines) return err @@ -108,7 +108,7 @@ func (d *deployer) DeleteCluster() error { return err } - if err := d.deleteMasterVM(machines); err != nil { + if err := d.deleteMasterVM(cluster, machines); err != nil { glog.Errorf("Error deleting master vm", err) } @@ -120,14 +120,14 @@ func (d *deployer) DeleteCluster() error { return nil } -func (d *deployer) 
deleteMasterVM(machines []*clusterv1.Machine) error { +func (d *deployer) deleteMasterVM(cluster *clusterv1.Cluster, machines []*clusterv1.Machine) error { master := util.GetMaster(machines) if master == nil { return fmt.Errorf("error deleting master vm, no master found") } glog.Infof("Deleting master vm %s", master.Name) - if err := d.machineDeployer.Delete(master); err != nil { + if err := d.machineDeployer.Delete(cluster, master); err != nil { return err } return nil