From 10c76a36eb859cccd95212262323c336ddfba4d2 Mon Sep 17 00:00:00 2001 From: Loc Nguyen Date: Thu, 18 Oct 2018 16:45:37 -0700 Subject: [PATCH] Conform folder struct and controller code to Go standard (#83) * Moved folders around to match standard go project structure Created cmd and pkg as top level folder and moved folders around appropriately. Fixed up the makefiles and dockerfiles. * Use Go standard for struct name for functions Some leftover code from before this repo was under sig-vmware didn't follow the go standard. Fixed them. Resolves #81 --- Gopkg.toml | 2 +- Makefile | 8 +- .../cmd/vsphere-machine-controller/.gitignore | 1 - cmd/clusterctl/.gitignore | 1 + .../clusterctl}/examples/vsphere/.gitignore | 0 .../clusterctl}/examples/vsphere/README.md | 0 .../examples/vsphere/addons.yaml.template | 0 .../examples/vsphere/cluster.yaml.template | 0 .../examples/vsphere/generate-yaml.sh | 0 .../examples/vsphere/machines.yaml.template | 0 .../examples/vsphere/machineset.yaml.template | 0 .../vsphere/provider-components.yaml.template | 0 {clusterctl => cmd/clusterctl}/main.go | 2 +- cmd/vsphere-machine-controller/.gitignore | 1 + .../vsphere-machine-controller/Dockerfile | 4 +- .../vsphere-machine-controller/Makefile | 4 +- .../vsphere-machine-controller/main.go | 2 +- {cloud => pkg/cloud}/vsphere/README.md | 0 .../cloud}/vsphere/clusteractuator.go | 56 +++--- .../cloud}/vsphere/constants/constants.go | 0 {cloud => pkg/cloud}/vsphere/deployer.go | 4 +- .../cloud}/vsphere/instancestatus.go | 0 .../cloud}/vsphere/machineactuator.go | 6 +- .../vsphere/namedmachines/namedmachines.go | 0 .../vsphere/provisioner/common/templates.go | 2 +- .../vsphere/provisioner/govmomi/create.go | 82 ++++----- .../vsphere/provisioner/govmomi/delete.go | 16 +- .../vsphere/provisioner/govmomi/exists.go | 6 +- .../provisioner/govmomi/provisioner.go | 2 +- .../vsphere/provisioner/govmomi/session.go | 9 +- .../vsphere/provisioner/govmomi/update.go | 20 +-- 
.../vsphere/provisioner/govmomi/utils.go | 36 ++-- .../provisioner/terraform/terraform.go | 170 +++++++++--------- {cloud => pkg/cloud}/vsphere/utils/utils.go | 8 +- .../vsphere/vsphereproviderconfig/doc.go | 0 .../vsphere/vsphereproviderconfig/register.go | 0 .../vsphere/vsphereproviderconfig/types.go | 0 .../vsphereproviderconfig/v1alpha1/doc.go | 0 .../v1alpha1/register.go | 2 +- .../vsphereproviderconfig/v1alpha1/types.go | 0 .../v1alpha1/zz_generated.deepcopy.go | 0 .../zz_generated.deepcopy.go | 0 42 files changed, 222 insertions(+), 222 deletions(-) delete mode 100644 cloud/vsphere/cmd/vsphere-machine-controller/.gitignore create mode 100644 cmd/clusterctl/.gitignore rename {clusterctl => cmd/clusterctl}/examples/vsphere/.gitignore (100%) rename {clusterctl => cmd/clusterctl}/examples/vsphere/README.md (100%) rename {clusterctl => cmd/clusterctl}/examples/vsphere/addons.yaml.template (100%) rename {clusterctl => cmd/clusterctl}/examples/vsphere/cluster.yaml.template (100%) rename {clusterctl => cmd/clusterctl}/examples/vsphere/generate-yaml.sh (100%) rename {clusterctl => cmd/clusterctl}/examples/vsphere/machines.yaml.template (100%) rename {clusterctl => cmd/clusterctl}/examples/vsphere/machineset.yaml.template (100%) rename {clusterctl => cmd/clusterctl}/examples/vsphere/provider-components.yaml.template (100%) rename {clusterctl => cmd/clusterctl}/main.go (91%) create mode 100644 cmd/vsphere-machine-controller/.gitignore rename {cloud/vsphere/cmd => cmd}/vsphere-machine-controller/Dockerfile (96%) rename {cloud/vsphere/cmd => cmd}/vsphere-machine-controller/Makefile (98%) rename {cloud/vsphere/cmd => cmd}/vsphere-machine-controller/main.go (98%) rename {cloud => pkg/cloud}/vsphere/README.md (100%) rename {cloud => pkg/cloud}/vsphere/clusteractuator.go (84%) rename {cloud => pkg/cloud}/vsphere/constants/constants.go (100%) rename {cloud => pkg/cloud}/vsphere/deployer.go (87%) rename {cloud => pkg/cloud}/vsphere/instancestatus.go (100%) rename {cloud => 
pkg/cloud}/vsphere/machineactuator.go (92%) rename {cloud => pkg/cloud}/vsphere/namedmachines/namedmachines.go (100%) rename {cloud => pkg/cloud}/vsphere/provisioner/common/templates.go (99%) rename {cloud => pkg/cloud}/vsphere/provisioner/govmomi/create.go (86%) rename {cloud => pkg/cloud}/vsphere/provisioner/govmomi/delete.go (75%) rename {cloud => pkg/cloud}/vsphere/provisioner/govmomi/exists.go (81%) rename {cloud => pkg/cloud}/vsphere/provisioner/govmomi/provisioner.go (96%) rename {cloud => pkg/cloud}/vsphere/provisioner/govmomi/session.go (81%) rename {cloud => pkg/cloud}/vsphere/provisioner/govmomi/update.go (77%) rename {cloud => pkg/cloud}/vsphere/provisioner/govmomi/utils.go (77%) rename {cloud => pkg/cloud}/vsphere/provisioner/terraform/terraform.go (83%) rename {cloud => pkg/cloud}/vsphere/utils/utils.go (94%) rename {cloud => pkg/cloud}/vsphere/vsphereproviderconfig/doc.go (100%) rename {cloud => pkg/cloud}/vsphere/vsphereproviderconfig/register.go (100%) rename {cloud => pkg/cloud}/vsphere/vsphereproviderconfig/types.go (100%) rename {cloud => pkg/cloud}/vsphere/vsphereproviderconfig/v1alpha1/doc.go (100%) rename {cloud => pkg/cloud}/vsphere/vsphereproviderconfig/v1alpha1/register.go (95%) rename {cloud => pkg/cloud}/vsphere/vsphereproviderconfig/v1alpha1/types.go (100%) rename {cloud => pkg/cloud}/vsphere/vsphereproviderconfig/v1alpha1/zz_generated.deepcopy.go (100%) rename {cloud => pkg/cloud}/vsphere/vsphereproviderconfig/zz_generated.deepcopy.go (100%) diff --git a/Gopkg.toml b/Gopkg.toml index a33b60edacbc..c9232affe3c7 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -30,7 +30,7 @@ required = [ [[constraint]] name = "sigs.k8s.io/cluster-api" - branch = "master" + revision = "7db708b5df567c79e2e46f9c37cdbf001bef1302" [[constraint]] name = "k8s.io/code-generator" diff --git a/Makefile b/Makefile index 24e011635e1d..8cc4db14dc46 100644 --- a/Makefile +++ b/Makefile @@ -41,18 +41,18 @@ generate: gendeepcopy gendeepcopy: go build -o 
$$GOPATH/bin/deepcopy-gen sigs.k8s.io/cluster-api-provider-vsphere/vendor/k8s.io/code-generator/cmd/deepcopy-gen deepcopy-gen \ - -i ./cloud/vsphere/vsphereproviderconfig,./cloud/vsphere/vsphereproviderconfig/v1alpha1 \ + -i ./pkg/cloud/vsphere/vsphereproviderconfig,./pkg/cloud/vsphere/vsphereproviderconfig/v1alpha1 \ -O zz_generated.deepcopy \ -h boilerplate.go.txt build: depend - CGO_ENABLED=0 go install -a -ldflags '-extldflags "-static"' sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/cmd/vsphere-machine-controller + CGO_ENABLED=0 go install -a -ldflags '-extldflags "-static"' sigs.k8s.io/cluster-api-provider-vsphere/cmd/vsphere-machine-controller images: depend - $(MAKE) -C cloud/vsphere/cmd/vsphere-machine-controller image + $(MAKE) -C cmd/vsphere-machine-controller image push: depend - $(MAKE) -C cloud/vsphere/cmd/vsphere-machine-controller push + $(MAKE) -C cmd/vsphere-machine-controller push check: depend fmt vet diff --git a/cloud/vsphere/cmd/vsphere-machine-controller/.gitignore b/cloud/vsphere/cmd/vsphere-machine-controller/.gitignore deleted file mode 100644 index bd62db75d2b8..000000000000 --- a/cloud/vsphere/cmd/vsphere-machine-controller/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vsphere-machine-controller \ No newline at end of file diff --git a/cmd/clusterctl/.gitignore b/cmd/clusterctl/.gitignore new file mode 100644 index 000000000000..06ebee288e78 --- /dev/null +++ b/cmd/clusterctl/.gitignore @@ -0,0 +1 @@ +clusterctl diff --git a/clusterctl/examples/vsphere/.gitignore b/cmd/clusterctl/examples/vsphere/.gitignore similarity index 100% rename from clusterctl/examples/vsphere/.gitignore rename to cmd/clusterctl/examples/vsphere/.gitignore diff --git a/clusterctl/examples/vsphere/README.md b/cmd/clusterctl/examples/vsphere/README.md similarity index 100% rename from clusterctl/examples/vsphere/README.md rename to cmd/clusterctl/examples/vsphere/README.md diff --git a/clusterctl/examples/vsphere/addons.yaml.template 
b/cmd/clusterctl/examples/vsphere/addons.yaml.template similarity index 100% rename from clusterctl/examples/vsphere/addons.yaml.template rename to cmd/clusterctl/examples/vsphere/addons.yaml.template diff --git a/clusterctl/examples/vsphere/cluster.yaml.template b/cmd/clusterctl/examples/vsphere/cluster.yaml.template similarity index 100% rename from clusterctl/examples/vsphere/cluster.yaml.template rename to cmd/clusterctl/examples/vsphere/cluster.yaml.template diff --git a/clusterctl/examples/vsphere/generate-yaml.sh b/cmd/clusterctl/examples/vsphere/generate-yaml.sh similarity index 100% rename from clusterctl/examples/vsphere/generate-yaml.sh rename to cmd/clusterctl/examples/vsphere/generate-yaml.sh diff --git a/clusterctl/examples/vsphere/machines.yaml.template b/cmd/clusterctl/examples/vsphere/machines.yaml.template similarity index 100% rename from clusterctl/examples/vsphere/machines.yaml.template rename to cmd/clusterctl/examples/vsphere/machines.yaml.template diff --git a/clusterctl/examples/vsphere/machineset.yaml.template b/cmd/clusterctl/examples/vsphere/machineset.yaml.template similarity index 100% rename from clusterctl/examples/vsphere/machineset.yaml.template rename to cmd/clusterctl/examples/vsphere/machineset.yaml.template diff --git a/clusterctl/examples/vsphere/provider-components.yaml.template b/cmd/clusterctl/examples/vsphere/provider-components.yaml.template similarity index 100% rename from clusterctl/examples/vsphere/provider-components.yaml.template rename to cmd/clusterctl/examples/vsphere/provider-components.yaml.template diff --git a/clusterctl/main.go b/cmd/clusterctl/main.go similarity index 91% rename from clusterctl/main.go rename to cmd/clusterctl/main.go index 3f5d258b73b6..5eb7402a5d10 100644 --- a/clusterctl/main.go +++ b/cmd/clusterctl/main.go @@ -14,7 +14,7 @@ limitations under the License. 
package main import ( - _ "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere" + _ "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere" "sigs.k8s.io/cluster-api/clusterctl/cmd" ) diff --git a/cmd/vsphere-machine-controller/.gitignore b/cmd/vsphere-machine-controller/.gitignore new file mode 100644 index 000000000000..737929fbd262 --- /dev/null +++ b/cmd/vsphere-machine-controller/.gitignore @@ -0,0 +1 @@ +manager diff --git a/cloud/vsphere/cmd/vsphere-machine-controller/Dockerfile b/cmd/vsphere-machine-controller/Dockerfile similarity index 96% rename from cloud/vsphere/cmd/vsphere-machine-controller/Dockerfile rename to cmd/vsphere-machine-controller/Dockerfile index f1e67fae260f..515b84557419 100644 --- a/cloud/vsphere/cmd/vsphere-machine-controller/Dockerfile +++ b/cmd/vsphere-machine-controller/Dockerfile @@ -18,9 +18,9 @@ WORKDIR /go/src/sigs.k8s.io/cluster-api-provider-vsphere # This expects that the context passed to the docker build command is # the cluster-api directory. # e.g. docker build -t -f -COPY . . +COPY . . -RUN CGO_ENABLED=0 GOOS=linux go install -a -ldflags '-extldflags "-static"' sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/cmd/vsphere-machine-controller +RUN CGO_ENABLED=0 GOOS=linux go install -a -ldflags '-extldflags "-static"' sigs.k8s.io/cluster-api-provider-vsphere/cmd/vsphere-machine-controller # Final container FROM debian:stretch-slim diff --git a/cloud/vsphere/cmd/vsphere-machine-controller/Makefile b/cmd/vsphere-machine-controller/Makefile similarity index 98% rename from cloud/vsphere/cmd/vsphere-machine-controller/Makefile rename to cmd/vsphere-machine-controller/Makefile index fd930e16b779..bac0c36da6da 100644 --- a/cloud/vsphere/cmd/vsphere-machine-controller/Makefile +++ b/cmd/vsphere-machine-controller/Makefile @@ -21,7 +21,7 @@ NAME = vsphere-machine-controller TAG = 0.0.11 image: - docker build -t "$(PREFIX)/$(NAME):$(TAG)" -f ./Dockerfile ../../../.. 
+ docker build -t "$(PREFIX)/$(NAME):$(TAG)" -f ./Dockerfile ../.. push: image docker push "$(PREFIX)/$(NAME):$(TAG)" @@ -32,7 +32,7 @@ fix_gcs_permissions: gsutil -m acl ch -r -u AllUsers:READ gs://artifacts.$(GCR_BUCKET).appspot.com dev_image: - docker build -t "$(DEV_PREFIX)/$(NAME):$(TAG)-dev" -f ./Dockerfile ../../../.. + docker build -t "$(DEV_PREFIX)/$(NAME):$(TAG)-dev" -f ./Dockerfile ../.. dev_push: dev_image docker push "$(DEV_PREFIX)/$(NAME):$(TAG)-dev" diff --git a/cloud/vsphere/cmd/vsphere-machine-controller/main.go b/cmd/vsphere-machine-controller/main.go similarity index 98% rename from cloud/vsphere/cmd/vsphere-machine-controller/main.go rename to cmd/vsphere-machine-controller/main.go index ee7d1c4c5d13..35d2183200ec 100644 --- a/cloud/vsphere/cmd/vsphere-machine-controller/main.go +++ b/cmd/vsphere-machine-controller/main.go @@ -28,7 +28,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" - "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere" "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" clusterapiclientsetscheme "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme" "sigs.k8s.io/cluster-api/pkg/controller/cluster" diff --git a/cloud/vsphere/README.md b/pkg/cloud/vsphere/README.md similarity index 100% rename from cloud/vsphere/README.md rename to pkg/cloud/vsphere/README.md diff --git a/cloud/vsphere/clusteractuator.go b/pkg/cloud/vsphere/clusteractuator.go similarity index 84% rename from cloud/vsphere/clusteractuator.go rename to pkg/cloud/vsphere/clusteractuator.go index aca2c3bb8fc9..6883508c5db1 100644 --- a/cloud/vsphere/clusteractuator.go +++ b/pkg/cloud/vsphere/clusteractuator.go @@ -28,9 +28,9 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/record" - "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/constants" - vsphereutils 
"sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/utils" - vsphereconfig "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/vsphereproviderconfig" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/constants" + vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/utils" + vsphereconfig "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/vsphereproviderconfig" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" v1alpha1 "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1" @@ -46,7 +46,7 @@ type ClusterActuator struct { } // Reconcile will create or update the cluster -func (vc *ClusterActuator) Reconcile(cluster *clusterv1.Cluster) error { +func (ca *ClusterActuator) Reconcile(cluster *clusterv1.Cluster) error { glog.Infof("Attempting to reconcile cluster %s", cluster.ObjectMeta.Name) // The generic workflow would be as follows: @@ -57,7 +57,7 @@ func (vc *ClusterActuator) Reconcile(cluster *clusterv1.Cluster) error { // In the absence of the lb creation, the logic would be to simply take the first master node // and use that as the API endpoint for now. 
if len(cluster.Status.APIEndpoints) == 0 { - err := vc.provisionLoadBalancer(cluster) + err := ca.provisionLoadBalancer(cluster) if err != nil { glog.Infof("Error could not provision the Load Balancer for the cluster: %s", err) return err @@ -67,33 +67,33 @@ func (vc *ClusterActuator) Reconcile(cluster *clusterv1.Cluster) error { //return nil } // At this stage we are expecting the lb endpoint to be present in the final lb implementation - err := vc.ensureLoadBalancerMembers(cluster) + err := ca.ensureLoadBalancerMembers(cluster) if err != nil { glog.Infof("Error setting the Load Balancer members for the cluster: %s", err) return err } // Check if the target kubernetes is ready or not, and update the ProviderStatus if change is detected - err = vc.updateK8sAPIStatus(cluster) + err = ca.updateK8sAPIStatus(cluster) if err != nil { return err } return nil } -func (vc *ClusterActuator) updateK8sAPIStatus(cluster *clusterv1.Cluster) error { - currentClusterAPIStatus, err := vc.getClusterAPIStatus(cluster) +func (ca *ClusterActuator) updateK8sAPIStatus(cluster *clusterv1.Cluster) error { + currentClusterAPIStatus, err := ca.getClusterAPIStatus(cluster) if err != nil { return err } - return vc.updateClusterAPIStatus(cluster, currentClusterAPIStatus) + return ca.updateClusterAPIStatus(cluster, currentClusterAPIStatus) } // fetchKubeConfig returns the cached copy of the Kubeconfig in the secrets for the target cluster // In case the secret does not exist, then it fetches from the target master node and caches it for -func (vc *ClusterActuator) fetchKubeConfig(cluster *clusterv1.Cluster, masters []*clusterv1.Machine) (string, error) { +func (ca *ClusterActuator) fetchKubeConfig(cluster *clusterv1.Cluster, masters []*clusterv1.Machine) (string, error) { var kubeconfig string glog.Infof("attempting to fetch kubeconfig") - secret, err := vc.k8sClient.Core().Secrets(cluster.Namespace).Get(fmt.Sprintf(constants.KubeConfigSecretName, cluster.UID), metav1.GetOptions{}) + secret, 
err := ca.k8sClient.Core().Secrets(cluster.Namespace).Get(fmt.Sprintf(constants.KubeConfigSecretName, cluster.UID), metav1.GetOptions{}) if err != nil { glog.Info("could not pull secrets for kubeconfig") // TODO: Check for the proper err type for *not present* case. rather than all other cases @@ -115,7 +115,7 @@ func (vc *ClusterActuator) fetchKubeConfig(cluster *clusterv1.Cluster, masters [ }, StringData: configmap, } - secret, err = vc.k8sClient.Core().Secrets(cluster.Namespace).Create(secret) + secret, err = ca.k8sClient.Core().Secrets(cluster.Namespace).Create(secret) if err != nil { glog.Warningf("Could not create the secret for the saving kubeconfig: err [%s]", err.Error()) } @@ -126,8 +126,8 @@ func (vc *ClusterActuator) fetchKubeConfig(cluster *clusterv1.Cluster, masters [ return kubeconfig, nil } -func (vc *ClusterActuator) getClusterAPIStatus(cluster *clusterv1.Cluster) (vsphereconfig.APIStatus, error) { - masters, err := vsphereutils.GetMasterForCluster(cluster, vc.lister) +func (ca *ClusterActuator) getClusterAPIStatus(cluster *clusterv1.Cluster) (vsphereconfig.APIStatus, error) { + masters, err := vsphereutils.GetMasterForCluster(cluster, ca.lister) if err != nil { glog.Infof("Error retrieving master nodes for the cluster: %s", err) return vsphereconfig.ApiNotReady, err @@ -136,7 +136,7 @@ func (vc *ClusterActuator) getClusterAPIStatus(cluster *clusterv1.Cluster) (vsph glog.Infof("No masters for the cluster [%s] present", cluster.Name) return vsphereconfig.ApiNotReady, nil } - kubeconfig, err := vc.fetchKubeConfig(cluster, masters) + kubeconfig, err := ca.fetchKubeConfig(cluster, masters) if err != nil { return vsphereconfig.ApiNotReady, err } @@ -162,7 +162,7 @@ func (vc *ClusterActuator) getClusterAPIStatus(cluster *clusterv1.Cluster) (vsph return vsphereconfig.ApiReady, nil } -func (vc *ClusterActuator) updateClusterAPIStatus(cluster *clusterv1.Cluster, newStatus vsphereconfig.APIStatus) error { +func (ca *ClusterActuator) 
updateClusterAPIStatus(cluster *clusterv1.Cluster, newStatus vsphereconfig.APIStatus) error { oldProviderStatus, err := vsphereutils.GetClusterProviderStatus(cluster) if err != nil { return err @@ -181,7 +181,7 @@ func (vc *ClusterActuator) updateClusterAPIStatus(cluster *clusterv1.Cluster, ne out, err := json.Marshal(newProviderStatus) ncluster := cluster.DeepCopy() ncluster.Status.ProviderStatus = &runtime.RawExtension{Raw: out} - _, err = vc.clusterV1alpha1.Clusters(ncluster.Namespace).UpdateStatus(ncluster) + _, err = ca.clusterV1alpha1.Clusters(ncluster.Namespace).UpdateStatus(ncluster) if err != nil { glog.Infof("Error in updating the cluster api status from [%s] to [%s]: %s", oldProviderStatus.APIStatus, newStatus, err) return err @@ -189,7 +189,7 @@ func (vc *ClusterActuator) updateClusterAPIStatus(cluster *clusterv1.Cluster, ne return nil } -func (vc *ClusterActuator) provisionLoadBalancer(cluster *clusterv1.Cluster) error { +func (ca *ClusterActuator) provisionLoadBalancer(cluster *clusterv1.Cluster) error { // TODO(ssurana): // 1. implement the lb provisioning // 2. update the lb public endpoint to the cluster endpoint @@ -198,9 +198,9 @@ func (vc *ClusterActuator) provisionLoadBalancer(cluster *clusterv1.Cluster) err // ensureLoadBalancerMembers would be responsible for keeping the master API endpoints // synced with the lb members at all times. 
-func (vc *ClusterActuator) ensureLoadBalancerMembers(cluster *clusterv1.Cluster) error { +func (ca *ClusterActuator) ensureLoadBalancerMembers(cluster *clusterv1.Cluster) error { // This is the temporary implementation until we do the proper LB implementation - err := vc.setMasterNodeIPAsEndpoint(cluster) + err := ca.setMasterNodeIPAsEndpoint(cluster) if err != nil { glog.Infof("Error registering master node's IP as API Endpoint for the cluster: %s", err) return err @@ -210,10 +210,10 @@ func (vc *ClusterActuator) ensureLoadBalancerMembers(cluster *clusterv1.Cluster) // TODO(ssurana): Remove this method once we have the proper lb implementation // Temporary implementation: Simply use the first master IP that you can find -func (vc *ClusterActuator) setMasterNodeIPAsEndpoint(cluster *clusterv1.Cluster) error { +func (ca *ClusterActuator) setMasterNodeIPAsEndpoint(cluster *clusterv1.Cluster) error { ncluster := cluster.DeepCopy() if len(ncluster.Status.APIEndpoints) == 0 { - masters, err := vsphereutils.GetMasterForCluster(ncluster, vc.lister) + masters, err := vsphereutils.GetMasterForCluster(ncluster, ca.lister) if err != nil { glog.Infof("Error retrieving master nodes for the cluster: %s", err) return err @@ -231,21 +231,21 @@ func (vc *ClusterActuator) setMasterNodeIPAsEndpoint(cluster *clusterv1.Cluster) Host: ip, Port: constants.ApiServerPort, }} - _, err = vc.clusterV1alpha1.Clusters(ncluster.Namespace).UpdateStatus(ncluster) + _, err = ca.clusterV1alpha1.Clusters(ncluster.Namespace).UpdateStatus(ncluster) if err != nil { - vc.eventRecorder.Eventf(ncluster, corev1.EventTypeWarning, "Failed Update", "Error in updating API Endpoint: %s", err) + ca.eventRecorder.Eventf(ncluster, corev1.EventTypeWarning, "Failed Update", "Error in updating API Endpoint: %s", err) glog.Infof("Error in updating the status: %s", err) return err } - vc.eventRecorder.Eventf(ncluster, corev1.EventTypeNormal, "Updated", "Updated API Endpoint to %v", ip) + 
ca.eventRecorder.Eventf(ncluster, corev1.EventTypeNormal, "Updated", "Updated API Endpoint to %v", ip) } } return nil } // Delete will delete any cluster level resources for the cluster. -func (vc *ClusterActuator) Delete(cluster *clusterv1.Cluster) error { - vc.eventRecorder.Eventf(cluster, corev1.EventTypeNormal, "Deleted", "Deleting cluster %s", cluster.Name) +func (ca *ClusterActuator) Delete(cluster *clusterv1.Cluster) error { + ca.eventRecorder.Eventf(cluster, corev1.EventTypeNormal, "Deleted", "Deleting cluster %s", cluster.Name) glog.Infof("Attempting to cleaning up resources of cluster %s", cluster.ObjectMeta.Name) return nil } diff --git a/cloud/vsphere/constants/constants.go b/pkg/cloud/vsphere/constants/constants.go similarity index 100% rename from cloud/vsphere/constants/constants.go rename to pkg/cloud/vsphere/constants/constants.go diff --git a/cloud/vsphere/deployer.go b/pkg/cloud/vsphere/deployer.go similarity index 87% rename from cloud/vsphere/deployer.go rename to pkg/cloud/vsphere/deployer.go index 9b4bc38dc1fd..e1f9d6696f93 100644 --- a/cloud/vsphere/deployer.go +++ b/pkg/cloud/vsphere/deployer.go @@ -17,7 +17,7 @@ limitations under the License. 
package vsphere import ( - vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/utils" + vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/utils" clustercommon "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) @@ -37,7 +37,7 @@ func NewDeploymentClient() *DeploymentClient { return &DeploymentClient{} } -func (*DeploymentClient) GetIP(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) { +func (d *DeploymentClient) GetIP(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) { return vsphereutils.GetIP(cluster, machine) } diff --git a/cloud/vsphere/instancestatus.go b/pkg/cloud/vsphere/instancestatus.go similarity index 100% rename from cloud/vsphere/instancestatus.go rename to pkg/cloud/vsphere/instancestatus.go diff --git a/cloud/vsphere/machineactuator.go b/pkg/cloud/vsphere/machineactuator.go similarity index 92% rename from cloud/vsphere/machineactuator.go rename to pkg/cloud/vsphere/machineactuator.go index 8b619ba4a11d..4916ab3aee6c 100644 --- a/cloud/vsphere/machineactuator.go +++ b/pkg/cloud/vsphere/machineactuator.go @@ -23,9 +23,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/record" - "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/provisioner/govmomi" - "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/provisioner/terraform" - vsphereconfigv1 "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/vsphereproviderconfig/v1alpha1" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/provisioner/govmomi" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/provisioner/terraform" + vsphereconfigv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/vsphereproviderconfig/v1alpha1" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" clusterv1alpha1 
"sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" v1alpha1 "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1" diff --git a/cloud/vsphere/namedmachines/namedmachines.go b/pkg/cloud/vsphere/namedmachines/namedmachines.go similarity index 100% rename from cloud/vsphere/namedmachines/namedmachines.go rename to pkg/cloud/vsphere/namedmachines/namedmachines.go diff --git a/cloud/vsphere/provisioner/common/templates.go b/pkg/cloud/vsphere/provisioner/common/templates.go similarity index 99% rename from cloud/vsphere/provisioner/common/templates.go rename to pkg/cloud/vsphere/provisioner/common/templates.go index cdd08e227fce..9f760144c3ff 100644 --- a/cloud/vsphere/provisioner/common/templates.go +++ b/pkg/cloud/vsphere/provisioner/common/templates.go @@ -21,7 +21,7 @@ import ( "fmt" "text/template" - vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/utils" + vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/utils" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) diff --git a/cloud/vsphere/provisioner/govmomi/create.go b/pkg/cloud/vsphere/provisioner/govmomi/create.go similarity index 86% rename from cloud/vsphere/provisioner/govmomi/create.go rename to pkg/cloud/vsphere/provisioner/govmomi/create.go index 72117c247077..ef6392d7f942 100644 --- a/cloud/vsphere/provisioner/govmomi/create.go +++ b/pkg/cloud/vsphere/provisioner/govmomi/create.go @@ -15,19 +15,19 @@ import ( "github.com/vmware/govmomi/vim25/types" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/constants" - vpshereprovisionercommon "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/provisioner/common" - vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/utils" - vsphereconfig "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/vsphereproviderconfig" + 
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/constants" + vpshereprovisionercommon "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/provisioner/common" + vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/utils" + vsphereconfig "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/vsphereproviderconfig" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" clustererror "sigs.k8s.io/cluster-api/pkg/controller/error" apierrors "sigs.k8s.io/cluster-api/pkg/errors" "sigs.k8s.io/cluster-api/pkg/util" ) -func (vc *Provisioner) Create(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { +func (pv *Provisioner) Create(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { glog.Infof("govmomi.Actuator.Create %s", machine.Spec.Name) - s, err := vc.sessionFromProviderConfig(cluster, machine) + s, err := pv.sessionFromProviderConfig(cluster, machine) if err != nil { return err } @@ -41,13 +41,13 @@ func (vc *Provisioner) Create(cluster *clusterv1.Cluster, machine *clusterv1.Mac task := vsphereutils.GetActiveTasks(machine) if task != "" { // In case an active task is going on, wait for its completion - return vc.verifyAndUpdateTask(s, machine, task) + return pv.verifyAndUpdateTask(s, machine, task) } - vc.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Creating", "Creating Machine %v", machine.Name) - return vc.cloneVirtualMachine(s, cluster, machine) + pv.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Creating", "Creating Machine %v", machine.Name) + return pv.cloneVirtualMachine(s, cluster, machine) } -func (vc *Provisioner) verifyAndUpdateTask(s *SessionContext, machine *clusterv1.Machine, taskmoref string) error { +func (pv *Provisioner) verifyAndUpdateTask(s *SessionContext, machine *clusterv1.Machine, taskmoref string) error { ctx, cancel := context.WithCancel(*s.context) defer cancel() // If a task does exist on the @@ -60,7 +60,7 @@ func (vc *Provisioner) 
verifyAndUpdateTask(s *SessionContext, machine *clusterv1 if err != nil { //TODO: inspect the error and act appropriately. // Naive assumption is that the task does not exist any more, thus clear that from the machine - return vc.setTaskRef(machine, "") + return pv.setTaskRef(machine, "") } switch taskmo.Info.State { // Queued or Running @@ -71,22 +71,22 @@ func (vc *Provisioner) verifyAndUpdateTask(s *SessionContext, machine *clusterv1 case types.TaskInfoStateSuccess: if taskmo.Info.DescriptionId == "VirtualMachine.clone" { vmref := taskmo.Info.Result.(types.ManagedObjectReference) - vc.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Created", "Created Machine %s(%s)", machine.Name, vmref.Value) + pv.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Created", "Created Machine %s(%s)", machine.Name, vmref.Value) // Update the Machine object with the VM Reference annotation - err := vc.updateVMReference(machine, vmref.Value) + err := pv.updateVMReference(machine, vmref.Value) if err != nil { return err } - return vc.setTaskRef(machine, "") + return pv.setTaskRef(machine, "") } else if taskmo.Info.DescriptionId == "VirtualMachine.reconfigure" { - vc.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Reconfigured", "Reconfigured Machine %s", taskmo.Info.EntityName) + pv.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Reconfigured", "Reconfigured Machine %s", taskmo.Info.EntityName) } - return vc.setTaskRef(machine, "") + return pv.setTaskRef(machine, "") case types.TaskInfoStateError: if taskmo.Info.DescriptionId == "VirtualMachine.clone" { - vc.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Failed", "Creation failed for Machine %v", machine.Name) + pv.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Failed", "Creation failed for Machine %v", machine.Name) // Clear the reference to the failed task so that the next reconcile loop can re-create it - return vc.setTaskRef(machine, "") + return pv.setTaskRef(machine, "") } default: 
glog.Warningf("unknown state %s for task %s detected", taskmoref, taskmo.Info.State) @@ -96,9 +96,9 @@ func (vc *Provisioner) verifyAndUpdateTask(s *SessionContext, machine *clusterv1 } // CloneVirtualMachine clones the template to a virtual machine. -func (vc *Provisioner) cloneVirtualMachine(s *SessionContext, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { - // Fetch the user-data for the cloud-init first, so that we can fail fast before even trying to connect to VC - userData, err := vc.getCloudInitUserData(cluster, machine) +func (pv *Provisioner) cloneVirtualMachine(s *SessionContext, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { + // Fetch the user-data for the cloud-init first, so that we can fail fast before even trying to connect to pv + userData, err := pv.getCloudInitUserData(cluster, machine) if err != nil { // err returned by the getCloudInitUserData would be of type RequeueAfterError in case kubeadm is not ready yet return err @@ -193,7 +193,7 @@ func (vc *Provisioner) cloneVirtualMachine(s *SessionContext, cluster *clusterv1 prop.Info.Value = userData } if p.Id == "public-keys" { - prop.Info.Value, err = vc.GetSSHPublicKey(cluster) + prop.Info.Value, err = pv.GetSSHPublicKey(cluster) if err != nil { return err } @@ -240,7 +240,7 @@ func (vc *Provisioner) cloneVirtualMachine(s *SessionContext, cluster *clusterv1 if err != nil { return err } - return vc.setTaskRef(machine, task.Reference().Value) + return pv.setTaskRef(machine, task.Reference().Value) } @@ -258,17 +258,17 @@ func Properties(vm *object.VirtualMachine) (*mo.VirtualMachine, error) { } // Removes the current task reference from the Machine object -func (vc *Provisioner) removeTaskRef(machine *clusterv1.Machine) error { +func (pv *Provisioner) removeTaskRef(machine *clusterv1.Machine) error { nmachine := machine.DeepCopy() if nmachine.ObjectMeta.Annotations == nil { return nil } delete(nmachine.ObjectMeta.Annotations, constants.VirtualMachineTaskRef) - _, 
err := vc.clusterV1alpha1.Machines(nmachine.Namespace).Update(nmachine) + _, err := pv.clusterV1alpha1.Machines(nmachine.Namespace).Update(nmachine) return err } -func (vc *Provisioner) updateVMReference(machine *clusterv1.Machine, vmref string) error { +func (pv *Provisioner) updateVMReference(machine *clusterv1.Machine, vmref string) error { oldProviderStatus, err := vsphereutils.GetMachineProviderStatus(machine) if err != nil { return err @@ -288,7 +288,7 @@ func (vc *Provisioner) updateVMReference(machine *clusterv1.Machine, vmref strin out, err := json.Marshal(newProviderStatus) newMachine := machine.DeepCopy() newMachine.Status.ProviderStatus = &runtime.RawExtension{Raw: out} - _, err = vc.clusterV1alpha1.Machines(newMachine.Namespace).UpdateStatus(newMachine) + _, err = pv.clusterV1alpha1.Machines(newMachine.Namespace).UpdateStatus(newMachine) if err != nil { glog.Infof("Error in updating the machine ref: %s", err) return err @@ -296,7 +296,7 @@ func (vc *Provisioner) updateVMReference(machine *clusterv1.Machine, vmref strin return nil } -func (vc *Provisioner) setTaskRef(machine *clusterv1.Machine, taskref string) error { +func (pv *Provisioner) setTaskRef(machine *clusterv1.Machine, taskref string) error { oldProviderStatus, err := vsphereutils.GetMachineProviderStatus(machine) if err != nil { return err @@ -316,7 +316,7 @@ func (vc *Provisioner) setTaskRef(machine *clusterv1.Machine, taskref string) er out, err := json.Marshal(newProviderStatus) newMachine := machine.DeepCopy() newMachine.Status.ProviderStatus = &runtime.RawExtension{Raw: out} - _, err = vc.clusterV1alpha1.Machines(newMachine.Namespace).UpdateStatus(newMachine) + _, err = pv.clusterV1alpha1.Machines(newMachine.Namespace).UpdateStatus(newMachine) if err != nil { glog.Infof("Error in updating the machine ref: %s", err) return err @@ -327,7 +327,7 @@ func (vc *Provisioner) setTaskRef(machine *clusterv1.Machine, taskref string) er // We are storing these as annotations and not in Machine 
Status because that's intended for // "Provider-specific status" that will usually be used to detect updates. Additionally, // Status requires yet another version API resource which is too heavy to store IP and TF state. -func (vc *Provisioner) updateAnnotations(cluster *clusterv1.Cluster, machine *clusterv1.Machine, vmIP string, vm *object.VirtualMachine) error { +func (pv *Provisioner) updateAnnotations(cluster *clusterv1.Cluster, machine *clusterv1.Machine, vmIP string, vm *object.VirtualMachine) error { glog.Infof("Updating annotations for machine %s", machine.ObjectMeta.Name) nmachine := machine.DeepCopy() if nmachine.ObjectMeta.Annotations == nil { @@ -339,7 +339,7 @@ func (vc *Provisioner) updateAnnotations(cluster *clusterv1.Cluster, machine *cl nmachine.ObjectMeta.Annotations[constants.KubeletVersionAnnotationKey] = nmachine.Spec.Versions.Kubelet nmachine.ObjectMeta.Annotations[constants.VirtualMachineRef] = vm.Reference().Value - _, err := vc.clusterV1alpha1.Machines(nmachine.Namespace).Update(nmachine) + _, err := pv.clusterV1alpha1.Machines(nmachine.Namespace).Update(nmachine) if err != nil { return err } @@ -351,7 +351,7 @@ func (vc *Provisioner) updateAnnotations(cluster *clusterv1.Cluster, machine *cl return err } ncluster.Status.ProviderStatus = &runtime.RawExtension{Raw: out} - _, err = vc.clusterV1alpha1.Clusters(ncluster.Namespace).UpdateStatus(ncluster) + _, err = pv.clusterV1alpha1.Clusters(ncluster.Namespace).UpdateStatus(ncluster) if err != nil { glog.Infof("Error in updating the status: %s", err) return err @@ -359,12 +359,12 @@ func (vc *Provisioner) updateAnnotations(cluster *clusterv1.Cluster, machine *cl return nil } -func (vc *Provisioner) getCloudInitUserData(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) { - script, err := vc.getStartupScript(cluster, machine) +func (pv *Provisioner) getCloudInitUserData(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) { + script, err := 
pv.getStartupScript(cluster, machine) if err != nil { return "", err } - config, err := vc.getCloudProviderConfig(cluster, machine) + config, err := pv.getCloudProviderConfig(cluster, machine) if err != nil { return "", err } @@ -382,7 +382,7 @@ func (vc *Provisioner) getCloudInitUserData(cluster *clusterv1.Cluster, machine return userdata, nil } -func (vc *Provisioner) getCloudProviderConfig(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) { +func (pv *Provisioner) getCloudProviderConfig(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) { clusterConfig, err := vsphereutils.GetClusterProviderConfig(cluster.Spec.ProviderConfig) if err != nil { return "", err @@ -413,24 +413,24 @@ func (vc *Provisioner) getCloudProviderConfig(cluster *clusterv1.Cluster, machin // Builds and returns the startup script for the passed machine and cluster. // Returns the full path of the saved startup script and possible error. -func (vc *Provisioner) getStartupScript(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) { +func (pv *Provisioner) getStartupScript(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) { config, err := vsphereutils.GetMachineProviderConfig(machine.Spec.ProviderConfig) if err != nil { - return "", vc.HandleMachineError(machine, apierrors.InvalidMachineConfiguration( + return "", pv.HandleMachineError(machine, apierrors.InvalidMachineConfiguration( "Cannot unmarshal providerConfig field: %v", err), constants.CreateEventAction) } preloaded := false if val, ok := config.MachineVariables["preloaded"]; ok { preloaded, err = strconv.ParseBool(val) if err != nil { - return "", vc.HandleMachineError(machine, apierrors.InvalidMachineConfiguration( + return "", pv.HandleMachineError(machine, apierrors.InvalidMachineConfiguration( "Invalid value for preloaded: %v", err), constants.CreateEventAction) } } var startupScript string if util.IsMaster(machine) { if 
machine.Spec.Versions.ControlPlane == "" { - return "", vc.HandleMachineError(machine, apierrors.InvalidMachineConfiguration( + return "", pv.HandleMachineError(machine, apierrors.InvalidMachineConfiguration( "invalid master configuration: missing Machine.Spec.Versions.ControlPlane"), constants.CreateEventAction) } var err error @@ -449,7 +449,7 @@ func (vc *Provisioner) getStartupScript(cluster *clusterv1.Cluster, machine *clu glog.Infof("invalid cluster state: cannot create a Kubernetes node without an API endpoint") return "", &clustererror.RequeueAfterError{RequeueAfter: constants.RequeueAfterSeconds} } - kubeadmToken, err := vc.GetKubeadmToken(cluster) + kubeadmToken, err := pv.GetKubeadmToken(cluster) if err != nil { glog.Infof("Error generating kubeadm token, will requeue: %s", err.Error()) return "", &clustererror.RequeueAfterError{RequeueAfter: constants.RequeueAfterSeconds} diff --git a/cloud/vsphere/provisioner/govmomi/delete.go b/pkg/cloud/vsphere/provisioner/govmomi/delete.go similarity index 75% rename from cloud/vsphere/provisioner/govmomi/delete.go rename to pkg/cloud/vsphere/provisioner/govmomi/delete.go index f7e0b8694aff..ee4faaca7cd9 100644 --- a/cloud/vsphere/provisioner/govmomi/delete.go +++ b/pkg/cloud/vsphere/provisioner/govmomi/delete.go @@ -11,20 +11,20 @@ import ( "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" corev1 "k8s.io/api/core/v1" - vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/utils" + vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/utils" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) // Delete the machine -func (vc *Provisioner) Delete(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { - s, err := vc.sessionFromProviderConfig(cluster, machine) +func (pv *Provisioner) Delete(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { + s, err := pv.sessionFromProviderConfig(cluster, machine) if err != nil { return err } 
ctx, cancel := context.WithCancel(*s.context) defer cancel() - if exists, _ := vc.Exists(cluster, machine); exists { + if exists, _ := pv.Exists(cluster, machine); exists { moref, err := vsphereutils.GetVMId(machine) if err != nil { return err @@ -38,7 +38,7 @@ func (vc *Provisioner) Delete(cluster *clusterv1.Cluster, machine *clusterv1.Mac if err != nil { return err } - vc.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Killing", "Killing machine %v", machine.Name) + pv.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Killing", "Killing machine %v", machine.Name) vmo := object.NewVirtualMachine(s.session.Client, vmref) if vm.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOn { task, err := vmo.PowerOff(ctx) @@ -56,11 +56,11 @@ func (vc *Provisioner) Delete(cluster *clusterv1.Cluster, machine *clusterv1.Mac taskinfo, err := task.WaitForResult(ctx, nil) if taskinfo.State == types.TaskInfoStateSuccess { glog.Infof("Virtual Machine %v deleted successfully", vm.Name) - vc.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Killed", "Machine %v deletion complete", machine.Name) + pv.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Killed", "Machine %v deletion complete", machine.Name) return nil } - vc.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Killed", "Machine %v deletion complete", machine.Name) - glog.Errorf("VM Deletion failed on VC with following reason %v", taskinfo.Reason) + pv.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Killed", "Machine %v deletion complete", machine.Name) + glog.Errorf("VM Deletion failed on VC with following reason %v", taskinfo.Reason) return errors.New("VM Deletion failed") } return nil diff --git a/cloud/vsphere/provisioner/govmomi/exists.go b/pkg/cloud/vsphere/provisioner/govmomi/exists.go similarity index 81% rename from cloud/vsphere/provisioner/govmomi/exists.go rename to pkg/cloud/vsphere/provisioner/govmomi/exists.go index 3ee063999f6b..bc125cda6c54 100644 ---
a/cloud/vsphere/provisioner/govmomi/exists.go +++ b/pkg/cloud/vsphere/provisioner/govmomi/exists.go @@ -6,17 +6,17 @@ import ( "github.com/golang/glog" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" - vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/utils" + vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/utils" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) -func (vc *Provisioner) Exists(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) { +func (pv *Provisioner) Exists(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) { glog.Infof("govmomi.Actuator.Exists %s", machine.Spec.Name) if machine.Status.NodeRef != nil { glog.Infof("govmomi.Actuator.Exists() - running on target cluster, returning exist") return true, nil } - s, err := vc.sessionFromProviderConfig(cluster, machine) + s, err := pv.sessionFromProviderConfig(cluster, machine) if err != nil { return false, err } diff --git a/cloud/vsphere/provisioner/govmomi/provisioner.go b/pkg/cloud/vsphere/provisioner/govmomi/provisioner.go similarity index 96% rename from cloud/vsphere/provisioner/govmomi/provisioner.go rename to pkg/cloud/vsphere/provisioner/govmomi/provisioner.go index d71b10b9d6eb..fd7cec4b237e 100644 --- a/cloud/vsphere/provisioner/govmomi/provisioner.go +++ b/pkg/cloud/vsphere/provisioner/govmomi/provisioner.go @@ -21,6 +21,6 @@ func New(clusterV1alpha1 clusterv1alpha1.ClusterV1alpha1Interface, k8sClient kub lister: lister, eventRecorder: eventRecorder, sessioncache: make(map[string]interface{}), - k8sClient: k8sClient, + k8sClient: k8sClient, }, nil } diff --git a/cloud/vsphere/provisioner/govmomi/session.go b/pkg/cloud/vsphere/provisioner/govmomi/session.go similarity index 81% rename from cloud/vsphere/provisioner/govmomi/session.go rename to pkg/cloud/vsphere/provisioner/govmomi/session.go index 08edb0564488..a4c245d89830 100644 --- 
a/cloud/vsphere/provisioner/govmomi/session.go +++ b/pkg/cloud/vsphere/provisioner/govmomi/session.go @@ -9,8 +9,7 @@ import ( "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/vim25/soap" - // "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/constants" - vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/utils" + vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/utils" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) @@ -20,13 +19,13 @@ type SessionContext struct { finder *find.Finder } -func (vc *Provisioner) sessionFromProviderConfig(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (*SessionContext, error) { +func (pv *Provisioner) sessionFromProviderConfig(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (*SessionContext, error) { var sc SessionContext vsphereConfig, err := vsphereutils.GetClusterProviderConfig(cluster.Spec.ProviderConfig) if err != nil { return nil, err } - if ses, ok := vc.sessioncache[vsphereConfig.VsphereServer+vsphereConfig.VsphereUser]; ok { + if ses, ok := pv.sessioncache[vsphereConfig.VsphereServer+vsphereConfig.VsphereUser]; ok { s, ok := ses.(SessionContext) if ok { // Test if the session is valid and return @@ -52,6 +51,6 @@ func (vc *Provisioner) sessionFromProviderConfig(cluster *clusterv1.Cluster, mac sc.context = &ctx finder := find.NewFinder(sc.session.Client, false) sc.finder = finder - vc.sessioncache[vsphereConfig.VsphereServer+vsphereConfig.VsphereUser] = sc + pv.sessioncache[vsphereConfig.VsphereServer+vsphereConfig.VsphereUser] = sc return &sc, nil } diff --git a/cloud/vsphere/provisioner/govmomi/update.go b/pkg/cloud/vsphere/provisioner/govmomi/update.go similarity index 77% rename from cloud/vsphere/provisioner/govmomi/update.go rename to pkg/cloud/vsphere/provisioner/govmomi/update.go index 68a9d1ff5ccc..8a78a067b0a8 100644 --- a/cloud/vsphere/provisioner/govmomi/update.go +++ b/pkg/cloud/vsphere/provisioner/govmomi/update.go @@ 
-12,19 +12,19 @@ import ( "github.com/vmware/govmomi/vim25/types" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/constants" - vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/utils" - vsphereconfig "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/vsphereproviderconfig" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/constants" + vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/utils" + vsphereconfig "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/vsphereproviderconfig" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) -func (vc *Provisioner) Update(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { +func (pv *Provisioner) Update(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { // Fetch any active task in vsphere if any // If an active task is there, glog.Infof("govmomi.Actuator.Update %s", machine.Spec.Name) - s, err := vc.sessionFromProviderConfig(cluster, machine) + s, err := pv.sessionFromProviderConfig(cluster, machine) if err != nil { return err } @@ -56,21 +56,21 @@ func (vc *Provisioner) Update(cluster *clusterv1.Cluster, machine *clusterv1.Mac if err != nil { return err } - vc.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "IP Detected", "IP %s detected for Virtual Machine %s", vmIP, vm.Name) - return vc.updateIP(cluster, machine, vmIP) + pv.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "IP Detected", "IP %s detected for Virtual Machine %s", vmIP, vm.Name) + return pv.updateIP(cluster, machine, vmIP) } return nil } // Updates the detected IP for the machine and updates the cluster object signifying a change in the infrastructure -func (vc *Provisioner) updateIP(cluster *clusterv1.Cluster, machine *clusterv1.Machine, vmIP string) error { +func (pv *Provisioner) updateIP(cluster *clusterv1.Cluster, machine *clusterv1.Machine, vmIP string) error { nmachine 
:= machine.DeepCopy() if nmachine.ObjectMeta.Annotations == nil { nmachine.ObjectMeta.Annotations = make(map[string]string) } glog.Infof("updateIP - IP = %s", vmIP) nmachine.ObjectMeta.Annotations[constants.VmIpAnnotationKey] = vmIP - _, err := vc.clusterV1alpha1.Machines(nmachine.Namespace).Update(nmachine) + _, err := pv.clusterV1alpha1.Machines(nmachine.Namespace).Update(nmachine) if err != nil { return err } @@ -79,6 +79,6 @@ func (vc *Provisioner) updateIP(cluster *clusterv1.Cluster, machine *clusterv1.M out, err := json.Marshal(status) ncluster := cluster.DeepCopy() ncluster.Status.ProviderStatus = &runtime.RawExtension{Raw: out} - _, err = vc.clusterV1alpha1.Clusters(ncluster.Namespace).UpdateStatus(ncluster) + _, err = pv.clusterV1alpha1.Clusters(ncluster.Namespace).UpdateStatus(ncluster) return err } diff --git a/cloud/vsphere/provisioner/govmomi/utils.go b/pkg/cloud/vsphere/provisioner/govmomi/utils.go similarity index 77% rename from cloud/vsphere/provisioner/govmomi/utils.go rename to pkg/cloud/vsphere/provisioner/govmomi/utils.go index 1b3e44f1a9d8..2df526f45575 100644 --- a/cloud/vsphere/provisioner/govmomi/utils.go +++ b/pkg/cloud/vsphere/provisioner/govmomi/utils.go @@ -10,14 +10,14 @@ import ( "github.com/golang/glog" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/constants" - vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/utils" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/constants" + vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/utils" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" apierrors "sigs.k8s.io/cluster-api/pkg/errors" "sigs.k8s.io/cluster-api/pkg/kubeadm" ) -func (vc *Provisioner) GetKubeadmToken(cluster *clusterv1.Cluster) (string, error) { +func (pv *Provisioner) GetKubeadmToken(cluster *clusterv1.Cluster) (string, error) { var token string if 
cluster.ObjectMeta.Annotations != nil { if token, ok := cluster.ObjectMeta.Annotations[constants.KubeadmToken]; ok { @@ -28,14 +28,14 @@ func (vc *Provisioner) GetKubeadmToken(cluster *clusterv1.Cluster) (string, erro } } // From the cluster locate the master node - master, err := vsphereutils.GetMasterForCluster(cluster, vc.lister) + master, err := vsphereutils.GetMasterForCluster(cluster, pv.lister) if err != nil { return "", err } if len(master) == 0 { return "", errors.New("No master available") } - kubeconfig, err := vc.GetKubeConfig(cluster) + kubeconfig, err := pv.GetKubeConfig(cluster) if err != nil { return "", err } @@ -60,7 +60,7 @@ func (vc *Provisioner) GetKubeadmToken(cluster *clusterv1.Cluster) (string, erro ncluster.ObjectMeta.Annotations[constants.KubeadmToken] = token // Even though this time might be off by few sec compared to the actual expiry on the token it should not have any impact ncluster.ObjectMeta.Annotations[constants.KubeadmTokenExpiryTime] = time.Now().Add(constants.KubeadmTokenTtl).Format(time.RFC3339) - _, err = vc.clusterV1alpha1.Clusters(cluster.Namespace).Update(ncluster) + _, err = pv.clusterV1alpha1.Clusters(cluster.Namespace).Update(ncluster) if err != nil { glog.Infof("Could not cache the kubeadm token on cluster object: %s", err) } @@ -71,17 +71,17 @@ func (vc *Provisioner) GetKubeadmToken(cluster *clusterv1.Cluster) (string, erro // the appropriate reason/message on the Machine.Status. If not, such as during // cluster installation, it will operate as a no-op. It also returns the // original error for convenience, so callers can do "return handleMachineError(...)". 
-func (vc *Provisioner) HandleMachineError(machine *clusterv1.Machine, err *apierrors.MachineError, eventAction string) error { - if vc.clusterV1alpha1 != nil { +func (pv *Provisioner) HandleMachineError(machine *clusterv1.Machine, err *apierrors.MachineError, eventAction string) error { + if pv.clusterV1alpha1 != nil { nmachine := machine.DeepCopy() reason := err.Reason message := err.Message nmachine.Status.ErrorReason = &reason nmachine.Status.ErrorMessage = &message - vc.clusterV1alpha1.Machines(nmachine.Namespace).UpdateStatus(nmachine) + pv.clusterV1alpha1.Machines(nmachine.Namespace).UpdateStatus(nmachine) } if eventAction != "" { - vc.eventRecorder.Eventf(machine, corev1.EventTypeWarning, "Failed"+eventAction, "%v", err.Reason) + pv.eventRecorder.Eventf(machine, corev1.EventTypeWarning, "Failed"+eventAction, "%v", err.Reason) } glog.Errorf("Machine error: %v", err.Message) @@ -92,34 +92,34 @@ func (vc *Provisioner) HandleMachineError(machine *clusterv1.Machine, err *apier // the appropriate reason/message on the Cluster.Status. If not, such as during // cluster installation, it will operate as a no-op. It also returns the // original error for convenience, so callers can do "return handleClusterError(...)". 
-func (vc *Provisioner) HandleClusterError(cluster *clusterv1.Cluster, err *apierrors.ClusterError, eventAction string) error { - if vc.clusterV1alpha1 != nil { +func (pv *Provisioner) HandleClusterError(cluster *clusterv1.Cluster, err *apierrors.ClusterError, eventAction string) error { + if pv.clusterV1alpha1 != nil { ncluster := cluster.DeepCopy() reason := err.Reason message := err.Message ncluster.Status.ErrorReason = reason ncluster.Status.ErrorMessage = message - vc.clusterV1alpha1.Clusters(ncluster.Namespace).UpdateStatus(ncluster) + pv.clusterV1alpha1.Clusters(ncluster.Namespace).UpdateStatus(ncluster) } if eventAction != "" { - vc.eventRecorder.Eventf(cluster, corev1.EventTypeWarning, "Failed"+eventAction, "%v", err.Reason) + pv.eventRecorder.Eventf(cluster, corev1.EventTypeWarning, "Failed"+eventAction, "%v", err.Reason) } glog.Errorf("Cluster error: %v", err.Message) return err } -func (vc *Provisioner) GetSSHPublicKey(cluster *clusterv1.Cluster) (string, error) { +func (pv *Provisioner) GetSSHPublicKey(cluster *clusterv1.Cluster) (string, error) { // TODO(ssurana): the secret currently is stored in the default namespace. 
This needs to be changed - secret, err := vc.k8sClient.Core().Secrets("default").Get("sshkeys", metav1.GetOptions{}) + secret, err := pv.k8sClient.Core().Secrets("default").Get("sshkeys", metav1.GetOptions{}) if err != nil { return "", err } return string(secret.Data["vsphere_tmp.pub"]), nil } -func (vc *Provisioner) GetKubeConfig(cluster *clusterv1.Cluster) (string, error) { - secret, err := vc.k8sClient.Core().Secrets(cluster.Namespace).Get(fmt.Sprintf(constants.KubeConfigSecretName, cluster.UID), metav1.GetOptions{}) +func (pv *Provisioner) GetKubeConfig(cluster *clusterv1.Cluster) (string, error) { + secret, err := pv.k8sClient.Core().Secrets(cluster.Namespace).Get(fmt.Sprintf(constants.KubeConfigSecretName, cluster.UID), metav1.GetOptions{}) if err != nil { return "", err } diff --git a/cloud/vsphere/provisioner/terraform/terraform.go b/pkg/cloud/vsphere/provisioner/terraform/terraform.go similarity index 83% rename from cloud/vsphere/provisioner/terraform/terraform.go rename to pkg/cloud/vsphere/provisioner/terraform/terraform.go index f7898990d354..e42c391d06a1 100644 --- a/cloud/vsphere/provisioner/terraform/terraform.go +++ b/pkg/cloud/vsphere/provisioner/terraform/terraform.go @@ -39,12 +39,12 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" - "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/constants" - "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/namedmachines" - vpshereprovisionercommon "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/provisioner/common" - vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/utils" - vsphereconfig "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/vsphereproviderconfig" - vsphereconfigv1 "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/vsphereproviderconfig/v1alpha1" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/constants" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/namedmachines" + 
vpshereprovisionercommon "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/provisioner/common" + vsphereutils "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/utils" + vsphereconfig "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/vsphereproviderconfig" + vsphereconfigv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/vsphereproviderconfig/v1alpha1" "sigs.k8s.io/cluster-api/clusterctl/clusterdeployer" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" @@ -104,8 +104,8 @@ func saveFile(contents, path string, perm os.FileMode) error { // Stage the machine for running terraform. // Return: machine's staging dir path, error -func (vc *Provisioner) prepareStageMachineDir(machine *clusterv1.Machine, eventAction string) (string, error) { - err := vc.cleanUpStagingDir(machine) +func (pv *Provisioner) prepareStageMachineDir(machine *clusterv1.Machine, eventAction string) (string, error) { + err := pv.cleanUpStagingDir(machine) if err != nil { return "", err } @@ -113,7 +113,7 @@ func (vc *Provisioner) prepareStageMachineDir(machine *clusterv1.Machine, eventA machineName := machine.ObjectMeta.Name config, err := vsphereutils.GetMachineProviderConfig(machine.Spec.ProviderConfig) if err != nil { - return "", vc.handleMachineError(machine, apierrors.InvalidMachineConfiguration( + return "", pv.handleMachineError(machine, apierrors.InvalidMachineConfiguration( "Cannot unmarshal providerConfig field: %v", err), eventAction) } @@ -126,7 +126,7 @@ func (vc *Provisioner) prepareStageMachineDir(machine *clusterv1.Machine, eventA tfConfigPath := path.Join(machinePath, TfConfigFilename) tfVarsPath := path.Join(machinePath, TfVarsFilename) - namedMachines, err := vc.namedMachineWatch.NamedMachines() + namedMachines, err := pv.namedMachineWatch.NamedMachines() if err != nil { return "", err } @@ -147,7 +147,7 @@ func (vc 
*Provisioner) prepareStageMachineDir(machine *clusterv1.Machine, eventA } // Save the tfstate file (if not bootstrapping). - _, err = vc.stageTfState(machine) + _, err = pv.stageTfState(machine) if err != nil { return "", err } @@ -156,7 +156,7 @@ func (vc *Provisioner) prepareStageMachineDir(machine *clusterv1.Machine, eventA } // Returns the path to the tfstate file staged from the tf state in annotations. -func (vc *Provisioner) stageTfState(machine *clusterv1.Machine) (string, error) { +func (pv *Provisioner) stageTfState(machine *clusterv1.Machine) (string, error) { machinePath := fmt.Sprintf(MachinePathStageFormat, machine.ObjectMeta.Name) tfStateFilePath := path.Join(machinePath, TfStateFilename) @@ -190,24 +190,24 @@ func (vc *Provisioner) stageTfState(machine *clusterv1.Machine) (string, error) } // Cleans up the staging directory. -func (vc *Provisioner) cleanUpStagingDir(machine *clusterv1.Machine) error { +func (pv *Provisioner) cleanUpStagingDir(machine *clusterv1.Machine) error { glog.Infof("Cleaning up the staging dir for machine %s", machine.ObjectMeta.Name) return os.RemoveAll(fmt.Sprintf(MachinePathStageFormat, machine.ObjectMeta.Name)) } // Builds and saves the startup script for the passed machine and cluster. // Returns the full path of the saved startup script and possible error. 
-func (vc *Provisioner) saveStartupScript(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) { +func (pv *Provisioner) saveStartupScript(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) { config, err := vsphereutils.GetMachineProviderConfig(machine.Spec.ProviderConfig) if err != nil { - return "", vc.handleMachineError(machine, apierrors.InvalidMachineConfiguration( + return "", pv.handleMachineError(machine, apierrors.InvalidMachineConfiguration( "Cannot unmarshal providerConfig field: %v", err), constants.CreateEventAction) } preloaded := false if val, ok := config.MachineVariables["preloaded"]; ok { preloaded, err = strconv.ParseBool(val) if err != nil { - return "", vc.handleMachineError(machine, apierrors.InvalidMachineConfiguration( + return "", pv.handleMachineError(machine, apierrors.InvalidMachineConfiguration( "Invalid value for preloaded: %v", err), constants.CreateEventAction) } } @@ -216,7 +216,7 @@ func (vc *Provisioner) saveStartupScript(cluster *clusterv1.Cluster, machine *cl if util.IsMaster(machine) { if machine.Spec.Versions.ControlPlane == "" { - return "", vc.handleMachineError(machine, apierrors.InvalidMachineConfiguration( + return "", pv.handleMachineError(machine, apierrors.InvalidMachineConfiguration( "invalid master configuration: missing Machine.Spec.Versions.ControlPlane"), constants.CreateEventAction) } var err error @@ -234,7 +234,7 @@ func (vc *Provisioner) saveStartupScript(cluster *clusterv1.Cluster, machine *cl if len(cluster.Status.APIEndpoints) == 0 { return "", errors.New("invalid cluster state: cannot create a Kubernetes node without an API endpoint") } - kubeadmToken, err := vc.getKubeadmToken(cluster) + kubeadmToken, err := pv.getKubeadmToken(cluster) if err != nil { return "", err } @@ -262,10 +262,10 @@ func (vc *Provisioner) saveStartupScript(cluster *clusterv1.Cluster, machine *cl return startupScriptPath, nil } -func (vc *Provisioner) Create(cluster *clusterv1.Cluster, machine 
*clusterv1.Machine) error { +func (pv *Provisioner) Create(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { config, err := vsphereutils.GetMachineProviderConfig(machine.Spec.ProviderConfig) if err != nil { - return vc.handleMachineError(machine, apierrors.InvalidMachineConfiguration( + return pv.handleMachineError(machine, apierrors.InvalidMachineConfiguration( "Cannot unmarshal providerConfig field: %v", err), constants.CreateEventAction) } @@ -274,15 +274,15 @@ func (vc *Provisioner) Create(cluster *clusterv1.Cluster, machine *clusterv1.Mac return err } - if verr := vc.validateMachine(machine, config); verr != nil { - return vc.handleMachineError(machine, verr, constants.CreateEventAction) + if verr := pv.validateMachine(machine, config); verr != nil { + return pv.handleMachineError(machine, verr, constants.CreateEventAction) } - if verr := vc.validateCluster(cluster); verr != nil { + if verr := pv.validateCluster(cluster); verr != nil { return verr } - machinePath, err := vc.prepareStageMachineDir(machine, constants.CreateEventAction) + machinePath, err := pv.prepareStageMachineDir(machine, constants.CreateEventAction) if err != nil { return errors.New(fmt.Sprintf("error while staging machine: %+v", err)) } @@ -290,14 +290,14 @@ func (vc *Provisioner) Create(cluster *clusterv1.Cluster, machine *clusterv1.Mac glog.Infof("Staged for machine create at %s", machinePath) // Save the startup script. 
- startupScriptPath, err := vc.saveStartupScript(cluster, machine) + startupScriptPath, err := pv.saveStartupScript(cluster, machine) if err != nil { return errors.New(fmt.Sprintf("could not write startup script %+v", err)) } defer cleanUpStartupScript(machine.Name, startupScriptPath) glog.Infof("Checking if machine %s exists", machine.ObjectMeta.Name) - instance, err := vc.instanceIfExists(machine) + instance, err := pv.instanceIfExists(machine) if err != nil { return err } @@ -335,10 +335,10 @@ func (vc *Provisioner) Create(cluster *clusterv1.Cluster, machine *clusterv1.Mac glog.Infof("Machine %s created with ip address %s", machine.ObjectMeta.Name, vmIp) // Annotate the machine so that we remember exactly what VM we created for it. - tfState, _ := vc.GetTfState(machine) - vc.cleanUpStagingDir(machine) - vc.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Created", "Created Machine %v", machine.Name) - return vc.updateAnnotations(cluster, machine, vmIp, tfState) + tfState, _ := pv.GetTfState(machine) + pv.cleanUpStagingDir(machine) + pv.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Created", "Created Machine %v", machine.Name) + return pv.updateAnnotations(cluster, machine, vmIp, tfState) } else { glog.Infof("Skipped creating a VM for machine %s that already exists.", machine.ObjectMeta.Name) } @@ -401,9 +401,9 @@ func runTerraformCmd(stdout bool, workingDir string, arg ...string) (bytes.Buffe return out, nil } -func (vc *Provisioner) Delete(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { +func (pv *Provisioner) Delete(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { // Check if the instance exists, return if it doesn't - instance, err := vc.instanceIfExists(machine) + instance, err := pv.instanceIfExists(machine) if err != nil { return err } @@ -416,7 +416,7 @@ func (vc *Provisioner) Delete(cluster *clusterv1.Cluster, machine *clusterv1.Mac return err } - machinePath, err := vc.prepareStageMachineDir(machine, 
constants.DeleteEventAction) + machinePath, err := pv.prepareStageMachineDir(machine, constants.DeleteEventAction) // destroy it args := []string{ @@ -435,30 +435,30 @@ func (vc *Provisioner) Delete(cluster *clusterv1.Cluster, machine *clusterv1.Mac return fmt.Errorf("could not run terraform: %s", err) } - vc.cleanUpStagingDir(machine) + pv.cleanUpStagingDir(machine) // Update annotation for the state. machine.ObjectMeta.Annotations[StatusMachineTerraformState] = "" - _, err = vc.clusterV1alpha1.Machines(machine.Namespace).Update(machine) + _, err = pv.clusterV1alpha1.Machines(machine.Namespace).Update(machine) if err == nil { - vc.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Killing", "Killing machine %v", machine.Name) + pv.eventRecorder.Eventf(machine, corev1.EventTypeNormal, "Killing", "Killing machine %v", machine.Name) } return err } -func (vc *Provisioner) PostDelete(cluster *clusterv1.Cluster) error { +func (pv *Provisioner) PostDelete(cluster *clusterv1.Cluster) error { return nil } -func (vc *Provisioner) Update(cluster *clusterv1.Cluster, goalMachine *clusterv1.Machine) error { +func (pv *Provisioner) Update(cluster *clusterv1.Cluster, goalMachine *clusterv1.Machine) error { // Check if the annotations we want to track exist, if not, the user likely created a master machine with their own annotation. if _, ok := goalMachine.ObjectMeta.Annotations[constants.ControlPlaneVersionAnnotationKey]; !ok { - ip, _ := vc.deploymentClient.GetIP(nil, goalMachine) + ip, _ := pv.deploymentClient.GetIP(nil, goalMachine) glog.Info("Version annotations do not exist. 
Populating existing state for bootstrapped machine.") - tfState, _ := vc.GetTfState(goalMachine) - return vc.updateAnnotations(cluster, goalMachine, ip, tfState) + tfState, _ := pv.GetTfState(goalMachine) + return pv.updateAnnotations(cluster, goalMachine, ip, tfState) } if util.IsMaster(goalMachine) { @@ -466,9 +466,9 @@ func (vc *Provisioner) Update(cluster *clusterv1.Cluster, goalMachine *clusterv1 glog.Info("Upgrade for master machine.. Check if upgrade needed.") // If the saved versions and new versions differ, do in-place upgrade. - if vc.needsMasterUpdate(goalMachine) { + if pv.needsMasterUpdate(goalMachine) { glog.Infof("Doing in-place upgrade for master from v%s to v%s", goalMachine.Annotations[constants.ControlPlaneVersionAnnotationKey], goalMachine.Spec.Versions.ControlPlane) - err := vc.updateMasterInPlace(goalMachine) + err := pv.updateMasterInPlace(goalMachine) if err != nil { glog.Errorf("Master in-place upgrade failed: %+v", err) return err @@ -477,9 +477,9 @@ func (vc *Provisioner) Update(cluster *clusterv1.Cluster, goalMachine *clusterv1 glog.Info("UNSUPPORTED MASTER UPDATE.") } } else { - if vc.needsNodeUpdate(goalMachine) { + if pv.needsNodeUpdate(goalMachine) { // Node upgrades - if err := vc.updateNode(cluster, goalMachine); err != nil { + if err := pv.updateNode(cluster, goalMachine); err != nil { glog.Errorf("Node %s update failed: %+v", goalMachine.ObjectMeta.Name, err) return err } @@ -491,33 +491,33 @@ func (vc *Provisioner) Update(cluster *clusterv1.Cluster, goalMachine *clusterv1 return nil } -func (vc *Provisioner) needsControlPlaneUpdate(machine *clusterv1.Machine) bool { +func (pv *Provisioner) needsControlPlaneUpdate(machine *clusterv1.Machine) bool { return machine.Spec.Versions.ControlPlane != machine.Annotations[constants.ControlPlaneVersionAnnotationKey] } -func (vc *Provisioner) needsKubeletUpdate(machine *clusterv1.Machine) bool { +func (pv *Provisioner) needsKubeletUpdate(machine *clusterv1.Machine) bool { return 
machine.Spec.Versions.Kubelet != machine.Annotations[constants.KubeletVersionAnnotationKey] } // Returns true if the node is needed to be upgraded. -func (vc *Provisioner) needsNodeUpdate(machine *clusterv1.Machine) bool { +func (pv *Provisioner) needsNodeUpdate(machine *clusterv1.Machine) bool { return !util.IsMaster(machine) && - vc.needsKubeletUpdate(machine) + pv.needsKubeletUpdate(machine) } // Returns true if the master is needed to be upgraded. -func (vc *Provisioner) needsMasterUpdate(machine *clusterv1.Machine) bool { +func (pv *Provisioner) needsMasterUpdate(machine *clusterv1.Machine) bool { return util.IsMaster(machine) && - vc.needsControlPlaneUpdate(machine) + pv.needsControlPlaneUpdate(machine) // TODO: we should support kubelet upgrades here as well. } -func (vc *Provisioner) updateKubelet(machine *clusterv1.Machine) error { - if vc.needsKubeletUpdate(machine) { +func (pv *Provisioner) updateKubelet(machine *clusterv1.Machine) error { + if pv.needsKubeletUpdate(machine) { // Kubelet packages are versioned 1.10.1-00 and so on. kubeletAptVersion := machine.Spec.Versions.Kubelet + "-00" cmd := fmt.Sprintf("sudo apt-get install kubelet=%s", kubeletAptVersion) - if _, err := vc.remoteSshCommand(machine, cmd, "~/.ssh/vsphere_tmp", "ubuntu"); err != nil { + if _, err := pv.remoteSshCommand(machine, cmd, "~/.ssh/vsphere_tmp", "ubuntu"); err != nil { glog.Errorf("remoteSshCommand while installing new kubelet version: %v", err) return err } @@ -525,19 +525,19 @@ func (vc *Provisioner) updateKubelet(machine *clusterv1.Machine) error { return nil } -func (vc *Provisioner) updateControlPlane(machine *clusterv1.Machine) error { - if vc.needsControlPlaneUpdate(machine) { +func (pv *Provisioner) updateControlPlane(machine *clusterv1.Machine) error { + if pv.needsControlPlaneUpdate(machine) { // Pull the kudeadm for target version K8s. 
cmd := fmt.Sprintf("curl -sSL https://dl.k8s.io/release/v%s/bin/linux/amd64/kubeadm | sudo tee /usr/bin/kubeadm > /dev/null; "+ "sudo chmod a+rx /usr/bin/kubeadm", machine.Spec.Versions.ControlPlane) - if _, err := vc.remoteSshCommand(machine, cmd, "~/.ssh/vsphere_tmp", "ubuntu"); err != nil { + if _, err := pv.remoteSshCommand(machine, cmd, "~/.ssh/vsphere_tmp", "ubuntu"); err != nil { glog.Infof("remoteSshCommand failed while downloading new kubeadm: %+v", err) return err } // Next upgrade control plane cmd = fmt.Sprintf("sudo kubeadm upgrade apply %s -y", "v"+machine.Spec.Versions.ControlPlane) - if _, err := vc.remoteSshCommand(machine, cmd, "~/.ssh/vsphere_tmp", "ubuntu"); err != nil { + if _, err := pv.remoteSshCommand(machine, cmd, "~/.ssh/vsphere_tmp", "ubuntu"); err != nil { glog.Infof("remoteSshCommand failed while upgrading control plane: %+v", err) return err } @@ -546,12 +546,12 @@ func (vc *Provisioner) updateControlPlane(machine *clusterv1.Machine) error { } // Update the passed node machine by recreating it. -func (vc *Provisioner) updateNode(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { - if err := vc.Delete(cluster, machine); err != nil { +func (pv *Provisioner) updateNode(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { + if err := pv.Delete(cluster, machine); err != nil { return err } - if err := vc.Create(cluster, machine); err != nil { + if err := pv.Create(cluster, machine); err != nil { return err } return nil @@ -559,24 +559,24 @@ func (vc *Provisioner) updateNode(cluster *clusterv1.Cluster, machine *clusterv1 // Assumes that update is needed. // For now support only K8s control plane upgrades. -func (vc *Provisioner) updateMasterInPlace(machine *clusterv1.Machine) error { +func (pv *Provisioner) updateMasterInPlace(machine *clusterv1.Machine) error { // Execute a control plane upgrade. 
- if err := vc.updateControlPlane(machine); err != nil { + if err := pv.updateControlPlane(machine); err != nil { return err } // Update annotation for version. machine.ObjectMeta.Annotations[constants.ControlPlaneVersionAnnotationKey] = machine.Spec.Versions.ControlPlane - if _, err := vc.clusterV1alpha1.Machines(machine.Namespace).Update(machine); err != nil { + if _, err := pv.clusterV1alpha1.Machines(machine.Namespace).Update(machine); err != nil { return err } return nil } -func (vc *Provisioner) remoteSshCommand(m *clusterv1.Machine, cmd, privateKeyPath, sshUser string) (string, error) { +func (pv *Provisioner) remoteSshCommand(m *clusterv1.Machine, cmd, privateKeyPath, sshUser string) (string, error) { glog.Infof("Remote SSH execution '%s' on %s", cmd, m.ObjectMeta.Name) - publicIP, err := vc.deploymentClient.GetIP(nil, m) + publicIP, err := pv.deploymentClient.GetIP(nil, m) if err != nil { return "", err } @@ -600,15 +600,15 @@ func (vc *Provisioner) remoteSshCommand(m *clusterv1.Machine, cmd, privateKeyPat return strings.TrimSpace(parts[1]), nil } -func (vc *Provisioner) Exists(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) { - i, err := vc.instanceIfExists(machine) +func (pv *Provisioner) Exists(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) { + i, err := pv.instanceIfExists(machine) if err != nil { return false, err } return i != nil, err } -func (vc *Provisioner) GetTfState(machine *clusterv1.Machine) (string, error) { +func (pv *Provisioner) GetTfState(machine *clusterv1.Machine) (string, error) { if machine.ObjectMeta.Annotations != nil { if tfStateB64, ok := machine.ObjectMeta.Annotations[StatusMachineTerraformState]; ok { glog.Infof("Returning tfstate for machine %s from annotation", machine.ObjectMeta.Name) @@ -633,7 +633,7 @@ func (vc *Provisioner) GetTfState(machine *clusterv1.Machine) (string, error) { // We are storing these as annotations and not in Machine Status because that's intended for // 
"Provider-specific status" that will usually be used to detect updates. Additionally, // Status requires yet another version API resource which is too heavy to store IP and TF state. -func (vc *Provisioner) updateAnnotations(cluster *clusterv1.Cluster, machine *clusterv1.Machine, vmIP, tfState string) error { +func (pv *Provisioner) updateAnnotations(cluster *clusterv1.Cluster, machine *clusterv1.Machine, vmIP, tfState string) error { glog.Infof("Updating annotations for machine %s", machine.ObjectMeta.Name) if machine.ObjectMeta.Annotations == nil { machine.ObjectMeta.Annotations = make(map[string]string) @@ -646,7 +646,7 @@ func (vc *Provisioner) updateAnnotations(cluster *clusterv1.Cluster, machine *cl machine.ObjectMeta.Annotations[constants.KubeletVersionAnnotationKey] = machine.Spec.Versions.Kubelet machine.ObjectMeta.Annotations[StatusMachineTerraformState] = tfStateB64 - _, err := vc.clusterV1alpha1.Machines(machine.Namespace).Update(machine) + _, err := pv.clusterV1alpha1.Machines(machine.Namespace).Update(machine) if err != nil { return err } @@ -654,7 +654,7 @@ func (vc *Provisioner) updateAnnotations(cluster *clusterv1.Cluster, machine *cl status := &vsphereconfig.VsphereClusterProviderStatus{LastUpdated: time.Now().UTC().String()} out, err := json.Marshal(status) cluster.Status.ProviderStatus = &runtime.RawExtension{Raw: out} - _, err = vc.clusterV1alpha1.Clusters(cluster.Namespace).UpdateStatus(cluster) + _, err = pv.clusterV1alpha1.Clusters(cluster.Namespace).UpdateStatus(cluster) if err != nil { glog.Infof("Error in updating the status: %s", err) return err @@ -663,9 +663,9 @@ func (vc *Provisioner) updateAnnotations(cluster *clusterv1.Cluster, machine *cl } // Returns the machine object if the passed machine exists in terraform state. 
-func (vc *Provisioner) instanceIfExists(machine *clusterv1.Machine) (*clusterv1.Machine, error) { +func (pv *Provisioner) instanceIfExists(machine *clusterv1.Machine) (*clusterv1.Machine, error) { machinePath := fmt.Sprintf(MachinePathStageFormat, machine.ObjectMeta.Name) - tfStateFilePath, err := vc.stageTfState(machine) + tfStateFilePath, err := pv.stageTfState(machine) if err != nil { return nil, err } @@ -686,14 +686,14 @@ func (vc *Provisioner) instanceIfExists(machine *clusterv1.Machine) (*clusterv1. return nil, nil } -func (vc *Provisioner) validateMachine(machine *clusterv1.Machine, config *vsphereconfig.VsphereMachineProviderConfig) *apierrors.MachineError { +func (pv *Provisioner) validateMachine(machine *clusterv1.Machine, config *vsphereconfig.VsphereMachineProviderConfig) *apierrors.MachineError { if machine.Spec.Versions.Kubelet == "" { return apierrors.InvalidMachineConfiguration("spec.versions.kubelet can't be empty") } return nil } -func (vc *Provisioner) validateCluster(cluster *clusterv1.Cluster) error { +func (pv *Provisioner) validateCluster(cluster *clusterv1.Cluster) error { if cluster.Spec.ClusterNetwork.ServiceDomain == "" { return errors.New("invalid cluster configuration: missing Cluster.Spec.ClusterNetwork.ServiceDomain") } @@ -714,13 +714,13 @@ func (vc *Provisioner) validateCluster(cluster *clusterv1.Cluster) error { return nil } -func (vc *Provisioner) getKubeadmToken(cluster *clusterv1.Cluster) (string, error) { +func (pv *Provisioner) getKubeadmToken(cluster *clusterv1.Cluster) (string, error) { // From the cluster locate the master node - master, err := vc.getMasterForCluster(cluster) + master, err := pv.getMasterForCluster(cluster) if err != nil { return "", err } - kubeconfig, err := vc.deploymentClient.GetKubeConfig(cluster, master) + kubeconfig, err := pv.deploymentClient.GetKubeConfig(cluster, master) if err != nil { return "", err } @@ -739,8 +739,8 @@ func (vc *Provisioner) getKubeadmToken(cluster *clusterv1.Cluster) 
(string, erro return strings.TrimSpace(output), err } -func (vc *Provisioner) getMasterForCluster(cluster *clusterv1.Cluster) (*clusterv1.Machine, error) { - machines, err := vc.lister.Machines().Lister().Machines(cluster.Namespace).List(labels.Everything()) +func (pv *Provisioner) getMasterForCluster(cluster *clusterv1.Cluster) (*clusterv1.Machine, error) { + machines, err := pv.lister.Machines().Lister().Machines(cluster.Namespace).List(labels.Everything()) if err != nil { return nil, err } @@ -788,16 +788,16 @@ func createTempFile(contents string) (string, error) { // the appropriate reason/message on the Machine.Status. If not, such as during // cluster installation, it will operate as a no-op. It also returns the // original error for convenience, so callers can do "return handleMachineError(...)". -func (vc *Provisioner) handleMachineError(machine *clusterv1.Machine, err *apierrors.MachineError, eventAction string) error { - if vc.clusterV1alpha1 != nil { +func (pv *Provisioner) handleMachineError(machine *clusterv1.Machine, err *apierrors.MachineError, eventAction string) error { + if pv.clusterV1alpha1 != nil { reason := err.Reason message := err.Message machine.Status.ErrorReason = &reason machine.Status.ErrorMessage = &message - vc.clusterV1alpha1.Machines(machine.Namespace).UpdateStatus(machine) + pv.clusterV1alpha1.Machines(machine.Namespace).UpdateStatus(machine) } if eventAction != "" { - vc.eventRecorder.Eventf(machine, corev1.EventTypeWarning, "Failed"+eventAction, "%v", err.Reason) + pv.eventRecorder.Eventf(machine, corev1.EventTypeWarning, "Failed"+eventAction, "%v", err.Reason) } glog.Errorf("Machine error: %v", err.Message) diff --git a/cloud/vsphere/utils/utils.go b/pkg/cloud/vsphere/utils/utils.go similarity index 94% rename from cloud/vsphere/utils/utils.go rename to pkg/cloud/vsphere/utils/utils.go index 227916091b6a..07f516387857 100644 --- a/cloud/vsphere/utils/utils.go +++ b/pkg/cloud/vsphere/utils/utils.go @@ -13,9 +13,9 @@ import ( 
"github.com/golang/glog" "k8s.io/apimachinery/pkg/labels" - "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/constants" - vsphereconfig "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/vsphereproviderconfig" - vsphereconfigv1 "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/vsphereproviderconfig/v1alpha1" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/constants" + vsphereconfig "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/vsphereproviderconfig" + vsphereconfigv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/vsphereproviderconfig/v1alpha1" clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" v1alpha1 "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1" "sigs.k8s.io/cluster-api/pkg/util" @@ -182,7 +182,7 @@ func GetKubeConfig(cluster *clusterv1.Cluster, master *clusterv1.Machine) (strin cmd.Stderr = os.Stderr err = cmd.Run() if err != nil { - glog.Infof("ssh failed with error = %#v", err) + glog.Infof("ssh failed with error = %s", err.Error()) } result := strings.TrimSpace(out.String()) if len(result) > 0 { diff --git a/cloud/vsphere/vsphereproviderconfig/doc.go b/pkg/cloud/vsphere/vsphereproviderconfig/doc.go similarity index 100% rename from cloud/vsphere/vsphereproviderconfig/doc.go rename to pkg/cloud/vsphere/vsphereproviderconfig/doc.go diff --git a/cloud/vsphere/vsphereproviderconfig/register.go b/pkg/cloud/vsphere/vsphereproviderconfig/register.go similarity index 100% rename from cloud/vsphere/vsphereproviderconfig/register.go rename to pkg/cloud/vsphere/vsphereproviderconfig/register.go diff --git a/cloud/vsphere/vsphereproviderconfig/types.go b/pkg/cloud/vsphere/vsphereproviderconfig/types.go similarity index 100% rename from cloud/vsphere/vsphereproviderconfig/types.go rename to pkg/cloud/vsphere/vsphereproviderconfig/types.go diff --git a/cloud/vsphere/vsphereproviderconfig/v1alpha1/doc.go 
b/pkg/cloud/vsphere/vsphereproviderconfig/v1alpha1/doc.go similarity index 100% rename from cloud/vsphere/vsphereproviderconfig/v1alpha1/doc.go rename to pkg/cloud/vsphere/vsphereproviderconfig/v1alpha1/doc.go diff --git a/cloud/vsphere/vsphereproviderconfig/v1alpha1/register.go b/pkg/cloud/vsphere/vsphereproviderconfig/v1alpha1/register.go similarity index 95% rename from cloud/vsphere/vsphereproviderconfig/v1alpha1/register.go rename to pkg/cloud/vsphere/vsphereproviderconfig/v1alpha1/register.go index 4cfce241974a..0744bec18e08 100644 --- a/cloud/vsphere/vsphereproviderconfig/v1alpha1/register.go +++ b/pkg/cloud/vsphere/vsphereproviderconfig/v1alpha1/register.go @@ -20,7 +20,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" - "sigs.k8s.io/cluster-api-provider-vsphere/cloud/vsphere/vsphereproviderconfig" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/cloud/vsphere/vsphereproviderconfig" ) const GroupName = "vsphereproviderconfig" diff --git a/cloud/vsphere/vsphereproviderconfig/v1alpha1/types.go b/pkg/cloud/vsphere/vsphereproviderconfig/v1alpha1/types.go similarity index 100% rename from cloud/vsphere/vsphereproviderconfig/v1alpha1/types.go rename to pkg/cloud/vsphere/vsphereproviderconfig/v1alpha1/types.go diff --git a/cloud/vsphere/vsphereproviderconfig/v1alpha1/zz_generated.deepcopy.go b/pkg/cloud/vsphere/vsphereproviderconfig/v1alpha1/zz_generated.deepcopy.go similarity index 100% rename from cloud/vsphere/vsphereproviderconfig/v1alpha1/zz_generated.deepcopy.go rename to pkg/cloud/vsphere/vsphereproviderconfig/v1alpha1/zz_generated.deepcopy.go diff --git a/cloud/vsphere/vsphereproviderconfig/zz_generated.deepcopy.go b/pkg/cloud/vsphere/vsphereproviderconfig/zz_generated.deepcopy.go similarity index 100% rename from cloud/vsphere/vsphereproviderconfig/zz_generated.deepcopy.go rename to pkg/cloud/vsphere/vsphereproviderconfig/zz_generated.deepcopy.go