From 2dd18868acac0603472bac59db1d8191c07cc8b6 Mon Sep 17 00:00:00 2001
From: Dominik Zyla
Date: Fri, 12 Apr 2019 16:04:47 +0100
Subject: [PATCH] Switch dep to use release-0.1 branch instead of version
 (#715)

---
 Gopkg.lock                              |  6 +++---
 Gopkg.toml                              |  2 +-
 .../clusterclient/clusterclient.go      | 15 +++++++------
 .../clusterdeployer/clusterdeployer.go  | 21 ++++++++++++++-----
 4 files changed, 27 insertions(+), 17 deletions(-)

diff --git a/Gopkg.lock b/Gopkg.lock
index 834d4eb408..97aabe27f5 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -1137,7 +1137,8 @@
   revision = "c2654d5206da6b7b6ace12841e8f359bb89b443c"

 [[projects]]
-  digest = "1:bc04752ec8a48d212a0db41c2085bb468ae7c8c34bcb317f552b68f4fe34865b"
+  branch = "release-0.1"
+  digest = "1:f533e4944f03eb4169dc77e4b04ee462e2b99f339b385044d914e196dc3e26ec"
   name = "sigs.k8s.io/cluster-api"
   packages = [
     "cmd/clusterctl/clientcmd",
@@ -1165,8 +1166,7 @@
     "pkg/util",
   ]
   pruneopts = "T"
-  revision = "29fee0a9ef590af33bbaa167ed17a8be50825679"
-  version = "0.1.0"
+  revision = "872d75c97d0610ab695d3b758375f7ae12cd1165"

 [[projects]]
   digest = "1:4e97dc637c467531240326f56e253c6d42b519424b6a89d80664ef0f3ed4f721"
diff --git a/Gopkg.toml b/Gopkg.toml
index 91b9d6a5f3..10b6bf58e7 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -50,7 +50,7 @@ required = [

 [[constraint]]
   name = "sigs.k8s.io/cluster-api"
-  version = "0.1.0"
+  branch = "release-0.1"

 # For dependency below: Refer to issue https://github.com/golang/dep/issues/1799
 [[override]]
diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient/clusterclient.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient/clusterclient.go
index 9bdd7a9fc9..bc4fcf4a38 100644
--- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient/clusterclient.go
+++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient/clusterclient.go
@@ -1019,17 +1019,16 @@ func GetClusterAPIObject(client Client, clusterName, namespace string) (*cluster
 		return nil, nil, nil, errors.Wrapf(err, "unable to fetch cluster %s/%s", namespace, clusterName)
 	}

-	controlPlane, nodes, err := ExtractControlPlaneMachine(machines)
+	controlPlane, nodes, err := ExtractControlPlaneMachines(machines)
 	if err != nil {
 		return nil, nil, nil, errors.Wrapf(err, "unable to fetch control plane machine in cluster %s/%s", namespace, clusterName)
 	}
-	return cluster, controlPlane, nodes, nil
+	return cluster, controlPlane[0], nodes, nil
 }

-// ExtractControlPlaneMachine separates the machines running the control plane (singular) from the incoming machines.
+// ExtractControlPlaneMachines separates the machines running the control plane from the incoming machines.
 // This is currently done by looking at which machine specifies the control plane version.
-// TODO: Cleanup.
-func ExtractControlPlaneMachine(machines []*clusterv1.Machine) (*clusterv1.Machine, []*clusterv1.Machine, error) {
+func ExtractControlPlaneMachines(machines []*clusterv1.Machine) ([]*clusterv1.Machine, []*clusterv1.Machine, error) {
 	nodes := []*clusterv1.Machine{}
 	controlPlaneMachines := []*clusterv1.Machine{}
 	for _, machine := range machines {
@@ -1039,8 +1038,8 @@ func ExtractControlPlaneMachine(machines []*clusterv1.Machine) (*clusterv1.Machi
 			nodes = append(nodes, machine)
 		}
 	}
-	if len(controlPlaneMachines) != 1 {
-		return nil, nil, errors.Errorf("expected one control plane machine, got: %v", len(controlPlaneMachines))
+	if len(controlPlaneMachines) < 1 {
+		return nil, nil, errors.Errorf("expected one or more control plane machines, got: %v", len(controlPlaneMachines))
 	}
-	return controlPlaneMachines[0], nodes, nil
+	return controlPlaneMachines, nodes, nil
 }
diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer.go
index 73d17b3e09..f3263b508c 100644
--- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer.go
+++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer.go
@@ -57,7 +57,7 @@ func New(

 // Create the cluster from the provided cluster definition and machine list.
 func (d *ClusterDeployer) Create(cluster *clusterv1.Cluster, machines []*clusterv1.Machine, provider provider.Deployer, kubeconfigOutput string, providerComponentsStoreFactory provider.ComponentsStoreFactory) error {
-	controlPlaneMachine, nodes, err := clusterclient.ExtractControlPlaneMachine(machines)
+	controlPlaneMachines, nodes, err := clusterclient.ExtractControlPlaneMachines(machines)
 	if err != nil {
 		return errors.Wrap(err, "unable to separate control plane machines from node machines")
 	}
@@ -89,12 +89,12 @@ func (d *ClusterDeployer) Create(cluster *clusterv1.Cluster, machines []*cluster
 		cluster.Namespace = bootstrapClient.GetContextNamespace()
 	}

-	klog.Infof("Creating control plane %v in namespace %q", controlPlaneMachine.Name, cluster.Namespace)
-	if err := phases.ApplyMachines(bootstrapClient, cluster.Namespace, []*clusterv1.Machine{controlPlaneMachine}); err != nil {
+	klog.Infof("Creating control plane %v in namespace %q", controlPlaneMachines[0].Name, cluster.Namespace)
+	if err := phases.ApplyMachines(bootstrapClient, cluster.Namespace, []*clusterv1.Machine{controlPlaneMachines[0]}); err != nil {
 		return errors.Wrap(err, "unable to create control plane machine")
 	}

-	klog.Infof("Updating bootstrap cluster object for cluster %v in namespace %q with control plane endpoint running on %s", cluster.Name, cluster.Namespace, controlPlaneMachine.Name)
+	klog.Infof("Updating bootstrap cluster object for cluster %v in namespace %q with control plane endpoint running on %s", cluster.Name, cluster.Namespace, controlPlaneMachines[0].Name)
 	if err := d.updateClusterEndpoint(bootstrapClient, provider, cluster.Name, cluster.Namespace); err != nil {
 		return errors.Wrap(err, "unable to update bootstrap cluster endpoint")
 	}
@@ -130,11 +130,22 @@ func (d *ClusterDeployer) Create(cluster *clusterv1.Cluster, machines []*cluster

 	// For some reason, endpoint doesn't get updated in bootstrap cluster sometimes. So we
 	// update the target cluster endpoint as well to be sure.
-	klog.Infof("Updating target cluster object with control plane endpoint running on %s", controlPlaneMachine.Name)
+	klog.Infof("Updating target cluster object with control plane endpoint running on %s", controlPlaneMachines[0].Name)
 	if err := d.updateClusterEndpoint(targetClient, provider, cluster.Name, cluster.Namespace); err != nil {
 		return errors.Wrap(err, "unable to update target cluster endpoint")
 	}

+	if len(controlPlaneMachines) > 1 {
+		// TODO(h0tbird) Done serially until kubernetes/kubeadm#1097 is resolved and all
+		// supported versions of k8s we are deploying (using kubeadm) have the fix.
+		klog.Info("Creating additional control plane machines in target cluster.")
+		for _, controlPlaneMachine := range controlPlaneMachines[1:] {
+			if err := phases.ApplyMachines(targetClient, cluster.Namespace, []*clusterv1.Machine{controlPlaneMachine}); err != nil {
+				return errors.Wrap(err, "unable to create additional control plane machines")
+			}
+		}
+	}
+
 	klog.Info("Creating node machines in target cluster.")
 	if err := phases.ApplyMachines(targetClient, cluster.Namespace, nodes); err != nil {
 		return errors.Wrap(err, "unable to create node machines")