From 999502d7d58d9b956c29cd5a3f72ac8d8fa5b8b2 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Thu, 25 Apr 2019 12:45:06 +0200 Subject: [PATCH] Sync with the latest cluster-api and cluster-api-actuator-pkg dep ensure -v -update github.com/openshift/cluster-api-actuator-pkg dep ensure -v -update github.com/openshift/cluster-api --- Gopkg.lock | 8 +- .../cluster-api-actuator-pkg/Gopkg.lock | 4 +- .../cluster-api-actuator-pkg/Makefile | 25 ++- .../hack/ci-integration.sh | 8 + .../pkg/e2e/autoscaler/autoscaler.go | 183 +++++++++--------- .../pkg/manifests/manifests.go | 8 +- .../pkg/controller/machine/controller.go | 2 +- .../machinedeployment/controller.go | 2 +- .../pkg/controller/machineset/controller.go | 2 +- 9 files changed, 128 insertions(+), 114 deletions(-) create mode 100755 vendor/github.com/openshift/cluster-api-actuator-pkg/hack/ci-integration.sh diff --git a/Gopkg.lock b/Gopkg.lock index 8ef4792a82..2ed1a29a8b 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -485,7 +485,7 @@ [[projects]] branch = "openshift-4.0-cluster-api-0.0.0-alpha.4" - digest = "1:6c1953688007e98a582591088bfede8a3bb9e81dce2cdac3b4ace98730f6e2e5" + digest = "1:22cd3b43b4d813a4961b8774e13f7c8b7a8a3d0bfd6246290e956667a03d8086" name = "github.com/openshift/cluster-api" packages = [ "pkg/apis", @@ -505,11 +505,11 @@ "pkg/util", ] pruneopts = "T" - revision = "f46665c25908ee8fd5cb6506f374e3ae254a08dc" + revision = "88d73f06ea7453ec900d043a2c4c35794abf4998" [[projects]] branch = "master" - digest = "1:7cbcae314ea9375bebbecfc8b520bbce9786f9c4d1b42824f2a1613695e3faad" + digest = "1:10a5ccf52b9b2af2f8e9bf51ef8e67324e9e999f73099ddea1c849ae5295ced8" name = "github.com/openshift/cluster-api-actuator-pkg" packages = [ "pkg/e2e/autoscaler", @@ -520,7 +520,7 @@ "pkg/types", ] pruneopts = "" - revision = "59a034672129ff21181ac38871a47dabbc900c7f" + revision = "7405f6233af2dd7ff0bc6ac815d4a7e954af8ade" [[projects]] branch = "master" diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/Gopkg.lock b/vendor/github.com/openshift/cluster-api-actuator-pkg/Gopkg.lock index ab96cbd654..2d0b80767d 100644 --- a/vendor/github.com/openshift/cluster-api-actuator-pkg/Gopkg.lock +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/Gopkg.lock @@ -372,7 +372,7 @@ [[projects]] branch = "openshift-4.0-cluster-api-0.0.0-alpha.4" - digest = "1:dbd28c2d6b9c58d0dfbfaa7cb2efce07fc36f812c2eb52197856d40d91a635bb" + digest = "1:999aa1e8d36fbd410b276ef5a47d62d4a2caaece354e4377f7b8d27f9d75f3ca" name = "github.com/openshift/cluster-api" packages = [ "pkg/apis/cluster/common", @@ -388,7 +388,7 @@ "pkg/util", ] pruneopts = "" - revision = "5c833e464afbd8a9b3aca3f1d09943478bd90521" + revision = "776449739aa75bee4287469e3c82ded381f50b3c" [[projects]] branch = "master" diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/Makefile b/vendor/github.com/openshift/cluster-api-actuator-pkg/Makefile index 2d4952d5fd..dbf8725c08 100644 --- a/vendor/github.com/openshift/cluster-api-actuator-pkg/Makefile +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/Makefile @@ -45,22 +45,21 @@ build-e2e: .PHONY: test-e2e test-e2e: ## Run openshift specific e2e test - go test -timeout 90m \ - -v github.com/openshift/cluster-api-actuator-pkg/pkg/e2e \ - -kubeconfig $${KUBECONFIG:-~/.kube/config} \ - -machine-api-namespace $${NAMESPACE:-openshift-machine-api} \ - -ginkgo.v \ - -args -v 5 -logtostderr true + # Run operator tests first to preserve logs for troubleshooting test + # failures and flakes. + # Feature:Operator tests remove deployments. 
Thus loosing all the logs + # previously acquired. + hack/ci-integration.sh -ginkgo.v -ginkgo.noColor=true -ginkgo.focus "Feature:Operators" + hack/ci-integration.sh -ginkgo.v -ginkgo.noColor=true -ginkgo.skip "Feature:Operators" .PHONY: k8s-e2e k8s-e2e: ## Run k8s specific e2e test - go test -timeout 30m \ - -v github.com/openshift/cluster-api-actuator-pkg/pkg/e2e \ - -kubeconfig $${KUBECONFIG:-~/.kube/config} \ - -machine-api-namespace $${NAMESPACE:-kube-system} \ - -ginkgo.v \ - -args -v 5 -logtostderr true - + # Run operator tests first to preserve logs for troubleshooting test + # failures and flakes. + # Feature:Operator tests remove deployments. Thus loosing all the logs + # previously acquired. + NAMESPACE=kube-system hack/ci-integration.sh -ginkgo.v -ginkgo.noColor=true -ginkgo.focus "Feature:Operators" + NAMESPACE=kube-system hack/ci-integration.sh -ginkgo.v -ginkgo.noColor=true -ginkgo.skip "Feature:Operators" .PHONY: help help: diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/hack/ci-integration.sh b/vendor/github.com/openshift/cluster-api-actuator-pkg/hack/ci-integration.sh new file mode 100755 index 0000000000..66db62562d --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/hack/ci-integration.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +go test -timeout 90m \ + -v github.com/openshift/cluster-api-actuator-pkg/pkg/e2e \ + -kubeconfig ${KUBECONFIG:-~/.kube/config} \ + -machine-api-namespace ${NAMESPACE:-openshift-machine-api} \ + -args -v 5 -logtostderr \ + $@ diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go index 3fd410bdf5..99c83f0105 100644 --- a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go @@ -178,13 +178,87 @@ func machineAutoscalerResource(targetMachineSet *mapiv1beta1.MachineSet, minRepl var _ = g.Describe("[Feature:Machines] Autoscaler should", func() { defer g.GinkgoRecover() + scaleUpFunc := func(client runtimeclient.Client, targetMachineSet mapiv1beta1.MachineSet, nodeTestLabel string, initialNumberOfReplicas int32, expectedReplicas int32) { + g.By(fmt.Sprintf("Creating MachineAutoscaler object for targetMachineSet %q", targetMachineSet.Name)) + machineAutoscaler := machineAutoscalerResource(&targetMachineSet, 1, expectedReplicas) + err := client.Create(context.TODO(), machineAutoscaler) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By(fmt.Sprintf("Waiting for cluster to scale out number of replicas of targetMachineSet %q", targetMachineSet.Name)) + err = wait.PollImmediate(5*time.Second, e2e.WaitLong, func() (bool, error) { + ms, err := e2e.GetMachineSet(context.TODO(), client, targetMachineSet.Name) + if err != nil { + glog.Errorf("error getting machineset object: %v, retrying...", err) + return false, nil + } + glog.Infof("MachineSet %q. Initial number of replicas: %d. 
Current number of replicas: %d", targetMachineSet.Name, initialNumberOfReplicas, pointer.Int32PtrDerefOr(ms.Spec.Replicas, e2e.DefaultMachineSetReplicas)) + return pointer.Int32PtrDerefOr(ms.Spec.Replicas, e2e.DefaultMachineSetReplicas) == expectedReplicas, nil + }) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By(fmt.Sprintf("Waiting for cluster to scale up nodes for targetMachineSet %q", targetMachineSet.Name)) + err = wait.PollImmediate(5*time.Second, e2e.WaitLong, func() (bool, error) { + scaledMachines := mapiv1beta1.MachineList{} + if err := client.List(context.TODO(), runtimeclient.MatchingLabels(targetMachineSet.Spec.Selector.MatchLabels), &scaledMachines); err != nil { + glog.Errorf("Error querying api for machineset object: %v, retrying...", err) + return false, nil + } + + // get all linked nodes and label them + nodeCounter := 0 + for _, machine := range scaledMachines.Items { + if machine.Status.NodeRef == nil { + glog.Errorf("Machine %q does not have node reference set", machine.Name) + return false, nil + } + glog.Infof("Machine %q is linked to node %q", machine.Name, machine.Status.NodeRef.Name) + nodeCounter++ + } + + glog.Infof("Expecting %d nodes. Current number of nodes in the group: %d", expectedReplicas, nodeCounter) + return nodeCounter == int(expectedReplicas), nil + }) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By(fmt.Sprintf("Labeling all nodes belonging to the machineset %q (after scale up phase)", targetMachineSet.Name)) + err = labelMachineSetNodes(client, &targetMachineSet, nodeTestLabel) + o.Expect(err).NotTo(o.HaveOccurred()) + } + + scaleDownFunc := func(client runtimeclient.Client, targetMachineSet string, nodeTestLabel string, initialNumberOfReplicas int32) { + g.By(fmt.Sprintf("Waiting for machineset %q to have at most initial number of replicas", targetMachineSet)) + err := wait.PollImmediate(5*time.Second, e2e.WaitLong, func() (bool, error) { + ms, err := e2e.GetMachineSet(context.TODO(), client, targetMachineSet) + if err != nil { + glog.Errorf("error getting machineset object: %v, retrying...", err) + return false, nil + } + msReplicas := pointer.Int32PtrDerefOr(ms.Spec.Replicas, e2e.DefaultMachineSetReplicas) + glog.Infof("Initial number of replicas: %d. Current number of replicas: %d", initialNumberOfReplicas, msReplicas) + if msReplicas > initialNumberOfReplicas { + return false, nil + } + + // Make sure all scaled down nodes are really gone (so they don't affect tests to be run next) + scaledNodes := corev1.NodeList{} + if err := client.List(context.TODO(), runtimeclient.MatchingLabels(map[string]string{nodeTestLabel: ""}), &scaledNodes); err != nil { + glog.Errorf("Error querying api for node objects: %v, retrying...", err) + return false, nil + } + scaledNodesLen := int32(len(scaledNodes.Items)) + glog.Infof("Current number of replicas: %d. Current number of nodes: %d", msReplicas, scaledNodesLen) + return scaledNodesLen <= msReplicas && scaledNodesLen <= initialNumberOfReplicas, nil + }) + o.Expect(err).NotTo(o.HaveOccurred()) + } g.It("scale out", func() { var err error client, err := e2e.LoadClient() o.Expect(err).NotTo(o.HaveOccurred()) - nodeTestLabel := fmt.Sprintf("machine.openshift.io/autoscaling-test-%v", string(uuid.NewUUID())) + nodeTestLabel0 := fmt.Sprintf("machine.openshift.io/autoscaling-test-%v", string(uuid.NewUUID())) + nodeTestLabel1 := fmt.Sprintf("machine.openshift.io/autoscaling-test-%v", string(uuid.NewUUID())) // We want to clean up these objects on any subsequent error. 
defer func() { @@ -213,19 +287,25 @@ var _ = g.Describe("[Feature:Machines] Autoscaler should", func() { } }() - g.By("Getint target machineSet") + g.By("Getting target machineSet") machinesets, err := e2e.GetMachineSets(context.TODO(), client) o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(len(machinesets)).To(o.BeNumerically(">", 0)) + o.Expect(len(machinesets)).To(o.BeNumerically(">", 1)) - targetMachineSet := machinesets[0] - glog.Infof("Target machineSet %s", targetMachineSet.Name) + targetMachineSet0 := machinesets[0] + glog.Infof("Target machineSet0 %q", targetMachineSet0.Name) + targetMachineSet1 := machinesets[1] + glog.Infof("Target machineSet1 %q", targetMachineSet1.Name) // When we add support for machineDeployments on the installer, cluster-autoscaler and cluster-autoscaler-operator // we need to test against deployments instead so we skip this test. - if ownerReferences := targetMachineSet.GetOwnerReferences(); len(ownerReferences) > 0 { + if ownerReferences0 := targetMachineSet0.GetOwnerReferences(); len(ownerReferences0) > 0 { // glog.Infof("MachineSet %s is owned by a machineDeployment. Please run tests against machineDeployment instead", targetMachineSet.Name) - g.Skip(fmt.Sprintf("MachineSet %s is owned by a machineDeployment. Please run tests against machineDeployment instead", targetMachineSet.Name)) + g.Skip(fmt.Sprintf("MachineSet %q is owned by a machineDeployment. Please run tests against machineDeployment instead", targetMachineSet0.Name)) + } + + if ownerReferences1 := targetMachineSet1.GetOwnerReferences(); len(ownerReferences1) > 0 { + g.Skip(fmt.Sprintf("MachineSet %q is owned by a machineDeployment. Please run tests against machineDeployment instead", targetMachineSet1.Name)) } g.By("Create ClusterAutoscaler object") @@ -233,68 +313,17 @@ var _ = g.Describe("[Feature:Machines] Autoscaler should", func() { err = client.Create(context.TODO(), clusterAutoscaler) o.Expect(err).NotTo(o.HaveOccurred()) - initialNumberOfReplicas := pointer.Int32PtrDerefOr(targetMachineSet.Spec.Replicas, e2e.DefaultMachineSetReplicas) - - g.By("Creating MachineAutoscaler objects") - machineAutoscaler := machineAutoscalerResource(&targetMachineSet, 1, initialNumberOfReplicas+1) - err = client.Create(context.TODO(), machineAutoscaler) - o.Expect(err).NotTo(o.HaveOccurred()) - - g.By("Labeling all nodes belonging to the machineset (before scale up phase)") - err = labelMachineSetNodes(client, &targetMachineSet, nodeTestLabel) - o.Expect(err).NotTo(o.HaveOccurred()) - - glog.Info("Get nodeList") - nodeList := corev1.NodeList{} - err = client.List(context.TODO(), runtimeclient.MatchingLabels(map[string]string{nodeTestLabel: ""}), &nodeList) - o.Expect(err).NotTo(o.HaveOccurred()) - - nodeGroupInitialTotalNodes := len(nodeList.Items) - glog.Infof("Cluster initial number of nodes in node group %v is %d", targetMachineSet.Name, nodeGroupInitialTotalNodes) + initialNumberOfReplicas0 := pointer.Int32PtrDerefOr(targetMachineSet0.Spec.Replicas, e2e.DefaultMachineSetReplicas) + initialNumberOfReplicas1 := pointer.Int32PtrDerefOr(targetMachineSet1.Spec.Replicas, e2e.DefaultMachineSetReplicas) + glog.Infof("initialNumberOfReplicas0 %d, initialNumberOfReplicas1 %d", initialNumberOfReplicas0, initialNumberOfReplicas1) + o.Expect(initialNumberOfReplicas0).To(o.BeNumerically("==", initialNumberOfReplicas1)) g.By("Creating workload") err = client.Create(context.TODO(), newWorkLoad()) o.Expect(err).NotTo(o.HaveOccurred()) - g.By("Waiting for cluster to scale out number of replicas") - err = 
wait.PollImmediate(5*time.Second, e2e.WaitLong, func() (bool, error) { - ms, err := e2e.GetMachineSet(context.TODO(), client, targetMachineSet.Name) - if err != nil { - glog.Errorf("error getting machineset object: %v, retrying...", err) - return false, nil - } - glog.Infof("MachineSet %s. Initial number of replicas: %d. Current number of replicas: %d", targetMachineSet.Name, initialNumberOfReplicas, pointer.Int32PtrDerefOr(ms.Spec.Replicas, e2e.DefaultMachineSetReplicas)) - return pointer.Int32PtrDerefOr(ms.Spec.Replicas, e2e.DefaultMachineSetReplicas) > initialNumberOfReplicas, nil - }) - o.Expect(err).NotTo(o.HaveOccurred()) - - g.By("Waiting for cluster to scale up nodes") - err = wait.PollImmediate(5*time.Second, e2e.WaitLong, func() (bool, error) { - scaledMachines := mapiv1beta1.MachineList{} - if err := client.List(context.TODO(), runtimeclient.MatchingLabels(targetMachineSet.Spec.Selector.MatchLabels), &scaledMachines); err != nil { - glog.Errorf("Error querying api for machineset object: %v, retrying...", err) - return false, nil - } - - // get all linked nodes and label them - nodeCounter := 0 - for _, machine := range scaledMachines.Items { - if machine.Status.NodeRef == nil { - glog.Errorf("Machine %q does not have node reference set", machine.Name) - return false, nil - } - glog.Infof("Machine %q is linked to node %q", machine.Name, machine.Status.NodeRef.Name) - nodeCounter++ - } - - glog.Infof("Expecting at least one new node to come up. Initial number of node group nodes: %d. Current number of nodes in the group: %d", nodeGroupInitialTotalNodes, nodeCounter) - return nodeCounter > nodeGroupInitialTotalNodes, nil - }) - o.Expect(err).NotTo(o.HaveOccurred()) - - g.By("Labeling all nodes belonging to the machineset (after scale up phase)") - err = labelMachineSetNodes(client, &targetMachineSet, nodeTestLabel) - o.Expect(err).NotTo(o.HaveOccurred()) + scaleUpFunc(client, targetMachineSet1, nodeTestLabel1, initialNumberOfReplicas1, initialNumberOfReplicas1+2) + scaleUpFunc(client, targetMachineSet0, nodeTestLabel0, initialNumberOfReplicas0, initialNumberOfReplicas1+1) // Delete workload g.By("Deleting workload") @@ -305,30 +334,8 @@ var _ = g.Describe("[Feature:Machines] Autoscaler should", func() { // start to scale down the unneeded nodes. We wait for that // condition; if successful we assert that (a smoke test of) // scale down is functional. - g.By("Waiting for cluster to have at most initial number of replicas") - err = wait.PollImmediate(5*time.Second, e2e.WaitLong, func() (bool, error) { - ms, err := e2e.GetMachineSet(context.TODO(), client, targetMachineSet.Name) - if err != nil { - glog.Errorf("error getting machineset object: %v, retrying...", err) - return false, nil - } - msReplicas := pointer.Int32PtrDerefOr(ms.Spec.Replicas, e2e.DefaultMachineSetReplicas) - glog.Infof("Initial number of replicas: %d. Current number of replicas: %d", initialNumberOfReplicas, msReplicas) - if msReplicas > initialNumberOfReplicas { - return false, nil - } - - // Make sure all scaled down nodes are really gone (so they don't affect tests to be run next) - scaledNodes := corev1.NodeList{} - if err := client.List(context.TODO(), runtimeclient.MatchingLabels(map[string]string{nodeTestLabel: ""}), &scaledNodes); err != nil { - glog.Errorf("Error querying api for node objects: %v, retrying...", err) - return false, nil - } - scaledNodesLen := int32(len(scaledNodes.Items)) - glog.Infof("Current number of replicas: %d. 
Current number of nodes: %d", msReplicas, scaledNodesLen) - return scaledNodesLen <= msReplicas && scaledNodesLen <= initialNumberOfReplicas, nil - }) - o.Expect(err).NotTo(o.HaveOccurred()) + scaleDownFunc(client, targetMachineSet0.Name, nodeTestLabel0, initialNumberOfReplicas0) + scaleDownFunc(client, targetMachineSet1.Name, nodeTestLabel1, initialNumberOfReplicas1) }) }) diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/manifests/manifests.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/manifests/manifests.go index e975c0134a..9f38ac1c01 100644 --- a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/manifests/manifests.go +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/manifests/manifests.go @@ -1056,8 +1056,8 @@ func WorkerMachineSet(clusterID, namespace string, providerSpec machinev1beta1.P Spec: machinev1beta1.MachineSetSpec{ Selector: metav1.LabelSelector{ MatchLabels: map[string]string{ - "sigs.k8s.io/cluster-api-machineset": clusterID + "-worker-machineset-" + randomUUID[:6], - "machine.openshift.io/cluster-api-cluster": clusterID, + "machine.openshift.io/cluster-api-machineset": clusterID + "-worker-machineset-" + randomUUID[:6], + "machine.openshift.io/cluster-api-cluster": clusterID, }, }, Replicas: &replicas, @@ -1065,8 +1065,8 @@ func WorkerMachineSet(clusterID, namespace string, providerSpec machinev1beta1.P ObjectMeta: metav1.ObjectMeta{ GenerateName: clusterID + "-worker-machine-" + randomUUID[:6] + "-", Labels: map[string]string{ - "sigs.k8s.io/cluster-api-machineset": clusterID + "-worker-machineset-" + randomUUID[:6], - "machine.openshift.io/cluster-api-cluster": clusterID, + "machine.openshift.io/cluster-api-machineset": clusterID + "-worker-machineset-" + randomUUID[:6], + "machine.openshift.io/cluster-api-cluster": clusterID, }, }, Spec: machinev1beta1.MachineSpec{ diff --git a/vendor/github.com/openshift/cluster-api/pkg/controller/machine/controller.go b/vendor/github.com/openshift/cluster-api/pkg/controller/machine/controller.go index 227793a4dc..d41d0e9aaf 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/controller/machine/controller.go +++ b/vendor/github.com/openshift/cluster-api/pkg/controller/machine/controller.go @@ -304,7 +304,7 @@ func (r *ReconcileMachine) drainNode(machine *machinev1.Machine) error { func (r *ReconcileMachine) getCluster(ctx context.Context, machine *machinev1.Machine) (*machinev1.Cluster, error) { if machine.Labels[machinev1.MachineClusterLabelName] == "" { - klog.Infof("Machine %q in namespace %q doesn't specify %q label, assuming nil cluster", machine.Name, machinev1.MachineClusterLabelName, machine.Namespace) + klog.Infof("Machine %q in namespace %q doesn't specify %q label, assuming nil cluster", machine.Name, machine.Namespace, machinev1.MachineClusterLabelName) return nil, nil } diff --git a/vendor/github.com/openshift/cluster-api/pkg/controller/machinedeployment/controller.go b/vendor/github.com/openshift/cluster-api/pkg/controller/machinedeployment/controller.go index 67e9c38941..e986773f07 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/controller/machinedeployment/controller.go +++ b/vendor/github.com/openshift/cluster-api/pkg/controller/machinedeployment/controller.go @@ -271,7 +271,7 @@ func (r *ReconcileMachineDeployment) reconcile(ctx context.Context, d *v1beta1.M func (r *ReconcileMachineDeployment) getCluster(d *v1beta1.MachineDeployment) (*v1beta1.Cluster, error) { if d.Spec.Template.Labels[v1beta1.MachineClusterLabelName] == "" { - 
klog.Infof("Deployment %q in namespace %q doesn't specify %q label, assuming nil cluster", d.Name, v1beta1.MachineClusterLabelName, d.Namespace) + klog.Infof("Deployment %q in namespace %q doesn't specify %q label, assuming nil cluster", d.Name, d.Namespace, v1beta1.MachineClusterLabelName) return nil, nil } diff --git a/vendor/github.com/openshift/cluster-api/pkg/controller/machineset/controller.go b/vendor/github.com/openshift/cluster-api/pkg/controller/machineset/controller.go index 48d8d3cfaa..421179ef26 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/controller/machineset/controller.go +++ b/vendor/github.com/openshift/cluster-api/pkg/controller/machineset/controller.go @@ -293,7 +293,7 @@ func (r *ReconcileMachineSet) reconcile(ctx context.Context, machineSet *machine func (r *ReconcileMachineSet) getCluster(ms *machinev1beta1.MachineSet) (*machinev1beta1.Cluster, error) { if ms.Spec.Template.Labels[machinev1beta1.MachineClusterLabelName] == "" { - klog.Infof("MachineSet %q in namespace %q doesn't specify %q label, assuming nil cluster", ms.Name, machinev1beta1.MachineClusterLabelName, ms.Namespace) + klog.Infof("MachineSet %q in namespace %q doesn't specify %q label, assuming nil cluster", ms.Name, ms.Namespace, machinev1beta1.MachineClusterLabelName) return nil, nil }